27 Commits

Author SHA1 Message Date
Valere Fedronic
40fdef89eb Merge pull request #3780 from JephDiel/Download-Avatars-with-MC4039
Some checks failed
Build / build_full_element_call (push) Has been cancelled
Build / build_embedded_element_call (push) Has been cancelled
Build / build_sdk_element_call (push) Has been cancelled
Build & publish embedded packages for releases / Versioning (push) Has been cancelled
Test / Run unit tests (push) Has been cancelled
Test / Run end-to-end tests (push) Has been cancelled
GitHub Actions Security Analysis with zizmor 🌈 / Run zizmor 🌈 (push) Has been cancelled
Build / deploy_develop (push) Has been cancelled
Build / docker_for_develop (push) Has been cancelled
Build & publish embedded packages for releases / build_element_call (push) Has been cancelled
Build & publish embedded packages for releases / Publish tarball (push) Has been cancelled
Build & publish embedded packages for releases / Publish NPM (push) Has been cancelled
Build & publish embedded packages for releases / Publish Android AAR (push) Has been cancelled
Build & publish embedded packages for releases / Publish SwiftPM Library (push) Has been cancelled
Build & publish embedded packages for releases / Update release notes (push) Has been cancelled
Download avatars using the MC4039 Widget API
2026-03-26 09:03:15 +01:00
Valere Fedronic
2ab5e07f73 Merge pull request #3801 from element-hq/valere/default_route
Audio Control | Default to using earpiece when doing audio vs speaker for video call (depending on presence of headset or not)
2026-03-25 13:16:53 +01:00
Valere
09092e97c8 fix error in test 2026-03-25 12:15:12 +01:00
Valere
cb6bef3849 try extend timeout? 2026-03-25 10:10:29 +01:00
Valere
4d491be4a9 post merge fix 2026-03-25 08:21:32 +01:00
Valere
aad160a715 Merge branch 'livekit' into valere/default_route 2026-03-24 18:28:54 +01:00
Valere
fc61a36d4a review: improve comments 2026-03-24 18:25:09 +01:00
Valere
1bc2abb84f rename ControlledAudioOutput to iOSControlledAudioOutput 2026-03-24 18:03:33 +01:00
Valere
4f518819d3 review: extract ControlledAudioOutput in its own file 2026-03-24 18:02:27 +01:00
Robin
b40feae7da Merge pull request #3810 from element-hq/robin/button-accessibility
Improve accessibility of microphone, camera, and screen share buttons
2026-03-24 17:36:36 +01:00
Robin
e15761d8ee Merge pull request #3703 from element-hq/renovate/compound
Update Compound
2026-03-24 17:20:01 +01:00
Robin
c60ed50a9d Attempt to fix end-to-end widget tests 2026-03-24 17:04:28 +01:00
Robin
6dcd883fc3 Update test snapshots 2026-03-24 16:10:19 +01:00
Robin
c0d60b2c29 Improve accessibility of microphone, camera, and screen share buttons
Taking Valere's suggestion of giving them the 'switch' role. Also, the aria-label attributes were redundant (having tooltips already gives the buttons aria-labelledby).
2026-03-21 02:53:48 +01:00
Valere
c8b8d350d5 Merge branch 'livekit' into valere/default_route 2026-03-19 18:41:36 +01:00
Valere
9c2b2d4780 fix: playwright, new web popup interfering with test to witch room 2026-03-19 10:44:38 +01:00
Valere
396225a900 fix playwright earpiece mode 2026-03-19 10:44:38 +01:00
Valere
c4ec52ae15 add some test for normal AudiOutput 2026-03-19 10:44:38 +01:00
Valere
4be2bc7560 android: Select default output device based on callIntent
Add comments on existing code
Extracted a specific android controller for isolation and better testing

lint fixes

Fix device update logic and more tests

better typescript
2026-03-19 10:44:38 +01:00
renovate[bot]
32ea8f522c Update Compound 2026-03-15 01:47:49 +00:00
JephDiel
997cc9bb27 Lint fixes v2 2026-03-13 11:41:58 -05:00
JephDiel
fa74aaa81e Import order lint fixes
Co-authored-by: Timo <16718859+toger5@users.noreply.github.com>
2026-03-13 11:33:07 -05:00
JephDiel
af807489f9 Import order lint fixes
Co-authored-by: Timo <16718859+toger5@users.noreply.github.com>
2026-03-13 11:31:55 -05:00
JephDiel
b198721969 Ignore stale downloads
If src or sizePx changes while we're downloading,
discard the now-stale fetch result so we don't
override the fresh one.
2026-03-12 22:20:19 -05:00
JephDiel
8ecb1b3dbf Simplify widget detection
Use the exists check for the widget API directly
instead of a feature flag.
2026-03-12 22:18:38 -05:00
JephDiel
699e31f59a Download Avatar from relevent source
Instead of relying on failures directly use the
available method to download the avatar.
2026-03-09 23:16:12 -05:00
JephDiel
005b965fba Download avatars using the Widget API
If we can't authenticate media because we're
running as a widget, use the MC4039 widget API
instead of a direct fetch to download the avatar.
2026-03-07 19:53:57 -06:00
38 changed files with 1664 additions and 1703 deletions

View File

@@ -22,8 +22,8 @@ test("Start a new call then leave and show the feedback screen", async ({
await expect(page.getByTestId("lobby_joinCall")).toBeVisible();
// Check the button toolbar
// await expect(page.getByRole('button', { name: 'Mute microphone' })).toBeVisible();
// await expect(page.getByRole('button', { name: 'Stop video' })).toBeVisible();
// await expect(page.getByRole('switch', { name: 'Mute microphone' })).toBeVisible();
// await expect(page.getByRole('switch', { name: 'Stop video' })).toBeVisible();
await expect(page.getByRole("button", { name: "Settings" })).toBeVisible();
await expect(page.getByRole("button", { name: "End call" })).toBeVisible();

View File

@@ -100,8 +100,16 @@ mobileTest(
{ id: "earpiece", name: "Handset", isEarpiece: true },
{ id: "headphones", name: "Headphones" },
]);
window.controls.setAudioDevice("earpiece");
});
// Open settings to select earpiece
await guestPage.getByRole("button", { name: "Settings" }).click();
await guestPage.getByText("Handset", { exact: true }).click();
// dismiss settings
await guestPage.locator("#root").getByLabel("Settings").press("Escape");
await guestPage.pause();
await expect(
guestPage.getByRole("heading", { name: "Handset Mode" }),
).toBeVisible();

View File

@@ -49,12 +49,12 @@ test("can only interact with header and footer while reconnecting", async ({
).toBeVisible();
// Tab order should jump directly from header to footer, skipping media tiles
await page.getByRole("button", { name: "Mute microphone" }).focus();
await page.getByRole("switch", { name: "Mute microphone" }).focus();
await expect(
page.getByRole("button", { name: "Mute microphone" }),
page.getByRole("switch", { name: "Mute microphone" }),
).toBeFocused();
await page.keyboard.press("Tab");
await expect(page.getByRole("button", { name: "Stop video" })).toBeFocused();
await expect(page.getByRole("switch", { name: "Stop video" })).toBeFocused();
// Most critically, we should be able to press the hangup button
await page.getByRole("button", { name: "End call" }).click();
});

View File

@@ -55,13 +55,10 @@ widgetTest("Create and join a group call", async ({ addUser, browserName }) => {
const frame = user.page
.locator('iframe[title="Element Call"]')
.contentFrame();
// No lobby, should start with video on
// The only way to know if it is muted or not is to look at the data-kind attribute..
const videoButton = frame.getByTestId("incall_videomute");
await expect(videoButton).toBeVisible();
// video should be on
await expect(videoButton).toHaveAttribute("aria-label", /^Stop video$/);
await expect(
frame.getByRole("switch", { name: "Stop video", checked: true }),
).toBeVisible();
}
// We should see 5 video tiles everywhere now
@@ -101,13 +98,15 @@ widgetTest("Create and join a group call", async ({ addUser, browserName }) => {
const florianFrame = florian.page
.locator('iframe[title="Element Call"]')
.contentFrame();
const florianMuteButton = florianFrame.getByTestId("incall_videomute");
await florianMuteButton.click();
const florianVideoButton = florianFrame.getByRole("switch", {
name: /video/,
});
await expect(florianVideoButton).toHaveAccessibleName("Stop video");
await expect(florianVideoButton).toBeChecked();
await florianVideoButton.click();
// Now the button should indicate we can start video
await expect(florianMuteButton).toHaveAttribute(
"aria-label",
/^Start video$/,
);
await expect(florianVideoButton).toHaveAccessibleName("Start video");
await expect(florianVideoButton).not.toBeChecked();
// wait a bit for the state to propagate
await valere.page.waitForTimeout(3000);

View File

@@ -47,14 +47,17 @@ widgetTest("Footer interaction in PiP", async ({ addUser, browserName }) => {
{
// Check for a bug where the video had the wrong fit in PIP
const hangupBtn = iFrame.getByRole("button", { name: "End call" });
const audioBtn = iFrame.getByTestId("incall_mute");
const videoBtn = iFrame.getByTestId("incall_videomute");
await expect(hangupBtn).toBeVisible();
const audioBtn = iFrame.getByRole("switch", { name: /microphone/ });
const videoBtn = iFrame.getByRole("switch", { name: /video/ });
await expect(
iFrame.getByRole("button", { name: "End call" }),
).toBeVisible();
await expect(audioBtn).toBeVisible();
await expect(videoBtn).toBeVisible();
await expect(audioBtn).toHaveAttribute("aria-label", /^Mute microphone$/);
await expect(videoBtn).toHaveAttribute("aria-label", /^Stop video$/);
await expect(audioBtn).toHaveAccessibleName("Mute microphone");
await expect(audioBtn).toBeChecked();
await expect(videoBtn).toHaveAccessibleName("Stop video");
await expect(videoBtn).toBeChecked();
await videoBtn.click();
await audioBtn.click();
@@ -62,7 +65,9 @@ widgetTest("Footer interaction in PiP", async ({ addUser, browserName }) => {
// stop hovering on any of the buttons
await iFrame.getByTestId("videoTile").hover();
await expect(audioBtn).toHaveAttribute("aria-label", /^Unmute microphone$/);
await expect(videoBtn).toHaveAttribute("aria-label", /^Start video$/);
await expect(audioBtn).toHaveAccessibleName("Unmute microphone");
await expect(audioBtn).not.toBeChecked();
await expect(videoBtn).toHaveAccessibleName("Start video");
await expect(videoBtn).not.toBeChecked();
}
});

View File

@@ -40,16 +40,14 @@ widgetTest("Put call in PIP", async ({ addUser, browserName }) => {
await TestHelpers.joinCallInCurrentRoom(timo.page);
{
const frame = timo.page
.locator('iframe[title="Element Call"]')
.contentFrame();
const videoButton = frame.getByTestId("incall_videomute");
await expect(videoButton).toBeVisible();
// check that the video is on
await expect(videoButton).toHaveAttribute("aria-label", /^Stop video$/);
}
await expect(
frame.getByRole("switch", { name: "Stop video", checked: true }),
).toBeVisible();
// Switch to the other room, the call should go to PIP
await TestHelpers.switchToRoomNamed(valere.page, "DoubleTask");

View File

@@ -152,6 +152,22 @@ export class TestHelpers {
}
}
public static async maybeDismissKeyBackupToast(page: Page): Promise<void> {
const toast = page
.locator(".mx_Toast_toast")
.getByText("Back up your chats");
try {
await expect(toast).toBeVisible({ timeout: 700 });
await page
.locator(".mx_Toast_toast")
.getByRole("button", { name: "Dismiss" })
.click();
} catch {
// toast not visible, continue as normal
}
}
public static async createRoom(
name: string,
page: Page,
@@ -167,6 +183,7 @@ export class TestHelpers {
await page.getByRole("button", { name: "Create room" }).click();
await expect(page.getByText("You created this room.")).toBeVisible();
await expect(page.getByText("Encryption enabled")).toBeVisible();
await TestHelpers.maybeDismissKeyBackupToast(page);
// Invite users if any
if (andInvite.length > 0) {
@@ -201,6 +218,7 @@ export class TestHelpers {
await expect(
page.getByRole("main").getByRole("heading", { name: roomName }),
).toBeVisible();
await TestHelpers.maybeDismissKeyBackupToast(page);
}
/**

View File

@@ -54,34 +54,36 @@ widgetTest(
.contentFrame();
// ASSERT the button states for whistler (the callee)
{
// The only way to know if it is muted or not is to look at the data-kind attribute..
const videoButton = whistlerFrame.getByTestId("incall_videomute");
// video should be off by default in a voice call
await expect(videoButton).toHaveAttribute("aria-label", /^Start video$/);
const audioButton = whistlerFrame.getByTestId("incall_mute");
await expect(
whistlerFrame.getByRole("switch", {
name: "Start video",
checked: false,
}),
).toBeVisible();
// audio should be on for the voice call
await expect(audioButton).toHaveAttribute(
"aria-label",
/^Mute microphone$/,
);
}
await expect(
whistlerFrame.getByRole("switch", {
name: "Mute microphone",
checked: true,
}),
).toBeVisible();
// ASSERT the button states for brools (the caller)
{
// The only way to know if it is muted or not is to look at the data-kind attribute..
const videoButton = brooksFrame.getByTestId("incall_videomute");
// video should be off by default in a voice call
await expect(videoButton).toHaveAttribute("aria-label", /^Start video$/);
const audioButton = brooksFrame.getByTestId("incall_mute");
await expect(
whistlerFrame.getByRole("switch", {
name: "Start video",
checked: false,
}),
).toBeVisible();
// audio should be on for the voice call
await expect(audioButton).toHaveAttribute(
"aria-label",
/^Mute microphone$/,
);
}
await expect(
whistlerFrame.getByRole("switch", {
name: "Mute microphone",
checked: true,
}),
).toBeVisible();
// In order to confirm that the call is disconnected we will check that the message composer is shown again.
// So first we need to confirm that it is hidden when in the call.
@@ -93,10 +95,7 @@ widgetTest(
).not.toBeVisible();
// ASSERT hanging up on one side ends the call for both
{
const hangupButton = brooksFrame.getByTestId("incall_leave");
await hangupButton.click();
}
await brooksFrame.getByRole("button", { name: "End call" }).click();
// The widget should be closed on both sides and the timeline should be back on screen
await expect(
@@ -148,34 +147,30 @@ widgetTest(
.contentFrame();
// ASSERT the button states for whistler (the callee)
{
// The only way to know if it is muted or not is to look at the data-kind attribute..
const videoButton = whistlerFrame.getByTestId("incall_videomute");
// video should be on by default in a voice call
await expect(videoButton).toHaveAttribute("aria-label", /^Stop video$/);
const audioButton = whistlerFrame.getByTestId("incall_mute");
// audio should be on for the voice call
await expect(audioButton).toHaveAttribute(
"aria-label",
/^Mute microphone$/,
);
}
// video should be off by default in a video call
await expect(
whistlerFrame.getByRole("switch", { name: "Stop video", checked: true }),
).toBeVisible();
// audio should be on too
await expect(
whistlerFrame.getByRole("switch", {
name: "Mute microphone",
checked: true,
}),
).toBeVisible();
// ASSERT the button states for brools (the caller)
{
// The only way to know if it is muted or not is to look at the data-kind attribute..
const videoButton = brooksFrame.getByTestId("incall_videomute");
// video should be on by default in a voice call
await expect(videoButton).toHaveAttribute("aria-label", /^Stop video$/);
const audioButton = brooksFrame.getByTestId("incall_mute");
// audio should be on for the voice call
await expect(audioButton).toHaveAttribute(
"aria-label",
/^Mute microphone$/,
);
}
// video should be off by default in a video call
await expect(
whistlerFrame.getByRole("switch", { name: "Stop video", checked: true }),
).toBeVisible();
// audio should be on too
await expect(
whistlerFrame.getByRole("switch", {
name: "Mute microphone",
checked: true,
}),
).toBeVisible();
// In order to confirm that the call is disconnected we will check that the message composer is shown again.
// So first we need to confirm that it is hidden when in the call.
@@ -187,10 +182,7 @@ widgetTest(
).not.toBeVisible();
// ASSERT hanging up on one side ends the call for both
{
const hangupButton = brooksFrame.getByTestId("incall_leave");
await hangupButton.click();
}
await brooksFrame.getByRole("button", { name: "End call" }).click();
// The widget should be closed on both sides and the timeline should be back on screen
await expect(

View File

@@ -1,30 +0,0 @@
var createVADModule = (() => {
var _scriptDir = import.meta.url;
return (
function(createVADModule) {
createVADModule = createVADModule || {};
var a;a||(a=typeof createVADModule !== 'undefined' ? createVADModule : {});var k,l;a.ready=new Promise(function(b,c){k=b;l=c});var p=Object.assign({},a),r="object"==typeof window,u="function"==typeof importScripts,v="",w;
if(r||u)u?v=self.location.href:"undefined"!=typeof document&&document.currentScript&&(v=document.currentScript.src),_scriptDir&&(v=_scriptDir),0!==v.indexOf("blob:")?v=v.substr(0,v.replace(/[?#].*/,"").lastIndexOf("/")+1):v="",u&&(w=b=>{var c=new XMLHttpRequest;c.open("GET",b,!1);c.responseType="arraybuffer";c.send(null);return new Uint8Array(c.response)});var aa=a.print||console.log.bind(console),x=a.printErr||console.warn.bind(console);Object.assign(a,p);p=null;var y;a.wasmBinary&&(y=a.wasmBinary);
var noExitRuntime=a.noExitRuntime||!0;"object"!=typeof WebAssembly&&z("no native wasm support detected");var A,B=!1,C="undefined"!=typeof TextDecoder?new TextDecoder("utf8"):void 0,D,E,F;function J(){var b=A.buffer;D=b;a.HEAP8=new Int8Array(b);a.HEAP16=new Int16Array(b);a.HEAP32=new Int32Array(b);a.HEAPU8=E=new Uint8Array(b);a.HEAPU16=new Uint16Array(b);a.HEAPU32=F=new Uint32Array(b);a.HEAPF32=new Float32Array(b);a.HEAPF64=new Float64Array(b)}var K=[],L=[],M=[];
function ba(){var b=a.preRun.shift();K.unshift(b)}var N=0,O=null,P=null;function z(b){if(a.onAbort)a.onAbort(b);b="Aborted("+b+")";x(b);B=!0;b=new WebAssembly.RuntimeError(b+". Build with -sASSERTIONS for more info.");l(b);throw b;}function Q(){return R.startsWith("data:application/octet-stream;base64,")}var R;if(a.locateFile){if(R="ten_vad.wasm",!Q()){var S=R;R=a.locateFile?a.locateFile(S,v):v+S}}else R=(new URL("ten_vad.wasm",import.meta.url)).href;
function T(){var b=R;try{if(b==R&&y)return new Uint8Array(y);if(w)return w(b);throw"both async and sync fetching of the wasm failed";}catch(c){z(c)}}function ca(){return y||!r&&!u||"function"!=typeof fetch?Promise.resolve().then(function(){return T()}):fetch(R,{credentials:"same-origin"}).then(function(b){if(!b.ok)throw"failed to load wasm binary file at '"+R+"'";return b.arrayBuffer()}).catch(function(){return T()})}function U(b){for(;0<b.length;)b.shift()(a)}
var da=[null,[],[]],ea={a:function(){z("")},f:function(b,c,m){E.copyWithin(b,c,c+m)},c:function(b){var c=E.length;b>>>=0;if(2147483648<b)return!1;for(var m=1;4>=m;m*=2){var h=c*(1+.2/m);h=Math.min(h,b+100663296);var d=Math;h=Math.max(b,h);d=d.min.call(d,2147483648,h+(65536-h%65536)%65536);a:{try{A.grow(d-D.byteLength+65535>>>16);J();var e=1;break a}catch(W){}e=void 0}if(e)return!0}return!1},e:function(){return 52},b:function(){return 70},d:function(b,c,m,h){for(var d=0,e=0;e<m;e++){var W=F[c>>2],
X=F[c+4>>2];c+=8;for(var G=0;G<X;G++){var f=E[W+G],H=da[b];if(0===f||10===f){f=H;for(var n=0,q=n+NaN,t=n;f[t]&&!(t>=q);)++t;if(16<t-n&&f.buffer&&C)f=C.decode(f.subarray(n,t));else{for(q="";n<t;){var g=f[n++];if(g&128){var I=f[n++]&63;if(192==(g&224))q+=String.fromCharCode((g&31)<<6|I);else{var Y=f[n++]&63;g=224==(g&240)?(g&15)<<12|I<<6|Y:(g&7)<<18|I<<12|Y<<6|f[n++]&63;65536>g?q+=String.fromCharCode(g):(g-=65536,q+=String.fromCharCode(55296|g>>10,56320|g&1023))}}else q+=String.fromCharCode(g)}f=q}(1===
b?aa:x)(f);H.length=0}else H.push(f)}d+=X}F[h>>2]=d;return 0}};
(function(){function b(d){a.asm=d.exports;A=a.asm.g;J();L.unshift(a.asm.h);N--;a.monitorRunDependencies&&a.monitorRunDependencies(N);0==N&&(null!==O&&(clearInterval(O),O=null),P&&(d=P,P=null,d()))}function c(d){b(d.instance)}function m(d){return ca().then(function(e){return WebAssembly.instantiate(e,h)}).then(function(e){return e}).then(d,function(e){x("failed to asynchronously prepare wasm: "+e);z(e)})}var h={a:ea};N++;a.monitorRunDependencies&&a.monitorRunDependencies(N);if(a.instantiateWasm)try{return a.instantiateWasm(h,
b)}catch(d){x("Module.instantiateWasm callback failed with error: "+d),l(d)}(function(){return y||"function"!=typeof WebAssembly.instantiateStreaming||Q()||"function"!=typeof fetch?m(c):fetch(R,{credentials:"same-origin"}).then(function(d){return WebAssembly.instantiateStreaming(d,h).then(c,function(e){x("wasm streaming compile failed: "+e);x("falling back to ArrayBuffer instantiation");return m(c)})})})().catch(l);return{}})();
a.___wasm_call_ctors=function(){return(a.___wasm_call_ctors=a.asm.h).apply(null,arguments)};a._malloc=function(){return(a._malloc=a.asm.i).apply(null,arguments)};a._free=function(){return(a._free=a.asm.j).apply(null,arguments)};a._ten_vad_create=function(){return(a._ten_vad_create=a.asm.k).apply(null,arguments)};a._ten_vad_process=function(){return(a._ten_vad_process=a.asm.l).apply(null,arguments)};a._ten_vad_destroy=function(){return(a._ten_vad_destroy=a.asm.m).apply(null,arguments)};
a._ten_vad_get_version=function(){return(a._ten_vad_get_version=a.asm.n).apply(null,arguments)};var V;P=function fa(){V||Z();V||(P=fa)};
function Z(){function b(){if(!V&&(V=!0,a.calledRun=!0,!B)){U(L);k(a);if(a.onRuntimeInitialized)a.onRuntimeInitialized();if(a.postRun)for("function"==typeof a.postRun&&(a.postRun=[a.postRun]);a.postRun.length;){var c=a.postRun.shift();M.unshift(c)}U(M)}}if(!(0<N)){if(a.preRun)for("function"==typeof a.preRun&&(a.preRun=[a.preRun]);a.preRun.length;)ba();U(K);0<N||(a.setStatus?(a.setStatus("Running..."),setTimeout(function(){setTimeout(function(){a.setStatus("")},1);b()},1)):b())}}
if(a.preInit)for("function"==typeof a.preInit&&(a.preInit=[a.preInit]);0<a.preInit.length;)a.preInit.pop()();Z();
return createVADModule.ready
}
);
})();
export default createVADModule;

Binary file not shown.

View File

@@ -9,14 +9,18 @@ import { afterEach, expect, test, vi } from "vitest";
import { render, screen } from "@testing-library/react";
import { type MatrixClient } from "matrix-js-sdk";
import { type FC, type PropsWithChildren } from "react";
import { type WidgetApi } from "matrix-widget-api";
import { ClientContextProvider } from "./ClientContext";
import { Avatar } from "./Avatar";
import { mockMatrixRoomMember, mockRtcMembership } from "./utils/test";
import { widget } from "./widget";
const TestComponent: FC<
PropsWithChildren<{ client: MatrixClient; supportsThumbnails?: boolean }>
> = ({ client, children, supportsThumbnails }) => {
PropsWithChildren<{
client: MatrixClient;
}>
> = ({ client, children }) => {
return (
<ClientContextProvider
value={{
@@ -24,7 +28,6 @@ const TestComponent: FC<
disconnected: false,
supportedFeatures: {
reactions: true,
thumbnails: supportsThumbnails ?? true,
},
setClient: vi.fn(),
authenticated: {
@@ -40,6 +43,12 @@ const TestComponent: FC<
);
};
vi.mock("./widget", () => ({
widget: {
api: null, // Ideally we'd only mock this in the as a widget test so the whole module is otherwise null, but just nulling `api` by default works well enough
},
}));
afterEach(() => {
vi.unstubAllGlobals();
});
@@ -73,36 +82,7 @@ test("should just render a placeholder when the user has no avatar", () => {
expect(client.mxcUrlToHttp).toBeCalledTimes(0);
});
test("should just render a placeholder when thumbnails are not supported", () => {
const client = vi.mocked<MatrixClient>({
getAccessToken: () => "my-access-token",
mxcUrlToHttp: () => vi.fn(),
} as unknown as MatrixClient);
vi.spyOn(client, "mxcUrlToHttp");
const member = mockMatrixRoomMember(
mockRtcMembership("@alice:example.org", "AAAA"),
{
getMxcAvatarUrl: () => "mxc://example.org/alice-avatar",
},
);
const displayName = "Alice";
render(
<TestComponent client={client} supportsThumbnails={false}>
<Avatar
id={member.userId}
name={displayName}
size={96}
src={member.getMxcAvatarUrl()}
/>
</TestComponent>,
);
const element = screen.getByRole("img", { name: "@alice:example.org" });
expect(element.tagName).toEqual("SPAN");
expect(client.mxcUrlToHttp).toBeCalledTimes(0);
});
test("should attempt to fetch authenticated media", async () => {
test("should attempt to fetch authenticated media from the server", async () => {
const expectedAuthUrl = "http://example.org/media/alice-avatar";
const expectedObjectURL = "my-object-url";
const accessToken = "my-access-token";
@@ -154,3 +134,47 @@ test("should attempt to fetch authenticated media", async () => {
headers: { Authorization: `Bearer ${accessToken}` },
});
});
test("should attempt to use widget API if running as a widget", async () => {
const expectedMXCUrl = "mxc://example.org/alice-avatar";
const expectedObjectURL = "my-object-url";
const theBlob = new Blob([]);
// vitest doesn't have a implementation of create/revokeObjectURL, so we need
// to delete the property. It's a bit odd, but it works.
Reflect.deleteProperty(global.window.URL, "createObjectURL");
globalThis.URL.createObjectURL = vi.fn().mockReturnValue(expectedObjectURL);
Reflect.deleteProperty(global.window.URL, "revokeObjectURL");
globalThis.URL.revokeObjectURL = vi.fn();
const client = vi.mocked<MatrixClient>({
getAccessToken: () => undefined,
} as unknown as MatrixClient);
widget!.api = { downloadFile: vi.fn() } as unknown as WidgetApi;
vi.spyOn(widget!.api, "downloadFile").mockResolvedValue({ file: theBlob });
const member = mockMatrixRoomMember(
mockRtcMembership("@alice:example.org", "AAAA"),
{
getMxcAvatarUrl: () => expectedMXCUrl,
},
);
const displayName = "Alice";
render(
<TestComponent client={client}>
<Avatar
id={member.userId}
name={displayName}
size={96}
src={member.getMxcAvatarUrl()}
/>
</TestComponent>,
);
// Fetch is asynchronous, so wait for this to resolve.
await vi.waitUntil(() =>
document.querySelector(`img[src='${expectedObjectURL}']`),
);
expect(widget!.api.downloadFile).toBeCalledWith(expectedMXCUrl);
});

View File

@@ -14,8 +14,10 @@ import {
} from "react";
import { Avatar as CompoundAvatar } from "@vector-im/compound-web";
import { type MatrixClient } from "matrix-js-sdk";
import { type WidgetApi } from "matrix-widget-api";
import { useClientState } from "./ClientContext";
import { widget } from "./widget";
export enum Size {
XS = "xs",
@@ -78,50 +80,54 @@ export const Avatar: FC<Props> = ({
const sizePx = useMemo(
() =>
Object.values(Size).includes(size as Size)
? sizes.get(size as Size)
? sizes.get(size as Size)!
: (size as number),
[size],
);
const [avatarUrl, setAvatarUrl] = useState<string | undefined>(undefined);
// In theory, a change in `clientState` or `sizePx` could run extra getAvatarFromWidgetAPI calls, but in practice they should be stable long before this code runs.
useEffect(() => {
if (clientState?.state !== "valid") {
return;
}
const { authenticated, supportedFeatures } = clientState;
const client = authenticated?.client;
if (!client || !src || !sizePx || !supportedFeatures.thumbnails) {
if (!src) {
setAvatarUrl(undefined);
return;
}
const token = client.getAccessToken();
if (!token) {
return;
}
const resolveSrc = getAvatarUrl(client, src, sizePx);
if (!resolveSrc) {
let blob: Promise<Blob>;
if (widget?.api) {
blob = getAvatarFromWidgetAPI(widget.api, src);
} else if (
clientState?.state === "valid" &&
clientState.authenticated?.client &&
sizePx
) {
blob = getAvatarFromServer(clientState.authenticated.client, src, sizePx);
} else {
setAvatarUrl(undefined);
return;
}
let objectUrl: string | undefined;
fetch(resolveSrc, {
headers: {
Authorization: `Bearer ${token}`,
},
})
.then(async (req) => req.blob())
let stale = false;
blob
.then((blob) => {
if (stale) {
return;
}
objectUrl = URL.createObjectURL(blob);
setAvatarUrl(objectUrl);
})
.catch((ex) => {
if (stale) {
return;
}
setAvatarUrl(undefined);
});
return (): void => {
stale = true;
if (objectUrl) {
URL.revokeObjectURL(objectUrl);
}
@@ -140,3 +146,44 @@ export const Avatar: FC<Props> = ({
/>
);
};
async function getAvatarFromServer(
client: MatrixClient,
src: string,
sizePx: number,
): Promise<Blob> {
const httpSrc = getAvatarUrl(client, src, sizePx);
if (!httpSrc) {
throw new Error("Failed to get http avatar URL");
}
const token = client.getAccessToken();
if (!token) {
throw new Error("Failed to get access token");
}
const request = await fetch(httpSrc, {
headers: {
Authorization: `Bearer ${token}`,
},
});
const blob = await request.blob();
return blob;
}
async function getAvatarFromWidgetAPI(
api: WidgetApi,
src: string,
): Promise<Blob> {
const response = await api.downloadFile(src);
const file = response.file;
// element-web sends a Blob, and the MSC4039 is considering changing the spec to strictly Blob, so only handling that
if (!(file instanceof Blob)) {
throw new Error("Downloaded file is not a Blob");
}
return file;
}

View File

@@ -48,7 +48,6 @@ export type ValidClientState = {
disconnected: boolean;
supportedFeatures: {
reactions: boolean;
thumbnails: boolean;
};
setClient: (client: MatrixClient, session: Session) => void;
};
@@ -249,7 +248,6 @@ export const ClientProvider: FC<Props> = ({ children }) => {
const [isDisconnected, setIsDisconnected] = useState(false);
const [supportsReactions, setSupportsReactions] = useState(false);
const [supportsThumbnails, setSupportsThumbnails] = useState(false);
const state: ClientState | undefined = useMemo(() => {
if (alreadyOpenedErr) {
@@ -275,7 +273,6 @@ export const ClientProvider: FC<Props> = ({ children }) => {
disconnected: isDisconnected,
supportedFeatures: {
reactions: supportsReactions,
thumbnails: supportsThumbnails,
},
};
}, [
@@ -286,7 +283,6 @@ export const ClientProvider: FC<Props> = ({ children }) => {
setClient,
isDisconnected,
supportsReactions,
supportsThumbnails,
]);
const onSync = useCallback(
@@ -312,8 +308,6 @@ export const ClientProvider: FC<Props> = ({ children }) => {
}
if (initClientState.widgetApi) {
// There is currently no widget API for authenticated media thumbnails.
setSupportsThumbnails(false);
const reactSend = initClientState.widgetApi.hasCapability(
"org.matrix.msc2762.send.event:m.reaction",
);
@@ -335,7 +329,6 @@ export const ClientProvider: FC<Props> = ({ children }) => {
}
} else {
setSupportsReactions(true);
setSupportsThumbnails(true);
}
return (): void => {

View File

@@ -31,7 +31,6 @@ interface Props {
max: number;
step: number;
disabled?: boolean;
tooltip?: boolean;
}
/**
@@ -47,7 +46,6 @@ export const Slider: FC<Props> = ({
max,
step,
disabled,
tooltip = true,
}) => {
const onValueChange = useCallback(
([v]: number[]) => onValueChangeProp(v),
@@ -73,13 +71,9 @@ export const Slider: FC<Props> = ({
<Range className={styles.highlight} />
</Track>
{/* Note: This is expected not to be visible on mobile.*/}
{tooltip ? (
<Tooltip placement="top" label={Math.round(value * 100).toString() + "%"}>
<Thumb className={styles.handle} aria-label={label} />
</Tooltip>
) : (
<Thumb className={styles.handle} aria-label={label} />
)}
</Root>
);
};

View File

@@ -14,7 +14,7 @@ exports[`AppBar > renders 1`] = `
>
<button
aria-labelledby="_r_0_"
class="_icon-button_1pz9o_8"
class="_icon-button_1215g_8"
data-kind="primary"
role="button"
style="--cpd-icon-button-size: 32px;"

View File

@@ -37,9 +37,10 @@ export const MicButton: FC<MicButtonProps> = ({ enabled, ...props }) => {
<Tooltip label={label}>
<CpdButton
iconOnly
aria-label={label}
Icon={Icon}
kind={enabled ? "primary" : "secondary"}
role="switch"
aria-checked={enabled}
{...props}
/>
</Tooltip>
@@ -62,9 +63,10 @@ export const VideoButton: FC<VideoButtonProps> = ({ enabled, ...props }) => {
<Tooltip label={label}>
<CpdButton
iconOnly
aria-label={label}
Icon={Icon}
kind={enabled ? "primary" : "secondary"}
role="switch"
aria-checked={enabled}
{...props}
/>
</Tooltip>
@@ -91,6 +93,8 @@ export const ShareScreenButton: FC<ShareScreenButtonProps> = ({
iconOnly
Icon={ShareScreenSolidIcon}
kind={enabled ? "primary" : "secondary"}
role="switch"
aria-checked={enabled}
{...props}
/>
</Tooltip>
@@ -112,7 +116,6 @@ export const EndCallButton: FC<EndCallButtonProps> = ({
<CpdButton
className={classNames(className, styles.endCall)}
iconOnly
aria-label={t("hangup_button_label")}
Icon={EndCallIcon}
destructive
{...props}

View File

@@ -33,12 +33,38 @@ export interface Controls {
showNativeOutputDevicePicker?: () => void;
}
/**
 * Output audio device when using the controlled audio output mode (mobile).
 */
export interface OutputDevice {
  id: string;
  name: string;
  /**
   * `forEarpiece` is an iOS-only flag that will be set on the default speaker
   * device. The default speaker device will be used for the earpiece mode by
   * using a stereo pan and reducing the volume significantly (in combination
   * this is similar to a dedicated earpiece mode).
   * - on iOS this is true if output is routed to the speaker.
   *   In that case Element Call manually appends an earpiece device with id
   *   `EARPIECE_CONFIG_ID` and `{ type: "earpiece" }`.
   * - on Android this is unused.
   */
  forEarpiece?: boolean;
  /**
   * Is the device the OS earpiece audio configuration?
   * - on iOS always undefined.
   * - on Android true for the `TYPE_BUILTIN_EARPIECE` device type.
   */
  isEarpiece?: boolean;
  /**
   * Is the device the OS default speaker?
   * - on iOS always true if output is routed to the speaker; otherwise iOS
   *   only declares a `dummy` id device.
   * - on Android true for the `TYPE_BUILTIN_SPEAKER` device type.
   */
  isSpeaker?: boolean;
  /**
   * Is the device the OS default external headset (bluetooth)?
   * - on iOS always undefined.
   * - on Android true for the `TYPE_BLUETOOTH_SCO` device type.
   */
  isExternalHeadset?: boolean;
}
@@ -47,8 +73,16 @@ export interface OutputDevice {
*/
export const setPipEnabled$ = new Subject<boolean>();
/**
 * Stores the list of available controlled audio output devices.
 * This is set when the native code calls `setAvailableAudioDevices` with the
 * list of available audio output devices.
 */
export const availableOutputDevices$ = new Subject<OutputDevice[]>();
/**
 * Stores the current audio output device id.
 * This is set when the native code calls `setAudioDevice`.
 */
export const outputDevice$ = new Subject<string>();
/**
@@ -80,16 +114,41 @@ window.controls = {
setPipEnabled$.next(false);
},
/**
* Reverse engineered:
*
* - on iOS:
 * This is always a list containing one item. If the current route output is the speaker it returns
* the single `{"id":"Speaker","name":"Speaker","forEarpiece":true,"isSpeaker":true}` Notice that EC will
* also manually add a virtual earpiece device with id `EARPIECE_CONFIG_ID` and `{ type: "earpiece" }`.
* If the route output is not speaker then it will be `{id: 'dummy', name: 'dummy'}`
*
*
* - on Android:
* This is a list of all available output audio devices. The `id` is the Android AudioDeviceInfo.getId()
 * and the `name` is based on the Android AudioDeviceInfo.productName (mapped to static strings for known types).
* The `isEarpiece`, `isSpeaker` and `isExternalHeadset` are set based on the Android AudioDeviceInfo.type
* matching the corresponding types for earpiece, speaker and bluetooth headset.
*/
setAvailableAudioDevices(devices: OutputDevice[]): void {
logger.info("setAvailableAudioDevices called from native:", devices);
logger.info(
"[MediaDevices controls] setAvailableAudioDevices called from native:",
devices,
);
availableOutputDevices$.next(devices);
},
setAudioDevice(id: string): void {
logger.info("setAudioDevice called from native", id);
logger.info(
"[MediaDevices controls] setAudioDevice called from native",
id,
);
outputDevice$.next(id);
},
setAudioEnabled(enabled: boolean): void {
logger.info("setAudioEnabled called from native:", enabled);
logger.info(
"[MediaDevices controls] setAudioEnabled called from native:",
enabled,
);
if (!setAudioEnabled$.observed)
throw new Error(
"Output controls are disabled. No setAudioEnabled$ observer",

View File

@@ -74,8 +74,6 @@ export function LivekitRoomAudioRenderer({
)
// Only keep audio tracks
.filter((ref) => ref.publication.kind === Track.Kind.Audio)
// Never render local participant's own audio back to themselves
.filter((ref) => !ref.participant.isLocal)
// Only keep tracks from participants that are in the validIdentities list
.filter((ref) => {
const isValid = validIdentities.includes(ref.participant.identity);

View File

@@ -1,438 +0,0 @@
/*
Copyright 2026 New Vector Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
// Ambient declarations for the AudioWorklet global scope (these globals exist
// inside a worklet but are not part of the standard DOM typings):
// the context sample-rate constant, the processor base class, and the
// processor registration function.
declare const sampleRate: number;

declare class AudioWorkletProcessor {
  public readonly port: MessagePort;
  public constructor(options?: {
    processorOptions?: Record<string, unknown>;
  });
  public process(
    inputs: Float32Array[][],
    outputs: Float32Array[][],
    parameters: Record<string, Float32Array>,
  ): boolean;
}

declare function registerProcessor(
  name: string,
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  processorCtor: new (...args: any[]) => AudioWorkletProcessor,
): void;
/** Parameters controlling the gate/suppressor/VAD, posted from the main thread. */
interface NoiseGateParams {
  noiseGateActive: boolean;
  threshold: number; // dBFS — gate opens above this, closes below it
  attackMs: number;
  holdMs: number;
  releaseMs: number;
  transientEnabled: boolean;
  transientThresholdDb: number; // dB above background RMS that triggers suppression
  transientReleaseMs: number; // how quickly suppression fades after transient ends
  // TEN-VAD params
  vadEnabled: boolean;
  vadPositiveThreshold: number; // open gate when prob >= this (0–1)
  vadNegativeThreshold: number; // close gate when prob < this (0–1)
}

/** Message that force-sets the VAD gate state from outside the worklet. */
interface VADGateMessage {
  type: "vad-gate";
  open: boolean;
}
/** Convert a decibel value to a linear amplitude ratio (0 dB → 1, 20 dB → 10). */
function dbToLinear(db: number): number {
  return 10 ** (db / 20);
}
/**
 * Thin synchronous wrapper around the TEN-VAD Emscripten WASM module.
 * Instantiated synchronously in the AudioWorklet constructor from a
 * pre-compiled WebAssembly.Module passed via processorOptions.
 *
 * The single-letter import/export names (`a`–`m`) are the minified symbols of
 * the Emscripten build; the comment next to each records its original name.
 */
class TenVADRuntime {
  // WASM linear memory exported by the module; all pointers below index into it.
  private readonly mem: WebAssembly.Memory;
  private readonly freeFn: (ptr: number) => void;
  // ten_vad_process(handle, audioPtr, hopSize, probPtr, flagPtr)
  private readonly processFn: (
    handle: number,
    audioPtr: number,
    hopSize: number,
    probPtr: number,
    flagPtr: number,
  ) => number;
  private readonly destroyFn: (handle: number) => number;
  // Opaque VAD instance handle obtained from ten_vad_create.
  private readonly handle: number;
  // Persistent WASM-side buffers, allocated once in the constructor.
  private readonly audioBufPtr: number;
  private readonly probPtr: number;
  private readonly flagPtr: number;
  public readonly hopSize: number;

  public constructor(
    module: WebAssembly.Module,
    hopSize: number,
    threshold: number,
  ) {
    this.hopSize = hopSize;
    // Late-bound memory reference — emscripten_resize_heap and memmove
    // are only called after instantiation, so closing over this is safe.
    const state = { mem: null as WebAssembly.Memory | null };
    const imports = {
      a: {
        // abort
        a: (): never => {
          throw new Error("ten_vad abort");
        },
        // fd_write / proc_exit stub
        b: (): number => 0,
        // emscripten_resize_heap
        c: (reqBytes: number): number => {
          if (!state.mem) return 0;
          try {
            const cur = state.mem.buffer.byteLength;
            if (cur >= reqBytes) return 1;
            // Grow in 64 KiB WASM pages up to the requested size.
            state.mem.grow(Math.ceil((reqBytes - cur) / 65536));
            return 1;
          } catch {
            return 0;
          }
        },
        // fd_write stub
        d: (): number => 0,
        // environ stub
        e: (): number => 0,
        // memmove
        f: (dest: number, src: number, len: number): void => {
          if (state.mem) {
            new Uint8Array(state.mem.buffer).copyWithin(dest, src, src + len);
          }
        },
      },
    };
    // Synchronous instantiation — valid in Worker/AudioWorklet global scope
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const instance = new WebAssembly.Instance(module, imports as any);
    const asm = instance.exports as {
      g: WebAssembly.Memory; // exported memory
      h: () => void; // __wasm_call_ctors
      i: (n: number) => number; // malloc
      j: (p: number) => void; // free
      k: (handlePtr: number, hopSize: number, threshold: number) => number; // ten_vad_create
      l: (handle: number, audioPtr: number, hopSize: number, probPtr: number, flagPtr: number) => number; // ten_vad_process
      m: (handle: number) => number; // ten_vad_destroy
    };
    state.mem = asm.g;
    this.mem = asm.g;
    this.freeFn = asm.j;
    this.processFn = asm.l;
    this.destroyFn = asm.m;
    // Run Emscripten static constructors
    asm.h();
    // Allocate persistent buffers (malloc is 8-byte aligned, so alignment is fine)
    this.audioBufPtr = asm.i(hopSize * 2); // Int16Array
    this.probPtr = asm.i(4); // float
    this.flagPtr = asm.i(4); // int
    // Create VAD handle — ten_vad_create(void** handle, int hopSize, float threshold)
    const handlePtrPtr = asm.i(4);
    const ret = asm.k(handlePtrPtr, hopSize, threshold);
    if (ret !== 0) throw new Error(`ten_vad_create failed: ${ret}`);
    // Dereference the out-parameter to obtain the actual handle, then free it.
    this.handle = new Int32Array(this.mem.buffer)[handlePtrPtr >> 2];
    asm.j(handlePtrPtr);
  }

  /** Process one hop of Int16 audio. Returns speech probability [0–1]. */
  public process(samples: Int16Array): number {
    // Fresh typed-array views each call — the underlying ArrayBuffer is
    // replaced whenever WASM memory grows, so cached views would detach.
    new Int16Array(this.mem.buffer).set(samples, this.audioBufPtr >> 1);
    this.processFn(
      this.handle,
      this.audioBufPtr,
      this.hopSize,
      this.probPtr,
      this.flagPtr,
    );
    return new Float32Array(this.mem.buffer)[this.probPtr >> 2];
  }

  /** Destroy the WASM VAD instance and free the persistent buffers. */
  public destroy(): void {
    this.destroyFn(this.handle);
    this.freeFn(this.audioBufPtr);
    this.freeFn(this.probPtr);
    this.freeFn(this.flagPtr);
  }
}
/**
 * AudioWorkletProcessor implementing a noise gate, an optional transient
 * suppressor, and an optional in-worklet TEN-VAD gate — all running
 * per-sample in a single pass.
 *
 * Noise gate: opens when instantaneous peak exceeds threshold, closes below.
 * Attack, hold, and release times smooth the attenuation envelope.
 *
 * Transient suppressor: tracks a slow-moving RMS background level. When the
 * instantaneous peak exceeds the background by more than transientThresholdDb,
 * gain is instantly cut to 0 and releases over transientReleaseMs.
 *
 * TEN-VAD gate: accumulates audio with 3:1 decimation (48 kHz → 16 kHz),
 * runs the TEN-VAD model synchronously every 256 samples (16 ms), and
 * controls vadGateOpen with hysteresis. No IPC round-trip required.
 */
class NoiseGateProcessor extends AudioWorkletProcessor {
  // Noise gate state
  private noiseGateActive = true;
  private threshold = dbToLinear(-60); // linear amplitude; gate opens above this
  private attackRate = 1.0 / (0.025 * sampleRate); // gain added per sample while opening
  private releaseRate = 1.0 / (0.15 * sampleRate); // gain removed per sample while closing
  private holdTime = 0.2; // seconds to wait below threshold before releasing
  private isOpen = false;
  private gateAttenuation = 0; // current gate gain, 0–1
  private heldTime = 0; // seconds spent below threshold since the gate closed
  // Transient suppressor state
  private transientEnabled = false;
  private transientRatio = dbToLinear(15); // peak/background ratio that triggers suppression
  private transientReleaseRate = 1.0 / (0.08 * sampleRate);
  private transientAttenuation = 1.0; // current suppressor gain, 0–1
  private slowRms = 0; // slow-moving RMS estimate of the background level
  private rmsCoeff = Math.exp(-1.0 / (0.2 * sampleRate)); // one-pole smoothing (~200 ms time constant)
  // VAD gate state
  private vadGateOpen = true; // starts open; TEN-VAD closes it on first silent frame
  private vadAttenuation = 1.0; // smoothed VAD gain, ramped to avoid clicks
  private readonly vadRampRate = 1.0 / (0.02 * sampleRate); // 20 ms ramp
  // TEN-VAD state
  private vadEnabled = false;
  private vadPositiveThreshold = 0.5;
  private vadNegativeThreshold = 0.3;
  private tenVadRuntime: TenVADRuntime | null = null;
  // 3:1 decimation from AudioContext sample rate to 16 kHz
  private readonly decRatio = Math.max(1, Math.round(sampleRate / 16000));
  private decPhase = 0; // position within the current decimation group
  private decAcc = 0; // running sum of samples in the current group
  private readonly vadHopBuf = new Int16Array(256); // one 16 ms hop at 16 kHz
  private vadHopCount = 0;
  private logCounter = 0; // throttles the periodic state log below

  public constructor(options?: {
    processorOptions?: Record<string, unknown>;
  }) {
    super(options);
    // Try to instantiate TEN-VAD from the pre-compiled module passed by the main thread
    const tenVadModule = options?.processorOptions?.tenVadModule as
      | WebAssembly.Module
      | undefined;
    if (tenVadModule) {
      try {
        // hopSize = 256 samples @ 16 kHz = 16 ms; threshold = 0.5 (overridden via params)
        this.tenVadRuntime = new TenVADRuntime(tenVadModule, 256, 0.5);
        this.port.postMessage({
          type: "log",
          msg: "[NoiseGate worklet] TEN-VAD runtime initialized, decRatio=" + this.decRatio,
        });
      } catch (e) {
        // VAD init failure is non-fatal — the noise gate still runs without it.
        this.port.postMessage({
          type: "log",
          msg: "[NoiseGate worklet] TEN-VAD init failed: " + String(e),
        });
      }
    }
    // Messages are either a full parameter update or an external VAD gate toggle.
    this.port.onmessage = (
      e: MessageEvent<NoiseGateParams | VADGateMessage>,
    ): void => {
      if ((e.data as VADGateMessage).type === "vad-gate") {
        this.vadGateOpen = (e.data as VADGateMessage).open;
      } else {
        this.updateParams(e.data as NoiseGateParams);
      }
    };
    // Apply defaults until the main thread sends real parameters.
    this.updateParams({
      noiseGateActive: true,
      threshold: -60,
      attackMs: 25,
      holdMs: 200,
      releaseMs: 150,
      transientEnabled: false,
      transientThresholdDb: 15,
      transientReleaseMs: 80,
      vadEnabled: false,
      vadPositiveThreshold: 0.5,
      vadNegativeThreshold: 0.3,
    });
    this.port.postMessage({
      type: "log",
      msg: "[NoiseGate worklet] constructor called, sampleRate=" + sampleRate,
    });
  }

  /** Convert incoming ms/dB parameters to per-sample rates and linear levels. */
  private updateParams(p: NoiseGateParams): void {
    this.noiseGateActive = p.noiseGateActive ?? true;
    this.threshold = dbToLinear(p.threshold);
    this.attackRate = 1.0 / ((p.attackMs / 1000) * sampleRate);
    this.releaseRate = 1.0 / ((p.releaseMs / 1000) * sampleRate);
    this.holdTime = p.holdMs / 1000;
    this.transientEnabled = p.transientEnabled;
    this.transientRatio = dbToLinear(p.transientThresholdDb);
    this.transientReleaseRate = 1.0 / ((p.transientReleaseMs / 1000) * sampleRate);
    this.vadEnabled = p.vadEnabled ?? false;
    this.vadPositiveThreshold = p.vadPositiveThreshold ?? 0.5;
    this.vadNegativeThreshold = p.vadNegativeThreshold ?? 0.3;
    // When VAD is disabled, open the gate immediately
    if (!this.vadEnabled) this.vadGateOpen = true;
    this.port.postMessage({
      type: "log",
      msg: "[NoiseGate worklet] params updated: threshold=" + p.threshold
        + " vadEnabled=" + p.vadEnabled
        + " vadPos=" + p.vadPositiveThreshold
        + " vadNeg=" + p.vadNegativeThreshold,
    });
  }

  /**
   * Render-quantum callback: computes a combined gain
   * (gate × transient × VAD) per sample and writes the attenuated input to
   * the output. Returns true to keep the processor alive.
   */
  public process(inputs: Float32Array[][], outputs: Float32Array[][]): boolean {
    const input = inputs[0];
    const output = outputs[0];
    // No input connected yet — keep the node alive and output silence.
    if (!input || input.length === 0) return true;
    const channels = input.length;
    const blockSize = input[0]?.length ?? 128;
    const samplePeriod = 1.0 / sampleRate;
    for (let i = 0; i < blockSize; i++) {
      // Peak detection across all channels
      let curLevel = Math.abs(input[0]?.[i] ?? 0);
      for (let j = 1; j < channels; j++) {
        curLevel = Math.max(curLevel, Math.abs(input[j]?.[i] ?? 0));
      }
      // --- Transient suppressor ---
      let transientGain = 1.0;
      if (this.transientEnabled) {
        // One-pole RMS tracker of the background level.
        this.slowRms = Math.sqrt(
          this.rmsCoeff * this.slowRms * this.slowRms +
            (1.0 - this.rmsCoeff) * curLevel * curLevel,
        );
        const background = Math.max(this.slowRms, 1e-6);
        if (curLevel > background * this.transientRatio) {
          // Sudden spike well above background: hard-mute instantly…
          this.transientAttenuation = 0.0;
        } else {
          // …then fade back in over transientReleaseMs.
          this.transientAttenuation = Math.min(
            1.0,
            this.transientAttenuation + this.transientReleaseRate,
          );
        }
        transientGain = this.transientAttenuation;
      }
      // --- Noise gate ---
      if (this.noiseGateActive) {
        if (curLevel > this.threshold && !this.isOpen) {
          this.isOpen = true;
        }
        if (curLevel <= this.threshold && this.isOpen) {
          this.heldTime = 0;
          this.isOpen = false;
        }
        if (this.isOpen) {
          // Attack: ramp gain up toward 1.
          this.gateAttenuation = Math.min(
            1.0,
            this.gateAttenuation + this.attackRate,
          );
        } else {
          // Hold, then release: ramp gain down toward 0.
          this.heldTime += samplePeriod;
          if (this.heldTime > this.holdTime) {
            this.gateAttenuation = Math.max(
              0.0,
              this.gateAttenuation - this.releaseRate,
            );
          }
        }
      } else {
        this.gateAttenuation = 1.0;
      }
      // --- TEN-VAD in-worklet processing ---
      // Accumulate raw mono samples with decRatio:1 decimation (48 kHz → 16 kHz).
      // Every 256 output samples (16 ms) run the WASM VAD and update vadGateOpen.
      if (this.vadEnabled && this.tenVadRuntime !== null) {
        this.decAcc += input[0]?.[i] ?? 0;
        this.decPhase++;
        if (this.decPhase >= this.decRatio) {
          this.decPhase = 0;
          const avg = this.decAcc / this.decRatio;
          this.decAcc = 0;
          // Float32 [-1,1] → Int16 with clamping
          const s16 =
            avg >= 1.0
              ? 32767
              : avg <= -1.0
                ? -32768
                : (avg * 32767 + 0.5) | 0;
          this.vadHopBuf[this.vadHopCount++] = s16;
          if (this.vadHopCount >= 256) {
            this.vadHopCount = 0;
            const prob = this.tenVadRuntime.process(this.vadHopBuf);
            // Hysteresis: open above the positive threshold, close below the
            // negative one; in between the previous state is kept.
            if (!this.vadGateOpen && prob >= this.vadPositiveThreshold) {
              this.vadGateOpen = true;
            } else if (this.vadGateOpen && prob < this.vadNegativeThreshold) {
              this.vadGateOpen = false;
            }
          }
        }
      }
      // Ramp VAD attenuation toward target to avoid clicks
      const vadTarget = this.vadGateOpen ? 1.0 : 0.0;
      if (this.vadAttenuation < vadTarget) {
        this.vadAttenuation = Math.min(
          vadTarget,
          this.vadAttenuation + this.vadRampRate,
        );
      } else if (this.vadAttenuation > vadTarget) {
        this.vadAttenuation = Math.max(
          vadTarget,
          this.vadAttenuation - this.vadRampRate,
        );
      }
      // Combined gain, applied to every output channel (mono input is fanned out).
      const gain = this.gateAttenuation * transientGain * this.vadAttenuation;
      for (let c = 0; c < output.length; c++) {
        const inCh = input[c] ?? input[0];
        const outCh = output[c];
        if (inCh && outCh) {
          outCh[i] = (inCh[i] ?? 0) * gain;
        }
      }
    }
    // Periodic state log — every 375 blocks (~1 s at 48 kHz assuming
    // 128-sample render quanta).
    this.logCounter++;
    if (this.logCounter % 375 === 0) {
      this.port.postMessage({
        type: "log",
        msg: "[NoiseGate worklet] gateOpen=" + this.isOpen
          + " gateAtten=" + this.gateAttenuation.toFixed(3)
          + " transientAtten=" + this.transientAttenuation.toFixed(3)
          + " vadOpen=" + this.vadGateOpen
          + " vadAtten=" + this.vadAttenuation.toFixed(3),
      });
    }
    return true;
  }
}
// Register under the name that NoiseGateTransformer uses when constructing
// its AudioWorkletNode.
registerProcessor("noise-gate-processor", NoiseGateProcessor);

View File

@@ -1,171 +0,0 @@
/*
Copyright 2026 New Vector Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { type Track } from "livekit-client";
import { logger } from "matrix-js-sdk/lib/logger";
const log = logger.getChild("[NoiseGateTransformer]");
/** Parameters for the noise-gate worklet; mirrors the worklet-side interface. */
export interface NoiseGateParams {
  noiseGateActive: boolean;
  threshold: number; // dBFS — gate opens above this, closes below it
  attackMs: number;
  holdMs: number;
  releaseMs: number;
  transientEnabled: boolean;
  transientThresholdDb: number; // dB above background RMS that triggers suppression
  transientReleaseMs: number; // ms for suppression to fade after transient ends
  // TEN-VAD params — processed entirely inside the AudioWorklet
  vadEnabled: boolean;
  vadPositiveThreshold: number; // open gate when isSpeech prob >= this (0–1)
  vadNegativeThreshold: number; // close gate when isSpeech prob < this (0–1)
}
/**
 * Matches LiveKit's AudioProcessorOptions (experimental API, not publicly
 * exported, so we declare it locally based on the type definitions).
 */
interface AudioProcessorOptions {
  kind: Track.Kind.Audio; // always audio for this processor
  track: MediaStreamTrack; // raw capture track to process
  audioContext: AudioContext; // context the processing graph is built in
  element?: HTMLMediaElement; // part of the LiveKit interface; unused here
}
/**
 * Matches LiveKit's TrackProcessor<Track.Kind.Audio> interface.
 */
export interface AudioTrackProcessor {
  name: string; // identifier for the processor
  processedTrack?: MediaStreamTrack; // output track, available once init() completes
  init(opts: AudioProcessorOptions): Promise<void>;
  restart(opts: AudioProcessorOptions): Promise<void>;
  destroy(): Promise<void>;
}
// Cached compiled TEN-VAD module — compiled once, reused across processor restarts.
let tenVadModulePromise: Promise<WebAssembly.Module> | null = null;

/**
 * Fetch and compile the TEN-VAD WASM binary, memoizing the compiled module.
 * On failure the cache is cleared so the next attach can retry.
 */
function getTenVADModule(): Promise<WebAssembly.Module> {
  if (tenVadModulePromise === null) {
    tenVadModulePromise = (async (): Promise<WebAssembly.Module> => {
      try {
        const response = await fetch("/vad/ten_vad.wasm");
        if (!response.ok)
          throw new Error(`Failed to fetch ten_vad.wasm: ${response.status}`);
        const bytes = await response.arrayBuffer();
        return await WebAssembly.compile(bytes);
      } catch (e) {
        // Clear the cache so a retry is possible on next attach
        tenVadModulePromise = null;
        throw e;
      }
    })();
  }
  return tenVadModulePromise;
}
/**
 * LiveKit audio track processor that applies a noise gate, optional transient
 * suppressor, and optional TEN-VAD gate via AudioWorklet.
 *
 * The TEN-VAD WASM module is fetched once, compiled, and passed to the worklet
 * via processorOptions so it runs synchronously inside the audio thread —
 * no IPC round-trip, ~16 ms VAD latency.
 *
 * Audio graph: sourceNode → workletNode → destinationNode
 * processedTrack is destinationNode.stream.getAudioTracks()[0]
 */
export class NoiseGateTransformer implements AudioTrackProcessor {
  public readonly name = "noise-gate";
  public processedTrack?: MediaStreamTrack;
  // Graph nodes — undefined until init() completes and again after destroy().
  private workletNode?: AudioWorkletNode;
  private sourceNode?: MediaStreamAudioSourceNode;
  private destinationNode?: MediaStreamAudioDestinationNode;
  private params: NoiseGateParams;

  public constructor(params: NoiseGateParams) {
    // Copy so later mutation by the caller doesn't leak into the processor.
    this.params = { ...params };
  }

  /**
   * Build the processing graph for `track` inside `audioContext` and expose
   * the gated audio as `processedTrack`.
   */
  public async init(opts: AudioProcessorOptions): Promise<void> {
    const { track, audioContext } = opts;
    log.info("init() called, audioContext state:", audioContext.state, "params:", this.params);
    // Fetch and compile the TEN-VAD WASM module (cached after first call).
    // VAD is optional — the gate still works without it.
    let tenVadModule: WebAssembly.Module | undefined;
    try {
      tenVadModule = await getTenVADModule();
      log.info("TEN-VAD WASM module compiled");
    } catch (e) {
      log.warn("TEN-VAD WASM module unavailable — VAD disabled:", e);
    }
    const workletUrl = new URL(
      "./NoiseGateProcessor.worklet.ts",
      import.meta.url,
    );
    log.info("loading worklet from:", workletUrl.href);
    await audioContext.audioWorklet.addModule(workletUrl);
    log.info("worklet module loaded");
    this.workletNode = new AudioWorkletNode(
      audioContext,
      "noise-gate-processor",
      {
        processorOptions: {
          tenVadModule,
        },
      },
    );
    // Surface worklet-side log messages through our logger.
    this.workletNode.port.onmessage = (
      e: MessageEvent<{ type: string; msg: string }>,
    ): void => {
      if (e.data?.type === "log") log.debug(e.data.msg);
    };
    this.sendParams();
    // Wire source → worklet → destination; the destination's stream carries
    // the processed audio.
    this.sourceNode = audioContext.createMediaStreamSource(
      new MediaStream([track]),
    );
    this.destinationNode = audioContext.createMediaStreamDestination();
    this.sourceNode.connect(this.workletNode);
    this.workletNode.connect(this.destinationNode);
    this.processedTrack = this.destinationNode.stream.getAudioTracks()[0];
    log.info("graph wired, processedTrack:", this.processedTrack);
  }

  /** Tear down and rebuild the graph with the new options. */
  public async restart(opts: AudioProcessorOptions): Promise<void> {
    await this.destroy();
    await this.init(opts);
  }

  /** Disconnect all nodes and drop references so the graph can be collected. */
  // eslint-disable-next-line @typescript-eslint/require-await
  public async destroy(): Promise<void> {
    this.sourceNode?.disconnect();
    this.workletNode?.disconnect();
    this.destinationNode?.disconnect();
    this.sourceNode = undefined;
    this.workletNode = undefined;
    this.destinationNode = undefined;
    this.processedTrack = undefined;
  }

  /** Push updated gate/VAD parameters to the running worklet. */
  public updateParams(params: NoiseGateParams): void {
    this.params = { ...params };
    this.sendParams();
  }

  /** Post the current params to the worklet port (no-op before init()). */
  private sendParams(): void {
    if (!this.workletNode) return;
    log.debug("sendParams:", this.params);
    this.workletNode.port.postMessage(this.params);
  }
}

View File

@@ -67,6 +67,6 @@ Initializer.initBeforeReact()
);
})
.catch((e) => {
logger.error("Failed to initialize app", e);
logger.error(`Failed to initialize app ${e.message}`, e);
root.render(e.message);
});

View File

@@ -127,25 +127,12 @@ exports[`InCallView > rendering > renders 1`] = `
viewBox="0 0 24 24"
width="1em"
xmlns="http://www.w3.org/2000/svg"
>
<g
clip-path="url(#a)"
>
<path
clip-rule="evenodd"
d="M8.929 15.1a13.6 13.6 0 0 0 4.654 3.066q2.62 1.036 5.492.923h.008l.003-.004.003-.002-.034-3.124-3.52-.483-1.791 1.792-.645-.322a13.5 13.5 0 0 1-3.496-2.52 13.4 13.4 0 0 1-2.52-3.496l-.322-.644 1.792-1.792-.483-3.519-3.123-.034-.003.002-.003.004v.002a13.65 13.65 0 0 0 .932 5.492A13.4 13.4 0 0 0 8.93 15.1m3.92 4.926a15.6 15.6 0 0 1-5.334-3.511 15.4 15.4 0 0 1-3.505-5.346 15.6 15.6 0 0 1-1.069-6.274 1.93 1.93 0 0 1 .589-1.366c.366-.366.84-.589 1.386-.589h.01l3.163.035a1.96 1.96 0 0 1 1.958 1.694v.005l.487 3.545v.003c.043.297.025.605-.076.907a2 2 0 0 1-.485.773l-.762.762a11.4 11.4 0 0 0 3.206 3.54q.457.33.948.614l.762-.761a2 2 0 0 1 .774-.486c.302-.1.61-.118.907-.076l3.553.487a1.96 1.96 0 0 1 1.694 1.958l.034 3.174c0 .546-.223 1.02-.588 1.386-.361.36-.827.582-1.363.588a15.3 15.3 0 0 1-6.29-1.062"
d="M8.929 15.1a13.6 13.6 0 0 0 4.654 3.066q2.62 1.036 5.492.923h.008l.003-.004.003-.002-.034-3.124-3.52-.483-1.791 1.792-.645-.322a13.5 13.5 0 0 1-3.496-2.52 13.4 13.4 0 0 1-2.52-3.496l-.322-.645 1.792-1.791-.483-3.52-3.123-.033-.003.002-.003.004v.002a13.65 13.65 0 0 0 .932 5.492A13.4 13.4 0 0 0 8.93 15.1m3.92 4.926a15.6 15.6 0 0 1-5.334-3.511 15.4 15.4 0 0 1-3.505-5.346 15.6 15.6 0 0 1-1.069-6.274 1.93 1.93 0 0 1 .589-1.366c.366-.366.84-.589 1.386-.589h.01l3.163.035a1.96 1.96 0 0 1 1.958 1.694v.005l.487 3.545v.003c.043.297.025.605-.076.907a2 2 0 0 1-.485.773l-.762.762a11.3 11.3 0 0 0 1.806 2.348 11.4 11.4 0 0 0 2.348 1.806l.762-.762a2 2 0 0 1 .774-.485c.302-.1.61-.118.907-.076l3.553.487a1.96 1.96 0 0 1 1.694 1.958l.034 3.174c0 .546-.223 1.02-.588 1.386-.36.36-.827.582-1.363.588a15.3 15.3 0 0 1-6.29-1.062"
fill-rule="evenodd"
/>
</g>
<defs>
<clippath
id="a"
>
<path
d="M0 0h24v24H0z"
/>
</clippath>
</defs>
</svg>
</div>
<h2
@@ -285,14 +272,14 @@ exports[`InCallView > rendering > renders 1`] = `
class="buttons"
>
<button
aria-checked="false"
aria-disabled="true"
aria-label="Unmute microphone"
aria-labelledby="_r_8_"
class="_button_13vu4_8 _has-icon_13vu4_60 _icon-only_13vu4_53"
data-kind="secondary"
data-size="lg"
data-testid="incall_mute"
role="button"
role="switch"
tabindex="0"
>
<svg
@@ -309,14 +296,14 @@ exports[`InCallView > rendering > renders 1`] = `
</svg>
</button>
<button
aria-checked="false"
aria-disabled="true"
aria-label="Start video"
aria-labelledby="_r_d_"
class="_button_13vu4_8 _has-icon_13vu4_60 _icon-only_13vu4_53"
data-kind="secondary"
data-size="lg"
data-testid="incall_videomute"
role="button"
role="switch"
tabindex="0"
>
<svg
@@ -354,7 +341,6 @@ exports[`InCallView > rendering > renders 1`] = `
</svg>
</button>
<button
aria-label="End call"
aria-labelledby="_r_n_"
class="_button_13vu4_8 endCall _has-icon_13vu4_60 _icon-only_13vu4_53 _destructive_13vu4_110"
data-kind="primary"

View File

@@ -1,87 +0,0 @@
/*
Copyright 2026 New Vector Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
/* Outer container for the slider and any overlay elements. */
.wrapper {
  display: flex;
  align-items: center;
  position: relative;
  width: 100%;
}

/* Radix Slider root element. */
.slider {
  display: flex;
  align-items: center;
  position: relative;
  width: 100%;
}

.track {
  flex-grow: 1;
  border-radius: var(--cpd-radius-pill-effect);
  height: var(--cpd-space-4x);
  outline: var(--cpd-border-width-1) solid var(--cpd-color-border-interactive-primary);
  outline-offset: calc(-1 * var(--cpd-border-width-1));
  cursor: pointer;
  position: relative;
  /* Clip the ::before level fill to the pill shape. */
  overflow: hidden;
  /* Base background */
  background: var(--cpd-color-bg-subtle-primary);
}

/* Live mic level fill — driven by the --mic-level CSS variable set from JS. */
.track::before {
  content: "";
  position: absolute;
  inset: 0;
  width: var(--mic-level, 0%);
  background: var(--cpd-color-gray-600, #808080);
  opacity: 0.55;
  border-radius: var(--cpd-radius-pill-effect);
  transition: width 40ms linear;
  pointer-events: none;
}

/* Green when mic level is at or above the threshold. */
.track[data-active="true"]::before {
  background: var(--cpd-color-green-900, #1a7f4b);
}

/* Threshold marker — driven by the --threshold-pct CSS variable set from JS. */
.track::after {
  content: "";
  position: absolute;
  top: 0;
  bottom: 0;
  left: var(--threshold-pct, 50%);
  width: 2px;
  background: var(--cpd-color-text-primary);
  opacity: 0.6;
  pointer-events: none;
  /* Center the 2px marker on the threshold position. */
  transform: translateX(-50%);
}

/* Hide the Radix Range highlight — we use our own visuals. */
.range {
  display: none;
}

/* Draggable thumb. */
.handle {
  display: block;
  block-size: var(--cpd-space-5x);
  inline-size: var(--cpd-space-5x);
  border-radius: var(--cpd-radius-pill-effect);
  background: var(--cpd-color-bg-action-primary-rest);
  box-shadow: 0 0 0 2px var(--cpd-color-bg-canvas-default);
  cursor: pointer;
  transition: background-color ease 0.15s;
  /* Keep the thumb above the track's pseudo-element overlays. */
  z-index: 1;
}

.handle:focus-visible {
  outline: 2px solid var(--cpd-color-border-focused);
  outline-offset: 2px;
}

View File

@@ -1,139 +0,0 @@
/*
Copyright 2026 New Vector Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { Root, Track, Range, Thumb } from "@radix-ui/react-slider";
import { type FC, useCallback, useEffect, useRef } from "react";
import styles from "./NoiseLevelSlider.module.css";
/** Props for {@link NoiseLevelSlider}. */
interface Props {
  label: string; // aria-label for the slider thumb
  value: number; // current threshold value (same scale as min/max)
  onValueChange: (value: number) => void; // called with the new value while sliding
  onValueCommit?: (value: number) => void; // called when the value is committed
  min: number;
  max: number;
  step: number;
}
/**
 * Threshold slider that shows live microphone input level as a background fill,
 * similar to Discord's input sensitivity control.
 *
 * The green fill represents your current mic volume in real time.
 * Drag the handle to set the gate threshold — audio below it will be silenced.
 */
export const NoiseLevelSlider: FC<Props> = ({
  label,
  value,
  onValueChange: onValueChangeProp,
  onValueCommit: onValueCommitProp,
  min,
  max,
  step,
}) => {
  // Radix Track element — level and threshold are painted via CSS variables on it.
  const trackRef = useRef<HTMLSpanElement>(null);
  const animFrameRef = useRef<number>(0);
  const analyserRef = useRef<AnalyserNode | null>(null);
  const streamRef = useRef<MediaStream | null>(null);
  const dataRef = useRef<Float32Array<ArrayBuffer> | null>(null);
  // Threshold as a percentage of the slider range, mirrored into a ref so the
  // rAF loop can read it without re-subscribing.
  const thresholdPctRef = useRef<number>(0);

  // Start mic monitoring via AnalyserNode
  useEffect(() => {
    let ctx: AudioContext | null = null;
    navigator.mediaDevices
      .getUserMedia({ audio: true, video: false })
      .then((stream) => {
        streamRef.current = stream;
        ctx = new AudioContext();
        const source = ctx.createMediaStreamSource(stream);
        const analyser = ctx.createAnalyser();
        analyser.fftSize = 1024;
        analyser.smoothingTimeConstant = 0.6;
        source.connect(analyser);
        analyserRef.current = analyser;
        dataRef.current = new Float32Array(analyser.fftSize) as Float32Array<ArrayBuffer>;
      })
      .catch(() => {
        // Mic not available — live level stays at 0
      });
    // Cleanup: stop the rAF loop and capture, drop refs, close the context.
    return (): void => {
      cancelAnimationFrame(animFrameRef.current);
      streamRef.current?.getTracks().forEach((t) => t.stop());
      streamRef.current = null;
      analyserRef.current = null;
      void ctx?.close();
    };
  }, []);

  // rAF loop — reads RMS level and updates CSS variable on the track element
  useEffect(() => {
    const tick = (): void => {
      animFrameRef.current = requestAnimationFrame(tick);
      const analyser = analyserRef.current;
      const data = dataRef.current;
      const track = trackRef.current;
      if (!analyser || !data || !track) return;
      analyser.getFloatTimeDomainData(data);
      // Peak detection — matches the gate's own measurement method
      let peak = 0;
      for (const s of data) peak = Math.max(peak, Math.abs(s));
      const dbfs = peak > 0 ? 20 * Math.log10(peak) : -Infinity;
      // Map dBFS value to slider percentage (same scale as min/max)
      const clampedDb = Math.max(min, Math.min(max, dbfs));
      const levelPct = ((clampedDb - min) / (max - min)) * 100;
      track.style.setProperty("--mic-level", `${levelPct.toFixed(1)}%`);
      // data-active drives the green "above threshold" styling in CSS.
      track.dataset.active = String(levelPct >= thresholdPctRef.current);
    };
    animFrameRef.current = requestAnimationFrame(tick);
    return (): void => cancelAnimationFrame(animFrameRef.current);
  }, [min, max]);

  // Keep threshold marker in sync with slider value
  useEffect(() => {
    const track = trackRef.current;
    if (!track) return;
    const thresholdPct = ((value - min) / (max - min)) * 100;
    thresholdPctRef.current = thresholdPct;
    track.style.setProperty("--threshold-pct", `${thresholdPct.toFixed(1)}%`);
  }, [value, min, max]);

  // Unwrap Radix's number[] callback values to the single number our props expect.
  const onValueChange = useCallback(
    ([v]: number[]) => onValueChangeProp(v),
    [onValueChangeProp],
  );
  const onValueCommit = useCallback(
    ([v]: number[]) => onValueCommitProp?.(v),
    [onValueCommitProp],
  );

  return (
    <Root
      className={styles.slider}
      value={[value]}
      onValueChange={onValueChange}
      onValueCommit={onValueCommit}
      min={min}
      max={max}
      step={step}
    >
      <Track className={styles.track} ref={trackRef}>
        <Range className={styles.range} />
      </Track>
      <Thumb className={styles.handle} aria-label={label} />
    </Root>
  );
};

View File

@@ -21,8 +21,7 @@ Please see LICENSE in the repository root for full details.
margin-top: var(--cpd-space-2x);
}
.volumeSlider > label,
.sliderLabel {
.volumeSlider > label {
margin-bottom: var(--cpd-space-1x);
display: block;
}
@@ -34,40 +33,3 @@ Please see LICENSE in the repository root for full details.
.volumeSlider > p {
color: var(--cpd-color-text-secondary);
}
.noiseGateSection {
margin-block-start: var(--cpd-space-6x);
}
.noiseGateHeading {
color: var(--cpd-color-text-secondary);
margin-block: var(--cpd-space-3x) 0;
}
.thresholdSlider {
margin-block-start: calc(-32px + var(--cpd-space-2x));
}
.noiseGateSeparator {
margin-block: 6px var(--cpd-space-4x);
}
.advancedGate {
margin-top: var(--cpd-space-3x);
}
.advancedGateToggle {
all: unset;
cursor: pointer;
font: var(--cpd-font-body-sm-semibold);
color: var(--cpd-color-text-secondary);
user-select: none;
}
.advancedGateToggle:hover {
color: var(--cpd-color-text-primary);
}
.restoreDefaults {
margin-top: var(--cpd-space-6x);
}

View File

@@ -5,10 +5,10 @@ SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { type ChangeEvent, type FC, type ReactNode, useEffect, useState, useCallback } from "react";
import { type FC, type ReactNode, useEffect, useState } from "react";
import { useTranslation } from "react-i18next";
import { type MatrixClient } from "matrix-js-sdk";
import { Button, Heading, Root as Form, Separator } from "@vector-im/compound-web";
import { Button, Root as Form, Separator } from "@vector-im/compound-web";
import { type Room as LivekitRoom } from "livekit-client";
import { Modal } from "../Modal";
@@ -24,21 +24,9 @@ import {
soundEffectVolume as soundEffectVolumeSetting,
backgroundBlur as backgroundBlurSetting,
developerMode,
noiseGateEnabled as noiseGateEnabledSetting,
noiseGateThreshold as noiseGateThresholdSetting,
noiseGateAttack as noiseGateAttackSetting,
noiseGateHold as noiseGateHoldSetting,
noiseGateRelease as noiseGateReleaseSetting,
transientSuppressorEnabled as transientSuppressorEnabledSetting,
transientThreshold as transientThresholdSetting,
transientRelease as transientReleaseSetting,
vadEnabled as vadEnabledSetting,
vadPositiveThreshold as vadPositiveThresholdSetting,
vadNegativeThreshold as vadNegativeThresholdSetting,
} from "./settings";
import { PreferencesSettingsTab } from "./PreferencesSettingsTab";
import { Slider } from "../Slider";
import { NoiseLevelSlider } from "./NoiseLevelSlider";
import { DeviceSelection } from "./DeviceSelection";
import { useTrackProcessor } from "../livekit/TrackProcessorContext";
import { DeveloperSettingsTab } from "./DeveloperSettingsTab";
@@ -119,49 +107,6 @@ export const SettingsModal: FC<Props> = ({
const [soundVolumeRaw, setSoundVolumeRaw] = useState(soundVolume);
const [showDeveloperSettingsTab] = useSetting(developerMode);
// Noise gate settings
const [noiseGateEnabled, setNoiseGateEnabled] = useSetting(noiseGateEnabledSetting);
const [noiseGateThreshold, setNoiseGateThreshold] = useSetting(noiseGateThresholdSetting);
const [noiseGateThresholdRaw, setNoiseGateThresholdRaw] = useState(noiseGateThreshold);
const [noiseGateAttack, setNoiseGateAttack] = useSetting(noiseGateAttackSetting);
const [noiseGateAttackRaw, setNoiseGateAttackRaw] = useState(noiseGateAttack);
const [noiseGateHold, setNoiseGateHold] = useSetting(noiseGateHoldSetting);
const [noiseGateHoldRaw, setNoiseGateHoldRaw] = useState(noiseGateHold);
const [noiseGateRelease, setNoiseGateRelease] = useSetting(noiseGateReleaseSetting);
const [noiseGateReleaseRaw, setNoiseGateReleaseRaw] = useState(noiseGateRelease);
const [showAdvancedGate, setShowAdvancedGate] = useState(false);
// Voice activity detection
const [vadActive, setVadActive] = useSetting(vadEnabledSetting);
const [vadPositiveThreshold, setVadPositiveThreshold] = useSetting(vadPositiveThresholdSetting);
const [vadPositiveThresholdRaw, setVadPositiveThresholdRaw] = useState(vadPositiveThreshold);
const [vadNegativeThreshold, setVadNegativeThreshold] = useSetting(vadNegativeThresholdSetting);
const [vadNegativeThresholdRaw, setVadNegativeThresholdRaw] = useState(vadNegativeThreshold);
// Transient suppressor settings
const [transientEnabled, setTransientEnabled] = useSetting(transientSuppressorEnabledSetting);
const [transientThreshold, setTransientThreshold] = useSetting(transientThresholdSetting);
const [transientThresholdRaw, setTransientThresholdRaw] = useState(transientThreshold);
const [transientRelease, setTransientRelease] = useSetting(transientReleaseSetting);
const [transientReleaseRaw, setTransientReleaseRaw] = useState(transientRelease);
const resetTransientDefaults = useCallback((): void => {
const t = transientThresholdSetting.defaultValue;
const r = transientReleaseSetting.defaultValue;
setTransientThreshold(t); setTransientThresholdRaw(t);
setTransientRelease(r); setTransientReleaseRaw(r);
}, [setTransientThreshold, setTransientRelease]);
const resetGateDefaults = useCallback((): void => {
const a = noiseGateAttackSetting.defaultValue;
const h = noiseGateHoldSetting.defaultValue;
const r = noiseGateReleaseSetting.defaultValue;
setNoiseGateAttack(a); setNoiseGateAttackRaw(a);
setNoiseGateHold(h); setNoiseGateHoldRaw(h);
setNoiseGateRelease(r); setNoiseGateReleaseRaw(r);
}, [setNoiseGateAttack, setNoiseGateHold, setNoiseGateRelease]);
const { available: isRageshakeAvailable } = useSubmitRageshake();
// For controlled devices, we will not show the input section:
@@ -220,237 +165,6 @@ export const SettingsModal: FC<Props> = ({
/>
</div>
</Form>
<div className={styles.noiseGateSection}>
<Heading
type="body"
weight="semibold"
size="sm"
as="h4"
className={styles.noiseGateHeading}
>
Noise Gate
</Heading>
<Separator className={styles.noiseGateSeparator} />
<FieldRow>
<InputField
id="noiseGateEnabled"
type="checkbox"
label="Enable noise gate"
description="Suppress audio below a configurable threshold."
checked={noiseGateEnabled}
onChange={(e: ChangeEvent<HTMLInputElement>): void =>
setNoiseGateEnabled(e.target.checked)
}
/>
</FieldRow>
{noiseGateEnabled && (
<>
<div className={`${styles.volumeSlider} ${styles.thresholdSlider}`}>
<span className={styles.sliderLabel}>Threshold</span>
<p>Gate opens above this level, closes below it.</p>
<NoiseLevelSlider
label="Noise gate threshold"
value={noiseGateThresholdRaw}
onValueChange={setNoiseGateThresholdRaw}
onValueCommit={setNoiseGateThreshold}
min={-100}
max={0}
step={1}
/>
</div>
<div className={styles.advancedGate}>
<button
className={styles.advancedGateToggle}
onClick={(): void => setShowAdvancedGate((v) => !v)}
>
{showAdvancedGate ? "▾" : "▸"} Advanced settings
</button>
{showAdvancedGate && (
<>
<div className={styles.volumeSlider}>
<label>Attack: {noiseGateAttackRaw} ms</label>
<p>How quickly the gate opens when signal exceeds threshold.</p>
<Slider
label="Noise gate attack"
value={noiseGateAttackRaw}
onValueChange={setNoiseGateAttackRaw}
onValueCommit={setNoiseGateAttack}
min={1}
max={100}
step={1}
tooltip={false}
/>
</div>
<div className={styles.volumeSlider}>
<label>Hold: {noiseGateHoldRaw} ms</label>
<p>How long the gate stays open after signal drops below threshold.</p>
<Slider
label="Noise gate hold"
value={noiseGateHoldRaw}
onValueChange={setNoiseGateHoldRaw}
onValueCommit={setNoiseGateHold}
min={0}
max={500}
step={10}
tooltip={false}
/>
</div>
<div className={styles.volumeSlider}>
<label>Release: {noiseGateReleaseRaw} ms</label>
<p>How quickly the gate closes after hold expires.</p>
<Slider
label="Noise gate release"
value={noiseGateReleaseRaw}
onValueChange={setNoiseGateReleaseRaw}
onValueCommit={setNoiseGateRelease}
min={10}
max={500}
step={10}
tooltip={false}
/>
</div>
<div className={styles.restoreDefaults}>
<Button kind="secondary" size="sm" onClick={resetGateDefaults}>
Restore defaults
</Button>
</div>
</>
)}
</div>
</>
)}
</div>
<div className={styles.noiseGateSection}>
<Heading
type="body"
weight="semibold"
size="sm"
as="h4"
className={styles.noiseGateHeading}
>
Voice Activity Detection
</Heading>
<Separator className={styles.noiseGateSeparator} />
<FieldRow>
<InputField
id="vadEnabled"
type="checkbox"
label="Enable voice activity detection"
description="Uses TEN-VAD to mute audio when no speech is detected (~16 ms latency)."
checked={vadActive}
onChange={(e: ChangeEvent<HTMLInputElement>): void =>
setVadActive(e.target.checked)
}
/>
</FieldRow>
{vadActive && (
<>
<div className={`${styles.volumeSlider} ${styles.thresholdSlider}`}>
<span className={styles.sliderLabel}>Open threshold: {Math.round(vadPositiveThresholdRaw * 100)}%</span>
<p>How confident the model must be before opening the gate.</p>
<Slider
label="VAD open threshold"
value={vadPositiveThresholdRaw}
onValueChange={setVadPositiveThresholdRaw}
onValueCommit={setVadPositiveThreshold}
min={0.1}
max={0.9}
step={0.05}
tooltip={false}
/>
</div>
<div className={styles.volumeSlider}>
<span className={styles.sliderLabel}>Close threshold: {Math.round(vadNegativeThresholdRaw * 100)}%</span>
<p>How low the probability must drop before closing the gate.</p>
<Slider
label="VAD close threshold"
value={vadNegativeThresholdRaw}
onValueChange={setVadNegativeThresholdRaw}
onValueCommit={setVadNegativeThreshold}
min={0.05}
max={0.7}
step={0.05}
tooltip={false}
/>
</div>
<div className={styles.restoreDefaults}>
<Button
kind="secondary"
size="sm"
onClick={(): void => {
const pos = vadPositiveThresholdSetting.defaultValue;
const neg = vadNegativeThresholdSetting.defaultValue;
setVadPositiveThreshold(pos); setVadPositiveThresholdRaw(pos);
setVadNegativeThreshold(neg); setVadNegativeThresholdRaw(neg);
}}
>
Restore defaults
</Button>
</div>
</>
)}
</div>
<div className={styles.noiseGateSection}>
<Heading
type="body"
weight="semibold"
size="sm"
as="h4"
className={styles.noiseGateHeading}
>
Transient Suppressor
</Heading>
<Separator className={styles.noiseGateSeparator} />
<FieldRow>
<InputField
id="transientEnabled"
type="checkbox"
label="Enable transient suppressor"
description="Cut sudden loud impacts like desk hits or mic bumps."
checked={transientEnabled}
onChange={(e: ChangeEvent<HTMLInputElement>): void =>
setTransientEnabled(e.target.checked)
}
/>
</FieldRow>
{transientEnabled && (
<>
<div className={`${styles.volumeSlider} ${styles.thresholdSlider}`}>
<span className={styles.sliderLabel}>Sensitivity: {transientThresholdRaw} dB above background</span>
<p>Lower values catch more impacts; higher values only catch the loudest ones.</p>
<Slider
label="Transient threshold"
value={transientThresholdRaw}
onValueChange={setTransientThresholdRaw}
onValueCommit={setTransientThreshold}
min={8}
max={30}
step={1}
tooltip={false}
/>
</div>
<div className={styles.volumeSlider}>
<span className={styles.sliderLabel}>Release: {transientReleaseRaw} ms</span>
<p>How quickly audio returns after suppression.</p>
<Slider
label="Transient release"
value={transientReleaseRaw}
onValueChange={setTransientReleaseRaw}
onValueCommit={setTransientRelease}
min={20}
max={200}
step={10}
tooltip={false}
/>
</div>
<div className={styles.restoreDefaults}>
<Button kind="secondary" size="sm" onClick={resetTransientDefaults}>
Restore defaults
</Button>
</div>
</>
)}
</div>
</>
),
};

View File

@@ -234,12 +234,12 @@ exports[`DeveloperSettingsTab > renders and matches snapshot 1`] = `
class="_inline-field-control_19upo_44"
>
<div
class="_container_1qhtc_10"
class="_container_1ug7n_10"
>
<input
aria-describedby="radix-_r_9_ radix-_r_b_ radix-_r_d_"
checked=""
class="_input_1qhtc_18"
class="_input_1ug7n_18"
id="radix-_r_8_"
name="_r_0_"
title=""
@@ -247,7 +247,7 @@ exports[`DeveloperSettingsTab > renders and matches snapshot 1`] = `
value="legacy"
/>
<div
class="_ui_1qhtc_19"
class="_ui_1ug7n_19"
/>
</div>
</div>
@@ -275,11 +275,11 @@ exports[`DeveloperSettingsTab > renders and matches snapshot 1`] = `
class="_inline-field-control_19upo_44"
>
<div
class="_container_1qhtc_10"
class="_container_1ug7n_10"
>
<input
aria-describedby="radix-_r_9_ radix-_r_b_ radix-_r_d_"
class="_input_1qhtc_18"
class="_input_1ug7n_18"
id="radix-_r_a_"
name="_r_0_"
title=""
@@ -287,7 +287,7 @@ exports[`DeveloperSettingsTab > renders and matches snapshot 1`] = `
value="compatibility"
/>
<div
class="_ui_1qhtc_19"
class="_ui_1ug7n_19"
/>
</div>
</div>
@@ -315,11 +315,11 @@ exports[`DeveloperSettingsTab > renders and matches snapshot 1`] = `
class="_inline-field-control_19upo_44"
>
<div
class="_container_1qhtc_10"
class="_container_1ug7n_10"
>
<input
aria-describedby="radix-_r_9_ radix-_r_b_ radix-_r_d_"
class="_input_1qhtc_18"
class="_input_1ug7n_18"
id="radix-_r_c_"
name="_r_0_"
title=""
@@ -327,7 +327,7 @@ exports[`DeveloperSettingsTab > renders and matches snapshot 1`] = `
value="matrix_2_0"
/>
<div
class="_ui_1qhtc_19"
class="_ui_1ug7n_19"
/>
</div>
</div>

View File

@@ -129,43 +129,6 @@ export const alwaysShowIphoneEarpiece = new Setting<boolean>(
false,
);
// Whether the noise-gate audio processor is enabled (off by default).
export const noiseGateEnabled = new Setting<boolean>(
  "noise-gate-enabled",
  false,
);
// Threshold in dBFS — gate opens above this, closes below it
export const noiseGateThreshold = new Setting<number>(
  "noise-gate-threshold",
  -60,
);
// Time in ms for the gate to fully open after signal exceeds threshold
export const noiseGateAttack = new Setting<number>("noise-gate-attack", 25);
// Time in ms the gate stays open after signal drops below threshold
export const noiseGateHold = new Setting<number>("noise-gate-hold", 200);
// Time in ms for the gate to fully close after hold expires
export const noiseGateRelease = new Setting<number>("noise-gate-release", 150);
// Whether voice activity detection is enabled (off by default).
export const vadEnabled = new Setting<boolean>("vad-enabled", false);
// Probability above which the VAD opens the gate (0–1)
export const vadPositiveThreshold = new Setting<number>("vad-positive-threshold", 0.2);
// Probability below which the VAD closes the gate (0–1)
export const vadNegativeThreshold = new Setting<number>("vad-negative-threshold", 0.1);
// Whether the transient (impact-noise) suppressor is enabled (off by default).
export const transientSuppressorEnabled = new Setting<boolean>(
  "transient-suppressor-enabled",
  false,
);
// How many dB above the background RMS a peak must be to trigger suppression
export const transientThreshold = new Setting<number>(
  "transient-suppressor-threshold",
  15,
);
// Time in ms for suppression to fade after transient ends
export const transientRelease = new Setting<number>(
  "transient-suppressor-release",
  80,
);
export enum MatrixRTCMode {
Legacy = "legacy",
Compatibility = "compatibility",

View File

@@ -78,7 +78,6 @@ function renderWithMockClient(
disconnected: false,
supportedFeatures: {
reactions: true,
thumbnails: true,
},
setClient: vi.fn(),
authenticated: {

View File

@@ -0,0 +1,563 @@
/*
Copyright 2026 Element Corp.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { it, vi, expect, beforeEach, afterEach, describe } from "vitest";
import { firstValueFrom, of, Subject, take, toArray } from "rxjs";
import { type RTCCallIntent } from "matrix-js-sdk/lib/matrixrtc";
import { AndroidControlledAudioOutput } from "./AndroidControlledAudioOutput.ts";
import type { Controls, OutputDevice } from "../controls";
import { ObservableScope } from "./ObservableScope";
import { withTestScheduler } from "../utils/test";
// All the following device types are real device types that have been observed in the wild on Android devices,
// gathered from logs.
// There are no BT Speakers because they are currently filtered out by EXA (native layer)
// A device type describing the speaker system (i.e. a mono speaker or stereo speakers) built in a device.
const SPEAKER_DEVICE: OutputDevice = {
  id: "3",
  name: "Built-in speaker",
  isEarpiece: false,
  isSpeaker: true,
  isExternalHeadset: false,
};
// A device type describing the attached earphone speaker.
const EARPIECE_DEVICE: OutputDevice = {
  id: "2",
  name: "Built-in earpiece",
  isEarpiece: true,
  isSpeaker: false,
  isExternalHeadset: false,
};
// A device type describing a Bluetooth device typically used for telephony.
// Note: this is the only fixture with isExternalHeadset set to true.
const BT_HEADSET_DEVICE: OutputDevice = {
  id: "2226",
  name: "Bluetooth - OpenMove by Shokz",
  isEarpiece: false,
  isSpeaker: false,
  isExternalHeadset: true,
};
// A device type describing a USB audio headset.
// All capability flags are false, as observed in the logs this was taken from.
const USB_HEADSET_DEVICE: OutputDevice = {
  id: "29440",
  name: "USB headset - USB-Audio - AB13X USB Audio",
  isEarpiece: false,
  isSpeaker: false,
  isExternalHeadset: false,
};
// A device type describing a headset, which is the combination of headphones and a microphone.
const WIRED_HEADSET_DEVICE: OutputDevice = {
  id: "54509",
  name: "Wired headset - 23117RA68G",
  isEarpiece: false,
  isSpeaker: false,
  isExternalHeadset: false,
};
// A device type describing a pair of wired headphones.
const WIRED_HEADPHONE_DEVICE: OutputDevice = {
  id: "679",
  name: "Wired headphones - TB02",
  isEarpiece: false,
  isSpeaker: false,
  isExternalHeadset: false,
};
/**
 * The base device list that is always present on Android devices.
 * This list is ordered by the OS, the speaker is listed before the earpiece.
 */
const BASE_DEVICE_LIST = [SPEAKER_DEVICE, EARPIECE_DEVICE];
// Base list with a Bluetooth headset prepended (most preferred first).
const BT_HEADSET_BASE_DEVICE_LIST = [BT_HEADSET_DEVICE, ...BASE_DEVICE_LIST];
// Base list with a wired headset prepended (most preferred first).
const WIRED_HEADSET_BASE_DEVICE_LIST = [
  WIRED_HEADSET_DEVICE,
  ...BASE_DEVICE_LIST,
];
/**
 * A full device list containing all the observed device types in the wild on Android devices.
 * Ordered as they would be ordered by the OS.
 */
const FULL_DEVICE_LIST = [
  BT_HEADSET_DEVICE,
  USB_HEADSET_DEVICE,
  WIRED_HEADSET_DEVICE,
  WIRED_HEADPHONE_DEVICE,
  ...BASE_DEVICE_LIST,
];
// Scope that owns the subscriptions of the instance under test; ended after each test.
let testScope: ObservableScope;
// Mocked host-application controls; only the two selection callbacks are exercised here.
let mockControls: Controls;
beforeEach(() => {
  testScope = new ObservableScope();
  // Only the callbacks the controller invokes are mocked; the cast narrows
  // the partial mock to the full Controls interface.
  mockControls = {
    onAudioDeviceSelect: vi.fn(),
    onOutputDeviceSelect: vi.fn(),
  } as unknown as Controls;
});
afterEach(() => {
  // Tear down all subscriptions created during the test.
  testScope.end();
});
describe("Default selection", () => {
  // Asserts that both host callbacks (the current one and the deprecated one)
  // were each invoked exactly once with the given device id.
  const expectHostNotifiedOnceWith = (deviceId: string): void => {
    for (const mockFn of [
      mockControls.onAudioDeviceSelect,
      mockControls.onOutputDeviceSelect,
    ]) {
      expect(mockFn).toHaveBeenCalledTimes(1);
      expect(mockFn).toHaveBeenCalledWith(deviceId);
    }
  };
  it("Default to speaker for video calls", async () => {
    const output = new AndroidControlledAudioOutput(
      of(BASE_DEVICE_LIST),
      testScope,
      "video",
      mockControls,
    );
    const selection = await firstValueFrom(output.selected$.pipe(take(1)));
    expect(selection).toEqual({ id: SPEAKER_DEVICE.id, virtualEarpiece: false });
    expectHostNotifiedOnceWith(SPEAKER_DEVICE.id);
  });
  it("Default to earpiece for audio calls for base config", async () => {
    const output = new AndroidControlledAudioOutput(
      of(BASE_DEVICE_LIST),
      testScope,
      "audio",
      mockControls,
    );
    const selection = await firstValueFrom(output.selected$.pipe(take(1)));
    expect(selection).toEqual({ id: EARPIECE_DEVICE.id, virtualEarpiece: false });
    expectHostNotifiedOnceWith(EARPIECE_DEVICE.id);
  });
  // A headset, when present, wins over the intent-based default for both intents.
  for (const callIntent of ["audio", "video"] as const) {
    it(`Default to BT headset for ${callIntent} calls if present`, async () => {
      const output = new AndroidControlledAudioOutput(
        of(BT_HEADSET_BASE_DEVICE_LIST),
        testScope,
        callIntent,
        mockControls,
      );
      const selection = await firstValueFrom(output.selected$.pipe(take(1)));
      expect(selection).toEqual({
        id: BT_HEADSET_DEVICE.id,
        virtualEarpiece: false,
      });
      expectHostNotifiedOnceWith(BT_HEADSET_DEVICE.id);
    });
  }
  for (const callIntent of ["audio", "video"] as const) {
    it(`Default to wired headset for ${callIntent} calls if present`, async () => {
      const output = new AndroidControlledAudioOutput(
        of(WIRED_HEADSET_BASE_DEVICE_LIST),
        testScope,
        callIntent,
        mockControls,
      );
      const selection = await firstValueFrom(output.selected$.pipe(take(1)));
      expect(selection).toEqual({
        id: WIRED_HEADSET_DEVICE.id,
        virtualEarpiece: false,
      });
      expectHostNotifiedOnceWith(WIRED_HEADSET_DEVICE.id);
    });
  }
});
describe("Test mappings", () => {
it("Should map output device to correct AudioDeviceLabel", async () => {
const controlledAudioOutput = new AndroidControlledAudioOutput(
of(FULL_DEVICE_LIST),
testScope,
undefined,
mockControls,
);
const availableDevices = await firstValueFrom(
controlledAudioOutput.available$.pipe(take(1)),
);
expect(availableDevices).toEqual(
new Map([
[BT_HEADSET_DEVICE.id, { type: "name", name: BT_HEADSET_DEVICE.name }],
[
USB_HEADSET_DEVICE.id,
{ type: "name", name: USB_HEADSET_DEVICE.name },
],
[
WIRED_HEADSET_DEVICE.id,
{ type: "name", name: WIRED_HEADSET_DEVICE.name },
],
[
WIRED_HEADPHONE_DEVICE.id,
{ type: "name", name: WIRED_HEADPHONE_DEVICE.name },
],
[SPEAKER_DEVICE.id, { type: "speaker" }],
[EARPIECE_DEVICE.id, { type: "earpiece" }],
]),
);
});
});
describe("Test select a device", () => {
  it(`Switch to correct device `, () => {
    withTestScheduler(({ cold, schedule, expectObservable, flush }) => {
      const controlledAudioOutput = new AndroidControlledAudioOutput(
        cold("a", { a: FULL_DEVICE_LIST }),
        testScope,
        undefined,
        mockControls,
      );
      // Select three different devices in sequence, one frame apart.
      schedule("-abc", {
        a: () => controlledAudioOutput.select(EARPIECE_DEVICE.id),
        b: () => controlledAudioOutput.select(USB_HEADSET_DEVICE.id),
        c: () => controlledAudioOutput.select(SPEAKER_DEVICE.id),
      });
      expectObservable(controlledAudioOutput.selected$).toBe("abcd", {
        // virtualEarpiece is always false on android.
        // Initially the BT_HEADSET is selected (most preferred device in FULL_DEVICE_LIST).
        a: { id: BT_HEADSET_DEVICE.id, virtualEarpiece: false },
        b: { id: EARPIECE_DEVICE.id, virtualEarpiece: false },
        c: { id: USB_HEADSET_DEVICE.id, virtualEarpiece: false },
        d: { id: SPEAKER_DEVICE.id, virtualEarpiece: false },
      });
      flush();
      // Both host callbacks must mirror every selection, in order.
      [
        mockControls.onOutputDeviceSelect,
        mockControls.onAudioDeviceSelect,
      ].forEach((mockFn) => {
        expect(mockFn).toHaveBeenCalledTimes(4);
        expect(mockFn).toHaveBeenNthCalledWith(1, BT_HEADSET_DEVICE.id);
        expect(mockFn).toHaveBeenNthCalledWith(2, EARPIECE_DEVICE.id);
        expect(mockFn).toHaveBeenNthCalledWith(3, USB_HEADSET_DEVICE.id);
        expect(mockFn).toHaveBeenNthCalledWith(4, SPEAKER_DEVICE.id);
      });
    });
  });
  it(`manually switch then a bt headset is added`, () => {
    withTestScheduler(({ cold, schedule, expectObservable, flush }) => {
      const controlledAudioOutput = new AndroidControlledAudioOutput(
        cold("a--b", {
          a: BASE_DEVICE_LIST,
          b: BT_HEADSET_BASE_DEVICE_LIST,
        }),
        testScope,
        "audio",
        mockControls,
      );
      // Default was earpiece (audio call), let's switch to speaker
      schedule("-a--", {
        a: () => controlledAudioOutput.select(SPEAKER_DEVICE.id),
      });
      expectObservable(controlledAudioOutput.selected$).toBe("ab-c", {
        // virtualEarpiece is always false on android.
        // Initially the earpiece is selected (default for audio calls), then
        // the manual speaker pick, then the newly plugged BT headset wins.
        a: { id: EARPIECE_DEVICE.id, virtualEarpiece: false },
        b: { id: SPEAKER_DEVICE.id, virtualEarpiece: false },
        c: { id: BT_HEADSET_DEVICE.id, virtualEarpiece: false },
      });
      flush();
      // Both host callbacks must mirror every selection, in order.
      [
        mockControls.onOutputDeviceSelect,
        mockControls.onAudioDeviceSelect,
      ].forEach((mockFn) => {
        expect(mockFn).toHaveBeenCalledTimes(3);
        expect(mockFn).toHaveBeenNthCalledWith(1, EARPIECE_DEVICE.id);
        expect(mockFn).toHaveBeenNthCalledWith(2, SPEAKER_DEVICE.id);
        expect(mockFn).toHaveBeenNthCalledWith(3, BT_HEADSET_DEVICE.id);
      });
    });
  });
  it(`Go back to the previously selected after the auto-switch device goes away`, () => {
    withTestScheduler(({ cold, schedule, expectObservable, flush }) => {
      const controlledAudioOutput = new AndroidControlledAudioOutput(
        cold("a--b-c", {
          a: BASE_DEVICE_LIST,
          b: BT_HEADSET_BASE_DEVICE_LIST,
          c: BASE_DEVICE_LIST,
        }),
        testScope,
        "audio",
        mockControls,
      );
      // Default was earpiece (audio call), let's switch to speaker
      schedule("-a---", {
        a: () => controlledAudioOutput.select(SPEAKER_DEVICE.id),
      });
      expectObservable(controlledAudioOutput.selected$).toBe("ab-c-d", {
        // virtualEarpiece is always false on android.
        // Initially the earpiece is selected (default for audio calls); the
        // manual speaker pick is restored once the BT headset goes away.
        a: { id: EARPIECE_DEVICE.id, virtualEarpiece: false },
        b: { id: SPEAKER_DEVICE.id, virtualEarpiece: false },
        c: { id: BT_HEADSET_DEVICE.id, virtualEarpiece: false },
        d: { id: SPEAKER_DEVICE.id, virtualEarpiece: false },
      });
      flush();
      // Both host callbacks must mirror every selection, in order.
      [
        mockControls.onOutputDeviceSelect,
        mockControls.onAudioDeviceSelect,
      ].forEach((mockFn) => {
        expect(mockFn).toHaveBeenCalledTimes(4);
        expect(mockFn).toHaveBeenNthCalledWith(1, EARPIECE_DEVICE.id);
        expect(mockFn).toHaveBeenNthCalledWith(2, SPEAKER_DEVICE.id);
        expect(mockFn).toHaveBeenNthCalledWith(3, BT_HEADSET_DEVICE.id);
        expect(mockFn).toHaveBeenNthCalledWith(4, SPEAKER_DEVICE.id);
      });
    });
  });
});
describe("Available device changes", () => {
  // Source of device-list updates, emitted as the hosting application would.
  let availableSource$: Subject<OutputDevice[]>;
  // Creates the controller under test, bound to `availableSource$`.
  const createAudioControlledOutput = (
    intent: RTCCallIntent,
  ): AndroidControlledAudioOutput => {
    return new AndroidControlledAudioOutput(
      availableSource$,
      testScope,
      intent,
      mockControls,
    );
  };
  beforeEach(() => {
    availableSource$ = new Subject<OutputDevice[]>();
  });
  it("When a BT headset is added, control should switch to use it", () => {
    createAudioControlledOutput("video");
    // Emit the base device list, the speaker should be selected
    availableSource$.next(BASE_DEVICE_LIST);
    // Initially speaker would be selected
    [
      mockControls.onOutputDeviceSelect,
      mockControls.onAudioDeviceSelect,
    ].forEach((mockFn) => {
      expect(mockFn).toHaveBeenCalledTimes(1);
      expect(mockFn).toHaveBeenCalledWith(SPEAKER_DEVICE.id);
    });
    // Emit a new device list with a BT device, the control should switch to it
    availableSource$.next([BT_HEADSET_DEVICE, ...BASE_DEVICE_LIST]);
    [
      mockControls.onOutputDeviceSelect,
      mockControls.onAudioDeviceSelect,
    ].forEach((mockFn) => {
      expect(mockFn).toHaveBeenCalledTimes(2);
      expect(mockFn).toHaveBeenLastCalledWith(BT_HEADSET_DEVICE.id);
    });
  });
  // Android does not set `isExternalHeadset` to true for wired headphones, so we can't test this case.
  it.skip("When a wired headset is added, control should switch to use it", async () => {
    const controlledAudioOutput = createAudioControlledOutput("video");
    // Emit the base device list, the speaker should be selected
    availableSource$.next(BASE_DEVICE_LIST);
    await firstValueFrom(controlledAudioOutput.selected$.pipe(take(1)));
    // Initially speaker would be selected
    [
      mockControls.onOutputDeviceSelect,
      mockControls.onAudioDeviceSelect,
    ].forEach((mockFn) => {
      expect(mockFn).toHaveBeenCalledTimes(1);
      expect(mockFn).toHaveBeenCalledWith(SPEAKER_DEVICE.id);
    });
    // Emit a new device list with a wired headset, the control should switch to it
    availableSource$.next([WIRED_HEADPHONE_DEVICE, ...BASE_DEVICE_LIST]);
    [
      mockControls.onOutputDeviceSelect,
      mockControls.onAudioDeviceSelect,
    ].forEach((mockFn) => {
      expect(mockFn).toHaveBeenCalledTimes(2);
      expect(mockFn).toHaveBeenLastCalledWith(WIRED_HEADPHONE_DEVICE.id);
    });
  });
  it("When the active bt headset is removed on audio call, control should switch to earpiece", () => {
    createAudioControlledOutput("audio");
    // Emit the BT headset device list, the BT headset should be selected
    availableSource$.next(BT_HEADSET_BASE_DEVICE_LIST);
    // Initially the BT headset would be selected (most preferred device)
    [
      mockControls.onOutputDeviceSelect,
      mockControls.onAudioDeviceSelect,
    ].forEach((mockFn) => {
      expect(mockFn).toHaveBeenCalledTimes(1);
      expect(mockFn).toHaveBeenCalledWith(BT_HEADSET_DEVICE.id);
    });
    // Emit a new device list without the BT headset, the control should switch to the earpiece for
    // audio calls
    availableSource$.next(BASE_DEVICE_LIST);
    [
      mockControls.onOutputDeviceSelect,
      mockControls.onAudioDeviceSelect,
    ].forEach((mockFn) => {
      expect(mockFn).toHaveBeenCalledTimes(2);
      expect(mockFn).toHaveBeenLastCalledWith(EARPIECE_DEVICE.id);
    });
  });
  it("When the active bt headset is removed on video call, control should switch to speaker", () => {
    createAudioControlledOutput("video");
    availableSource$.next(BT_HEADSET_BASE_DEVICE_LIST);
    // Initially bt headset would be selected
    [
      mockControls.onOutputDeviceSelect,
      mockControls.onAudioDeviceSelect,
    ].forEach((mockFn) => {
      expect(mockFn).toHaveBeenCalledTimes(1);
      expect(mockFn).toHaveBeenCalledWith(BT_HEADSET_DEVICE.id);
    });
    // Emit a new device list without the BT headset, the control should switch to speaker for video call
    availableSource$.next(BASE_DEVICE_LIST);
    [
      mockControls.onOutputDeviceSelect,
      mockControls.onAudioDeviceSelect,
    ].forEach((mockFn) => {
      expect(mockFn).toHaveBeenCalledTimes(2);
      expect(mockFn).toHaveBeenLastCalledWith(SPEAKER_DEVICE.id);
    });
  });
  it("Do not repeatedly set the same device", () => {
    createAudioControlledOutput("video");
    // Re-emitting an identical device list must not re-trigger selection.
    availableSource$.next(BT_HEADSET_BASE_DEVICE_LIST);
    availableSource$.next(BT_HEADSET_BASE_DEVICE_LIST);
    availableSource$.next(BT_HEADSET_BASE_DEVICE_LIST);
    availableSource$.next(BT_HEADSET_BASE_DEVICE_LIST);
    availableSource$.next(BT_HEADSET_BASE_DEVICE_LIST);
    // Initially bt headset would be selected
    [
      mockControls.onOutputDeviceSelect,
      mockControls.onAudioDeviceSelect,
    ].forEach((mockFn) => {
      expect(mockFn).toHaveBeenCalledTimes(1);
      expect(mockFn).toHaveBeenCalledWith(BT_HEADSET_DEVICE.id);
    });
  });
});
describe("Scope management", () => {
  it("Should stop emitting when scope ends", () => {
    const localScope = new ObservableScope();
    const output = new AndroidControlledAudioOutput(
      of(BASE_DEVICE_LIST),
      localScope,
      undefined,
      mockControls,
    );
    expect(mockControls.onAudioDeviceSelect).toHaveBeenCalledOnce();
    localScope.end();
    // Selecting a device after the scope ended must be a no-op.
    output.select(EARPIECE_DEVICE.id);
    expect(mockControls.onAudioDeviceSelect).not.toHaveBeenCalledTimes(2);
    expect(mockControls.onAudioDeviceSelect).toHaveBeenCalledOnce();
  });
  it("Should stop updating when scope ends", () => {
    const localScope = new ObservableScope();
    const devices$ = new Subject<OutputDevice[]>();
    new AndroidControlledAudioOutput(
      devices$,
      localScope,
      undefined,
      mockControls,
    );
    devices$.next(BT_HEADSET_BASE_DEVICE_LIST);
    expect(mockControls.onAudioDeviceSelect).toHaveBeenCalledOnce();
    expect(mockControls.onAudioDeviceSelect).toHaveBeenCalledWith(
      BT_HEADSET_DEVICE.id,
    );
    localScope.end();
    // Device-list updates after the scope ended must not re-trigger selection.
    devices$.next(BASE_DEVICE_LIST);
    expect(mockControls.onAudioDeviceSelect).not.toHaveBeenCalledTimes(2);
    // Should have been called only once, with the initial BT_HEADSET_DEVICE.id.
    expect(mockControls.onAudioDeviceSelect).toHaveBeenCalledOnce();
  });
});

View File

@@ -0,0 +1,360 @@
/*
Copyright 2026 Element Corp.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { logger as rootLogger } from "matrix-js-sdk/lib/logger";
import {
distinctUntilChanged,
map,
merge,
type Observable,
scan,
startWith,
Subject,
tap,
} from "rxjs";
import {
type AudioOutputDeviceLabel,
type MediaDevice,
type SelectedAudioOutputDevice,
} from "./MediaDevices.ts";
import type { ObservableScope } from "./ObservableScope.ts";
import type { RTCCallIntent } from "matrix-js-sdk/lib/matrixrtc";
import { type Controls, type OutputDevice } from "../controls.ts";
import { type Behavior } from "./Behavior.ts";
// Internal state of the selection logic for the Android controlled audio output.
type ControllerState = {
  /**
   * The list of available output devices, ordered by preference order (most preferred first).
   */
  devices: OutputDevice[];
  /**
   * Explicit user preference for the selected device.
   * Undefined when the user has not manually picked a device.
   */
  preferredDeviceId: string | undefined;
  /**
   * The effective selected device, always valid against available devices.
   */
  selectedDeviceId: string | undefined;
};
/**
 * The possible actions that can be performed on the controller,
 * either by the user or by the system.
 *
 * - "selectDevice": the user explicitly picked a device (or cleared the pick).
 * - "deviceUpdated": a new list of available devices was provided
 *   (by the hosting application, per the constructor contract).
 */
type ControllerAction =
  | { type: "selectDevice"; deviceId: string | undefined }
  | { type: "deviceUpdated"; devices: OutputDevice[] };
/**
 * The implementation of the audio output media device for Android when using the controlled audio output mode.
 *
 * In this mode, the hosting application (e.g. Element Mobile) is responsible for providing the list of available audio output devices.
 * There is some Android-specific logic compared to the other platforms:
 * - AndroidControlledAudioOutput is the only one responsible for selecting the best output device.
 * - On Android, we don't listen to the selected device from native code (control.setAudioDevice).
 * - If a new device is added or removed, this controller will determine the new selected device based
 *   on the available devices (which are ordered by preference) and the user's selection (if any).
 *
 * Given the differences in how the native code is handling the audio routing on Android compared to iOS,
 * we have this separate implementation. It allows us to have proper testing and avoid side effects
 * from platform-specific logic breaking the other platform's implementation.
 */
export class AndroidControlledAudioOutput implements MediaDevice<
  AudioOutputDeviceLabel,
  SelectedAudioOutputDevice
> {
  private readonly logger = rootLogger.getChild(
    "[MediaDevices AndroidControlledAudioOutput]",
  );

  /**
   * STATE stream: the current state of the controller, including the list of
   * available devices and the selected device.
   */
  private readonly controllerState$: Behavior<ControllerState>;

  /**
   * @inheritdoc
   */
  public readonly available$: Behavior<Map<string, AudioOutputDeviceLabel>>;

  /**
   * Effective selected device, always valid against the available devices.
   *
   * On Android, we don't listen to the selected device from native code (control.setAudioDevice).
   * Instead, we determine the selected device ourselves based on the available
   * devices and the user's selection (if any).
   */
  public readonly selected$: Behavior<SelectedAudioOutputDevice | undefined>;

  // COMMAND stream: user asks to select a device.
  private readonly selectDeviceCommand$ = new Subject<string | undefined>();

  /**
   * Requests selection of a device by id. The effective selection is resolved
   * asynchronously and published on `selected$` (it may differ if the id is
   * not available).
   */
  public select(id: string): void {
    this.logger.info(`select device: ${id}`);
    this.selectDeviceCommand$.next(id);
  }

  /**
   * Creates an instance of AndroidControlledAudioOutput.
   *
   * @constructor
   * @param controlledDevices$ - The list of available output devices coming from the hosting application, ordered by preference order (most preferred first).
   * @param scope - The ObservableScope to create the Behaviors in.
   * @param initialIntent - The initial call intent (e.g. "audio" or "video") that can be used to determine the default audio routing (e.g. default to earpiece for audio calls and speaker for video calls).
   * @param controls - The controls provided by the hosting application to control the audio routing and notify of user actions.
   */
  public constructor(
    private readonly controlledDevices$: Observable<OutputDevice[]>,
    private readonly scope: ObservableScope,
    private initialIntent: RTCCallIntent | undefined = undefined,
    controls: Controls,
  ) {
    this.controllerState$ = this.startObservingState$();
    this.selected$ = this.effectiveSelectionFromState$(this.controllerState$);
    this.available$ = scope.behavior(
      this.controllerState$.pipe(
        map((state) => {
          this.logger.info("available devices updated:", state.devices);
          return new Map<string, AudioOutputDeviceLabel>(
            state.devices.map((outputDevice) => {
              return [outputDevice.id, mapDeviceToLabel(outputDevice)];
            }),
          );
        }),
      ),
    );
    // Effect 1: notify host when effective selection changes.
    this.selected$
      // It is a behavior so it has built-in distinct-until-changed semantics.
      .pipe(scope.bind())
      .subscribe((device) => {
        // Let the hosting application know which output device has been selected.
        if (device !== undefined) {
          this.logger.info("onAudioDeviceSelect called:", device);
          controls.onAudioDeviceSelect?.(device.id);
          // Also invoke the deprecated callback for backward compatibility.
          // TODO: it appears that on Android the hosting application is only using the deprecated callback (onOutputDeviceSelect)
          // and not the new one (onAudioDeviceSelect), we should clean this up and only have one callback for audio device selection.
          controls.onOutputDeviceSelect?.(device.id);
        }
      });
  }

  /**
   * Folds the two input streams (host device updates, user selection commands)
   * into a single Behavior of the controller state via a reducer.
   */
  private startObservingState$(): Behavior<ControllerState> {
    const initialState: ControllerState = {
      devices: [],
      preferredDeviceId: undefined,
      selectedDeviceId: undefined,
    };
    // Merge the two possible input observables as a single
    // stream of actions that will update the state of the controller.
    const actions$: Observable<ControllerAction> = merge(
      this.controlledDevices$.pipe(
        map(
          (devices) =>
            ({ type: "deviceUpdated", devices }) satisfies ControllerAction,
        ),
      ),
      this.selectDeviceCommand$.pipe(
        map(
          (deviceId) =>
            ({ type: "selectDevice", deviceId }) satisfies ControllerAction,
        ),
      ),
    );
    const initialAction: ControllerAction = {
      type: "deviceUpdated",
      devices: [],
    };
    return this.scope.behavior(
      actions$.pipe(
        startWith(initialAction),
        scan((state, action): ControllerState => {
          switch (action.type) {
            case "deviceUpdated": {
              // The host reported a new device list: re-evaluate the selection.
              const chosenDevice = this.chooseEffectiveSelection({
                previousDevices: state.devices,
                availableDevices: action.devices,
                currentSelectedId: state.selectedDeviceId,
                preferredDeviceId: state.preferredDeviceId,
              });
              return {
                ...state,
                devices: action.devices,
                selectedDeviceId: chosenDevice,
              };
            }
            case "selectDevice": {
              // The user picked a device: remember it as the preference and
              // re-evaluate the effective selection against what is available.
              const chosenDevice = this.chooseEffectiveSelection({
                previousDevices: state.devices,
                availableDevices: state.devices,
                currentSelectedId: state.selectedDeviceId,
                preferredDeviceId: action.deviceId,
              });
              return {
                ...state,
                preferredDeviceId: action.deviceId,
                selectedDeviceId: chosenDevice,
              };
            }
          }
        }, initialState),
      ),
    );
  }

  /**
   * Derives the externally visible `selected$` Behavior from the internal
   * controller state.
   */
  private effectiveSelectionFromState$(
    state$: Observable<ControllerState>,
  ): Behavior<SelectedAudioOutputDevice | undefined> {
    return this.scope.behavior(
      state$.pipe(
        map((state) => {
          if (state.selectedDeviceId) {
            return {
              id: state.selectedDeviceId,
              /** This is an iOS thing, always false for Android. */
              virtualEarpiece: false,
            };
          }
          return undefined;
        }),
        distinctUntilChanged((a, b) => a?.id === b?.id),
        tap((selected) => {
          this.logger.debug(`selected device: ${selected?.id}`);
        }),
      ),
    );
  }

  /**
   * Decides which device should effectively be selected given the previous and
   * new device lists, the last effective selection, and the user's preference.
   *
   * @returns The id of the device to select, or undefined if none is available.
   */
  private chooseEffectiveSelection(args: {
    previousDevices: OutputDevice[];
    availableDevices: OutputDevice[];
    currentSelectedId: string | undefined;
    preferredDeviceId: string | undefined;
  }): string | undefined {
    const {
      previousDevices,
      availableDevices,
      currentSelectedId,
      preferredDeviceId,
    } = args;
    this.logger.debug(`chooseEffectiveSelection with args:`, args);
    // Take preferredDeviceId in priority or default to the last effective selection.
    const activeSelectedDeviceId = preferredDeviceId || currentSelectedId;
    const isAvailable = availableDevices.some(
      (device) => device.id === activeSelectedDeviceId,
    );
    // If there is no current device, or it is not available anymore,
    // fall back to the default device selection logic.
    if (activeSelectedDeviceId === undefined || !isAvailable) {
      this.logger.debug(
        `No current device or it is not available, using default selection logic.`,
      );
      // Use the default selection logic.
      return this.chooseDefaultDeviceId(availableDevices);
    }
    // Is there a newly added device?
    // If a device is added, we might want to switch to it if it's more preferred than the currently selected device.
    const newDeviceWasAdded = availableDevices.some(
      (device) => !previousDevices.some((d) => d.id === device.id),
    );
    if (newDeviceWasAdded) {
      // TODO: only check the added device(s) rather than all devices?
      // Check if the currently selected device is the most preferred one; if not, consider switching.
      const mostPreferredDevice = availableDevices[0];
      this.logger.debug(
        `A new device was added, checking if we should switch to it.`,
        mostPreferredDevice,
      );
      if (mostPreferredDevice.id !== activeSelectedDeviceId) {
        // Given this is automatic switching, we want to be careful and only switch to a more private device
        // (e.g. from speaker to a BT headset) but not switch from a more private device to a less private one
        // (e.g. from a BT headset to the speaker), as that can be disruptive for the user if it happens unexpectedly.
        if (mostPreferredDevice.isExternalHeadset === true) {
          // Bug fix: the two ids in this log line were previously swapped.
          this.logger.info(
            `The currently selected device ${activeSelectedDeviceId} is not the most preferred one, switching to the most preferred one ${mostPreferredDevice.id} instead.`,
          );
          // Let's switch as it is a more private device.
          return mostPreferredDevice.id;
        }
      }
    }
    // No changes.
    return activeSelectedDeviceId;
  }

  /**
   * The logic for the default is different based on the call type.
   * For example for a voice call we want to default to the earpiece if it's available,
   * but for a video call we want to default to the speaker.
   * If the user is using a BT headset we want to default to that, as it's likely what they want to use for both video and voice calls.
   *
   * @param available - The available audio output devices to choose from,
   *   sorted by likelihood of being used for communication (most preferred first).
   * @returns The id of the default device, or undefined if no device is available.
   */
  private chooseDefaultDeviceId(available: OutputDevice[]): string | undefined {
    this.logger.debug(
      `Android routing logic intent: ${this.initialIntent} finding best default...`,
    );
    if (this.initialIntent === "audio") {
      const systemProposed = available[0];
      // If no headset is connected, Android will route to the speaker by default,
      // but for a voice call we want to route to the earpiece instead,
      // so override the system proposed routing in that case.
      if (systemProposed?.isSpeaker === true) {
        // Search for the earpiece.
        const earpieceDevice = available.find(
          (device) => device.isEarpiece === true,
        );
        if (earpieceDevice) {
          this.logger.debug(
            `Android routing: Switch to earpiece instead of speaker for voice call`,
          );
          return earpieceDevice.id;
        } else {
          this.logger.debug(
            `Android routing: no earpiece found, cannot switch, use system proposed routing`,
          );
          return systemProposed.id;
        }
      } else {
        this.logger.debug(
          `Android routing: Use system proposed routing `,
          systemProposed,
        );
        return systemProposed?.id;
      }
    } else {
      // Use the system's best proposed routing.
      return available[0]?.id;
    }
  }
}
// Utilities
/**
 * Translates a host-provided output device into the label structure consumed
 * by the device picker UI. Earpiece and speaker map to their dedicated label
 * types; every other device is labelled with its reported name.
 */
function mapDeviceToLabel(device: OutputDevice): AudioOutputDeviceLabel {
  if (device.isEarpiece) return { type: "earpiece" };
  if (device.isSpeaker) return { type: "speaker" };
  return { type: "name", name: device.name };
}

View File

@@ -0,0 +1,193 @@
/*
Copyright 2026 Element Corp.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { afterEach, beforeEach, describe, vi, it } from "vitest";
import * as ComponentsCore from "@livekit/components-core";
import { ObservableScope } from "./ObservableScope";
import { AudioOutput } from "./MediaDevices";
import { withTestScheduler } from "../utils/test";
// Fixture: a Bluetooth speaker as reported by the browser's device enumeration.
const BT_SPEAKER = {
  deviceId: "f9fc8f5f94578fe3abd89e086c1e78c08477aa564dd9e917950f0e7ebb37a6a2",
  kind: "audiooutput",
  label: "JBL (Bluetooth)",
  groupId: "309a5c086cd8eb885a164046db6ec834c349be01d86448d02c1a5279456ff9e4",
} as unknown as MediaDeviceInfo;
// Fixture: the machine's built-in speakers.
const BUILT_IN_SPEAKER = {
  deviceId: "acdbb8546ea6fa85ba2d861e9bcc0e71810d03bbaf6d1712c69e8d9c0c6c2e0a",
  kind: "audiooutput",
  label: "MacBook Speakers (Built-in)",
  groupId: "08a5a3a486473aaa898eb81cda3113f3e21053fb8b84155f4e612fe3f8db5d17",
} as unknown as MediaDeviceInfo;
// Fixture: a Bluetooth headset.
const BT_HEADSET = {
  deviceId: "ff8e6edb4ebb512b2b421335bfd14994a5b4c7192b3e84a8696863d83cf46d12",
  kind: "audiooutput",
  label: "OpenMove (Bluetooth)",
  groupId: "c2893c2438c44248368e0533300245c402764991506f42cd73818dc8c3ee9c88",
} as unknown as MediaDeviceInfo;
// Device list without Chrome's synthetic "default" entry.
const AMAC_DEVICE_LIST = [BT_SPEAKER, BUILT_IN_SPEAKER];
// Same list, with the synthetic "default" entry mirroring the built-in speakers.
const AMAC_DEVICE_LIST_WITH_DEFAULT = [
  asDefault(BUILT_IN_SPEAKER),
  ...AMAC_DEVICE_LIST,
];
// Device list with a BT headset connected; the "default" entry mirrors the headset.
const AMAC_HS_DEVICE_LIST = [
  asDefault(BT_HEADSET),
  BT_SPEAKER,
  BT_HEADSET,
  BUILT_IN_SPEAKER,
];
// Fixture: a laptop's own speaker (note the opaque, non-hex device ids).
const LAPTOP_SPEAKER = {
  deviceId: "EcUxTMu8He2wz+3Y8m/u0fy6M92pUk=",
  kind: "audiooutput",
  label: "Raptor AVS Speaker",
  groupId: "kSrdanhpEDLg3vN8z6Z9MJ1EdanB8zI+Q1dxA=",
} as unknown as MediaDeviceInfo;
// Fixture: an HDMI/DisplayPort output sharing the same groupId (same adapter)
// as LAPTOP_SPEAKER.
const MONITOR_SPEAKER = {
  deviceId: "gBryZdAdC8I/rrJpr9r6R+rZzKkoIK5cpU=",
  kind: "audiooutput",
  label: "Raptor AVS HDMI / DisplayPort 1 Output",
  groupId: "kSrdanhpEDLg3vN8z6Z9MJ1EdanB8zI+Q1dxA=",
} as unknown as MediaDeviceInfo;
// Second device list used for the selection tests (no synthetic "default" entry).
const DEVICE_LIST_B = [LAPTOP_SPEAKER, MONITOR_SPEAKER];
// On Chrome there is an additional synthetic device entry named
// "Default - <device name>" with the literal deviceId "default"; it mirrors
// whichever output the OS currently considers the default.
function asDefault(device: MediaDeviceInfo): MediaDeviceInfo {
  const defaultLabel = `Default - ${device.label}`;
  return Object.assign({}, device, {
    deviceId: "default",
    label: defaultLabel,
  });
}
// While permission has not yet been granted, the browser still enumerates
// every device but with empty/blank labels and ids. This models that
// transitional state for a given device.
function toBlankDevice(device: MediaDeviceInfo): MediaDeviceInfo {
  const blanked = { deviceId: "", label: "", groupId: "" };
  return Object.assign({}, device, blanked);
}
// Replace LiveKit's device observer with a mock so each test can feed its own
// (marble-scheduled) device list instead of touching real browser APIs.
vi.mock("@livekit/components-core", () => ({
  createMediaDeviceObserver: vi.fn(),
}));
describe("AudioOutput Tests", () => {
  let testScope: ObservableScope;
  beforeEach(() => {
    testScope = new ObservableScope();
  });
  afterEach(() => {
    // Tear down the scope so behaviors created by AudioOutput are disposed between tests.
    testScope.end();
  });
  it("should select the default audio output device", () => {
    // In a real life setup there would be first a blanked list
    // then the real one.
    withTestScheduler(({ behavior, cold, expectObservable }) => {
      vi.mocked(ComponentsCore.createMediaDeviceObserver).mockReturnValue(
        cold("ab", {
          // In a real life setup there would be first a blanked list
          // then the real one.
          a: AMAC_DEVICE_LIST_WITH_DEFAULT.map(toBlankDevice),
          b: AMAC_DEVICE_LIST_WITH_DEFAULT,
        }),
      );
      const audioOutput = new AudioOutput(
        behavior("a", { a: true }),
        testScope,
      );
      // Nothing is selected while the list is blank; once real device details
      // arrive the synthetic "default" entry is picked.
      expectObservable(audioOutput.selected$).toBe("ab", {
        a: undefined,
        b: { id: "default", virtualEarpiece: false },
      });
    });
  });
  it("Select the correct device when requested", () => {
    // In a real life setup there would be first a blanked list
    // then the real one.
    withTestScheduler(({ behavior, cold, schedule, expectObservable }) => {
      vi.mocked(ComponentsCore.createMediaDeviceObserver).mockReturnValue(
        cold("ab", {
          // In a real life setup there would be first a blanked list
          // then the real one.
          a: DEVICE_LIST_B.map(toBlankDevice),
          b: DEVICE_LIST_B,
        }),
      );
      const audioOutput = new AudioOutput(
        behavior("a", { a: true }),
        testScope,
      );
      // Drive a sequence of user selections after the real device list lands.
      schedule("--abc", {
        a: () => audioOutput.select(MONITOR_SPEAKER.deviceId),
        b: () => audioOutput.select(LAPTOP_SPEAKER.deviceId),
        c: () => audioOutput.select(MONITOR_SPEAKER.deviceId),
      });
      // Each selection request is reflected in selected$ in order; frame "b"
      // is the initial pick once device details are known.
      expectObservable(audioOutput.selected$).toBe("abcde", {
        a: undefined,
        b: { id: LAPTOP_SPEAKER.deviceId, virtualEarpiece: false },
        c: { id: MONITOR_SPEAKER.deviceId, virtualEarpiece: false },
        d: { id: LAPTOP_SPEAKER.deviceId, virtualEarpiece: false },
        e: { id: MONITOR_SPEAKER.deviceId, virtualEarpiece: false },
      });
    });
  });
  it("Test mappings", () => {
    // In a real life setup there would be first a blanked list
    // then the real one.
    withTestScheduler(({ behavior, cold, schedule, expectObservable }) => {
      vi.mocked(ComponentsCore.createMediaDeviceObserver).mockReturnValue(
        cold("a", {
          // In a real life setup there would be first a blanked list
          // then the real one.
          a: AMAC_HS_DEVICE_LIST,
        }),
      );
      const audioOutput = new AudioOutput(
        behavior("a", { a: true }),
        testScope,
      );
      // Every enumerated device (including the synthetic "default" entry) is
      // expected to be exposed with a plain name label keyed by its device id.
      const expectedMappings = new Map([
        [`default`, { type: "name", name: asDefault(BT_HEADSET).label }],
        [BT_SPEAKER.deviceId, { type: "name", name: BT_SPEAKER.label }],
        [BT_HEADSET.deviceId, { type: "name", name: BT_HEADSET.label }],
        [
          BUILT_IN_SPEAKER.deviceId,
          { type: "name", name: BUILT_IN_SPEAKER.label },
        ],
      ]);
      expectObservable(audioOutput.available$).toBe("a", {
        a: expectedMappings,
      });
    });
  });
});

View File

@@ -7,7 +7,6 @@ Please see LICENSE in the repository root for full details.
*/
import {
ConnectionState as LivekitConnectionState,
LocalAudioTrack,
type LocalTrackPublication,
LocalVideoTrack,
ParticipantEvent,
@@ -15,7 +14,6 @@ import {
Track,
} from "livekit-client";
import {
combineLatest,
map,
NEVER,
type Observable,
@@ -32,23 +30,6 @@ import {
trackProcessorSync,
} from "../../../livekit/TrackProcessorContext.tsx";
import { getUrlParams } from "../../../UrlParams.ts";
import {
noiseGateEnabled,
noiseGateThreshold,
noiseGateAttack,
noiseGateHold,
noiseGateRelease,
transientSuppressorEnabled,
transientThreshold,
transientRelease,
vadEnabled,
vadPositiveThreshold,
vadNegativeThreshold,
} from "../../../settings/settings.ts";
import {
type NoiseGateParams,
NoiseGateTransformer,
} from "../../../livekit/NoiseGateTransformer.ts";
import { observeTrackReference$ } from "../../observeTrackReference";
import { type Connection } from "../remoteMembers/Connection.ts";
import { ObservableScope } from "../../ObservableScope.ts";
@@ -92,8 +73,6 @@ export class Publisher {
// Setup track processor syncing (blur)
this.observeTrackProcessors(this.scope, room, trackerProcessorState$);
// Setup noise gate on the local microphone track
this.applyNoiseGate(this.scope, room);
// Observe media device changes and update LiveKit active devices accordingly
this.observeMediaDevices(this.scope, devices, controlledAudioDevices);
@@ -421,111 +400,6 @@ export class Publisher {
});
}
private applyNoiseGate(scope: ObservableScope, room: LivekitRoom): void {
// Observe the local microphone track
const audioTrack$ = scope.behavior(
observeTrackReference$(
room.localParticipant,
Track.Source.Microphone,
).pipe(
map((ref) => {
const track = ref?.publication.track;
return track instanceof LocalAudioTrack ? track : null;
}),
),
null,
);
let transformer: NoiseGateTransformer | null = null;
let audioCtx: AudioContext | null = null;
const currentParams = (): NoiseGateParams => ({
noiseGateActive: noiseGateEnabled.getValue(),
threshold: noiseGateThreshold.getValue(),
attackMs: noiseGateAttack.getValue(),
holdMs: noiseGateHold.getValue(),
releaseMs: noiseGateRelease.getValue(),
transientEnabled: transientSuppressorEnabled.getValue(),
transientThresholdDb: transientThreshold.getValue(),
transientReleaseMs: transientRelease.getValue(),
vadEnabled: vadEnabled.getValue(),
vadPositiveThreshold: vadPositiveThreshold.getValue(),
vadNegativeThreshold: vadNegativeThreshold.getValue(),
});
// Attach / detach processor when any processing feature changes or the track changes.
combineLatest([audioTrack$, noiseGateEnabled.value$, vadEnabled.value$, transientSuppressorEnabled.value$])
.pipe(scope.bind())
.subscribe(([audioTrack, ngEnabled, vadActive, transientActive]) => {
if (!audioTrack) return;
const shouldAttach = ngEnabled || vadActive || transientActive;
if (shouldAttach && !audioTrack.getProcessor()) {
const params = currentParams();
this.logger.info("[NoiseGate] attaching processor, params:", params);
transformer = new NoiseGateTransformer(params);
audioCtx = new AudioContext();
this.logger.info("[NoiseGate] AudioContext state before resume:", audioCtx.state);
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(audioTrack as any).setAudioContext(audioCtx);
audioCtx.resume().then(async () => {
this.logger.info("[NoiseGate] AudioContext state after resume:", audioCtx?.state);
// eslint-disable-next-line @typescript-eslint/no-explicit-any
return audioTrack.setProcessor(transformer as any);
}).then(() => {
this.logger.info("[NoiseGate] setProcessor resolved");
}).catch((e: unknown) => {
this.logger.error("[NoiseGate] setProcessor failed", e);
});
} else if (!shouldAttach && audioTrack.getProcessor()) {
this.logger.info("[NoiseGate] removing processor");
void audioTrack.stopProcessor();
void audioCtx?.close();
audioCtx = null;
transformer = null;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(audioTrack as any).setAudioContext(undefined);
} else if (shouldAttach && audioTrack.getProcessor()) {
// Processor already attached — push updated params (e.g. noiseGateActive toggled)
transformer?.updateParams(currentParams());
} else {
this.logger.info(
"[NoiseGate] tick — ngEnabled:", ngEnabled,
"vadActive:", vadActive,
"hasProcessor:", !!audioTrack.getProcessor(),
);
}
});
// Push all param changes (noise gate + VAD) to the live worklet.
combineLatest([
noiseGateEnabled.value$,
noiseGateThreshold.value$,
noiseGateAttack.value$,
noiseGateHold.value$,
noiseGateRelease.value$,
transientSuppressorEnabled.value$,
transientThreshold.value$,
transientRelease.value$,
vadEnabled.value$,
vadPositiveThreshold.value$,
vadNegativeThreshold.value$,
])
.pipe(scope.bind())
.subscribe(([
noiseGateActive, threshold, attackMs, holdMs, releaseMs,
transientEnabled, transientThresholdDb, transientReleaseMs,
vadActive, vadPos, vadNeg,
]) => {
transformer?.updateParams({
noiseGateActive, threshold, attackMs, holdMs, releaseMs,
transientEnabled, transientThresholdDb, transientReleaseMs,
vadEnabled: vadActive,
vadPositiveThreshold: vadPos,
vadNegativeThreshold: vadNeg,
});
});
}
private observeTrackProcessors(
scope: ObservableScope,
room: LivekitRoom,
@@ -542,5 +416,4 @@ export class Publisher {
);
trackProcessorSync(scope, track$, trackerProcessorState$);
}
}

View File

@@ -0,0 +1,132 @@
/*
Copyright 2026 Element Corp.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { logger as rootLogger } from "matrix-js-sdk/lib/logger";
import { combineLatest, merge, startWith, Subject, tap } from "rxjs";
import {
availableOutputDevices$ as controlledAvailableOutputDevices$,
outputDevice$ as controlledOutputSelection$,
} from "../controls.ts";
import type { Behavior } from "./Behavior.ts";
import type { ObservableScope } from "./ObservableScope.ts";
import {
type AudioOutputDeviceLabel,
availableRawDevices$,
iosDeviceMenu$,
type MediaDevice,
type SelectedAudioOutputDevice,
} from "./MediaDevices.ts";
// This hardcoded id is used in EX ios! It can only be changed in coordination with
// the ios swift team.
const EARPIECE_CONFIG_ID = "earpiece-id";
/**
 * A special implementation of audio output that allows the hosting application
 * to have more control over the device selection process. This is used when the
 * `controlledAudioDevices` URL parameter is set, which is currently only true on mobile.
 */
export class IOSControlledAudioOutput implements MediaDevice<
  AudioOutputDeviceLabel,
  SelectedAudioOutputDevice
> {
  private logger = rootLogger.getChild("[MediaDevices ControlledAudioOutput]");
  // We need to subscribe to the raw devices so that the OS updates the input
  // back to what it was before; otherwise we would switch back to the default
  // whenever we allocate a new stream.
  public readonly availableRaw$ = availableRawDevices$(
    "audiooutput",
    this.usingNames$,
    this.scope,
    this.logger,
  );
  // Available devices as reported by the hosting application, keyed by id and
  // mapped to UI labels, plus a virtual earpiece entry when applicable.
  public readonly available$ = this.scope.behavior(
    combineLatest(
      [controlledAvailableOutputDevices$.pipe(startWith([])), iosDeviceMenu$],
      (availableRaw, iosDeviceMenu) => {
        const available = new Map<string, AudioOutputDeviceLabel>(
          availableRaw.map(
            ({ id, name, isEarpiece, isSpeaker /*,isExternalHeadset*/ }) => {
              let deviceLabel: AudioOutputDeviceLabel;
              // if (isExternalHeadset) // Do we want this?
              if (isEarpiece) deviceLabel = { type: "earpiece" };
              else if (isSpeaker) deviceLabel = { type: "speaker" };
              else deviceLabel = { type: "name", name };
              return [id, deviceLabel];
            },
          ),
        );
        // Create a virtual earpiece device in case a non-earpiece device is
        // designated for this purpose
        if (iosDeviceMenu && availableRaw.some((d) => d.forEarpiece)) {
          this.logger.info(
            `IOS Add virtual earpiece device with id ${EARPIECE_CONFIG_ID}`,
          );
          available.set(EARPIECE_CONFIG_ID, { type: "earpiece" });
        }
        return available;
      },
    ),
  );
  // COMMAND stream: device selections requested through the UI.
  private readonly deviceSelection$ = new Subject<string>();
  // Requests selection of a device by id; resolved via selected$.
  public select(id: string): void {
    this.logger.info(`select device: ${id}`);
    this.deviceSelection$.next(id);
  }
  // Effective selection: the most recent preferred id (from the host or the
  // UI) when set, otherwise the first available device.
  public readonly selected$ = this.scope.behavior(
    combineLatest(
      [
        this.available$,
        merge(
          controlledOutputSelection$.pipe(startWith(undefined)),
          this.deviceSelection$,
        ),
      ],
      (available, preferredId) => {
        const id = preferredId ?? available.keys().next().value;
        return id === undefined
          ? undefined
          : { id, virtualEarpiece: id === EARPIECE_CONFIG_ID };
      },
    ).pipe(
      tap((selected) => {
        this.logger.debug(`selected device: ${selected?.id}`);
      }),
    ),
  );
  public constructor(
    private readonly usingNames$: Behavior<boolean>,
    private readonly scope: ObservableScope,
  ) {
    // NOTE(review): these subscriptions are never explicitly unsubscribed;
    // presumably the scoped behaviors complete when `scope` ends — confirm.
    this.selected$.subscribe((device) => {
      // Let the hosting application know which output device has been selected.
      // This information is probably only of interest if the earpiece mode has
      // been selected - for example, Element X iOS listens to this to determine
      // whether it should enable the proximity sensor.
      if (device !== undefined) {
        this.logger.info("onAudioDeviceSelect called:", device);
        window.controls.onAudioDeviceSelect?.(device.id);
        // Also invoke the deprecated callback for backward compatibility
        window.controls.onOutputDeviceSelect?.(device.id);
      }
    });
    this.available$.subscribe((available) => {
      this.logger.debug("available devices:", available);
    });
    this.availableRaw$.subscribe((availableRaw) => {
      this.logger.debug("available raw devices:", availableRaw);
    });
  }
}

View File

@@ -9,35 +9,28 @@ import {
combineLatest,
filter,
map,
merge,
type Observable,
pairwise,
startWith,
Subject,
switchMap,
type Observable,
} from "rxjs";
import { createMediaDeviceObserver } from "@livekit/components-core";
import { type Logger, logger as rootLogger } from "matrix-js-sdk/lib/logger";
import {
alwaysShowIphoneEarpiece as alwaysShowIphoneEarpieceSetting,
audioInput as audioInputSetting,
audioOutput as audioOutputSetting,
videoInput as videoInputSetting,
alwaysShowIphoneEarpiece as alwaysShowIphoneEarpieceSetting,
} from "../settings/settings";
import { type ObservableScope } from "./ObservableScope";
import {
outputDevice$ as controlledOutputSelection$,
availableOutputDevices$ as controlledAvailableOutputDevices$,
} from "../controls";
import { availableOutputDevices$ as controlledAvailableOutputDevices$ } from "../controls";
import { getUrlParams } from "../UrlParams";
import { platform } from "../Platform";
import { switchWhen } from "../utils/observable";
import { type Behavior, constant } from "./Behavior";
// This hardcoded id is used in EX ios! It can only be changed in coordination with
// the ios swift team.
const EARPIECE_CONFIG_ID = "earpiece-id";
import { AndroidControlledAudioOutput } from "./AndroidControlledAudioOutput.ts";
import { IOSControlledAudioOutput } from "./IOSControlledAudioOutput.ts";
export type DeviceLabel =
| { type: "name"; name: string }
@@ -49,10 +42,18 @@ export type AudioOutputDeviceLabel =
| { type: "earpiece" }
| { type: "default"; name: string | null };
/**
* Base selected-device value shared by all media kinds.
*
* `id` is the effective device identifier used by browser media APIs.
*/
export interface SelectedDevice {
id: string;
}
/**
* Selected audio input value with audio-input-specific metadata.
*/
export interface SelectedAudioInputDevice extends SelectedDevice {
/**
* Emits whenever we think that this audio input device has logically changed
@@ -61,6 +62,9 @@ export interface SelectedAudioInputDevice extends SelectedDevice {
hardwareDeviceChange$: Observable<void>;
}
/**
* Selected audio output value with output-routing-specific metadata.
*/
export interface SelectedAudioOutputDevice extends SelectedDevice {
/**
* Whether this device is a "virtual earpiece" device. If so, we should output
@@ -69,23 +73,42 @@ export interface SelectedAudioOutputDevice extends SelectedDevice {
virtualEarpiece: boolean;
}
/**
* Common reactive contract for selectable input/output media devices (mic, speaker, camera).
*
* `Label` is the type used to represent a device in UI lists.
* `Selected` is the type used to represent the active selection for a device kind.
*/
export interface MediaDevice<Label, Selected> {
/**
* A map from available device IDs to labels.
* Reactive map of currently available devices keyed by device ID.
*
* `Label` defines the UI-facing label data structure for each device type.
*/
available$: Behavior<Map<string, Label>>;
/**
* The selected device.
* The active device selection.
* Can be `undefined` when no device is yet selected.
*
* When defined, `Selected` contains the selected device ID plus any
* type-specific metadata.
*/
selected$: Behavior<Selected | undefined>;
/**
* Selects a new device.
* Requests selection of a device by ID.
*
* Implementations typically persist this preference and let `selected$`
* converge to the effective device (which may differ if the requested ID is
* unavailable).
*/
select(id: string): void;
}
/**
* An observable that represents if we should display the devices menu for iOS.
*
* This implies the following
* - hide any input devices (they do not work anyhow on ios)
* - Show a button to show the native output picker instead.
@@ -95,7 +118,7 @@ export interface MediaDevice<Label, Selected> {
export const iosDeviceMenu$ =
platform === "ios" ? constant(true) : alwaysShowIphoneEarpieceSetting.value$;
function availableRawDevices$(
export function availableRawDevices$(
kind: MediaDeviceKind,
usingNames$: Behavior<boolean>,
scope: ObservableScope,
@@ -146,16 +169,23 @@ function selectDevice$<Label>(
): Observable<string | undefined> {
return combineLatest([available$, preferredId$], (available, preferredId) => {
if (available.size) {
// If the preferred device is available, use it. Or if every available
// device ID is falsy, the browser is probably just being paranoid about
// fingerprinting and we should still try using the preferred device.
// Worst case it is not available and the browser will gracefully fall
// back to some other device for us when requesting the media stream.
// Otherwise, select the first available device.
return (preferredId !== undefined && available.has(preferredId)) ||
(available.size === 1 && available.has(""))
? preferredId
: available.keys().next().value;
if (preferredId !== undefined && available.has(preferredId)) {
// If the preferred device is available, use it.
return preferredId;
} else if (available.size === 1 && available.has("")) {
// In some cases the enumerateDevices will list the devices with empty string details:
// `{deviceId:'', kind:'audiooutput|audioinput|videoinput', label:'', groupId:''}`
// This can happen when:
// 1. The user has not yet granted permissions to microphone/devices
// 2. The page is not running in a secure context (e.g. localhost or https)
// 3. In embedded WebViews, restrictions are often tighter and an active capture may be needed.
// 4. The browser is blocking access to device details for privacy reasons (?)
// This is most likely transitional, so keep the current device selected until we get a more accurate enumerateDevices.
return preferredId;
} else {
// No preferred, so pick a default.
return available.keys().next().value;
}
}
return undefined;
});
@@ -212,7 +242,7 @@ class AudioInput implements MediaDevice<DeviceLabel, SelectedAudioInputDevice> {
}
}
class AudioOutput implements MediaDevice<
export class AudioOutput implements MediaDevice<
AudioOutputDeviceLabel,
SelectedAudioOutputDevice
> {
@@ -251,14 +281,16 @@ class AudioOutput implements MediaDevice<
public readonly selected$ = this.scope.behavior(
selectDevice$(this.available$, audioOutputSetting.value$).pipe(
map((id) =>
id === undefined
? undefined
: {
map((id) => {
if (id === undefined) {
return undefined;
} else {
return {
id,
virtualEarpiece: false,
},
),
};
}
}),
),
);
public select(id: string): void {
@@ -275,103 +307,6 @@ class AudioOutput implements MediaDevice<
}
}
class ControlledAudioOutput implements MediaDevice<
AudioOutputDeviceLabel,
SelectedAudioOutputDevice
> {
private logger = rootLogger.getChild("[MediaDevices ControlledAudioOutput]");
// We need to subscribe to the raw devices so that the OS does update the input
// back to what it was before. otherwise we will switch back to the default
// whenever we allocate a new stream.
public readonly availableRaw$ = availableRawDevices$(
"audiooutput",
this.usingNames$,
this.scope,
this.logger,
);
public readonly available$ = this.scope.behavior(
combineLatest(
[controlledAvailableOutputDevices$.pipe(startWith([])), iosDeviceMenu$],
(availableRaw, iosDeviceMenu) => {
const available = new Map<string, AudioOutputDeviceLabel>(
availableRaw.map(
({ id, name, isEarpiece, isSpeaker /*,isExternalHeadset*/ }) => {
let deviceLabel: AudioOutputDeviceLabel;
// if (isExternalHeadset) // Do we want this?
if (isEarpiece) deviceLabel = { type: "earpiece" };
else if (isSpeaker) deviceLabel = { type: "speaker" };
else deviceLabel = { type: "name", name };
return [id, deviceLabel];
},
),
);
// Create a virtual earpiece device in case a non-earpiece device is
// designated for this purpose
if (iosDeviceMenu && availableRaw.some((d) => d.forEarpiece))
available.set(EARPIECE_CONFIG_ID, { type: "earpiece" });
return available;
},
),
);
private readonly deviceSelection$ = new Subject<string>();
public select(id: string): void {
this.deviceSelection$.next(id);
}
public readonly selected$ = this.scope.behavior(
combineLatest(
[
this.available$,
merge(
controlledOutputSelection$.pipe(startWith(undefined)),
this.deviceSelection$,
),
],
(available, preferredId) => {
const id = preferredId ?? available.keys().next().value;
return id === undefined
? undefined
: { id, virtualEarpiece: id === EARPIECE_CONFIG_ID };
},
),
);
/**
 * @param usingNames$ - whether device names may be read; forwarded to the
 *   availableRawDevices$ helper backing availableRaw$
 * @param scope - observable scope used to create the behaviors on this class
 */
public constructor(
private readonly usingNames$: Behavior<boolean>,
private readonly scope: ObservableScope,
) {
// NOTE(review): these subscriptions have no explicit unsubscribe here —
// presumably their lifetime is bounded by the scope/behaviors; verify.
this.selected$.subscribe((device) => {
// Let the hosting application know which output device has been selected.
// This information is probably only of interest if the earpiece mode has
// been selected - for example, Element X iOS listens to this to determine
// whether it should enable the proximity sensor.
if (device !== undefined) {
this.logger.info(
"[controlled-output] onAudioDeviceSelect called:",
device,
);
window.controls.onAudioDeviceSelect?.(device.id);
// Also invoke the deprecated callback for backward compatibility
window.controls.onOutputDeviceSelect?.(device.id);
}
});
// Log both the mapped and raw device lists to aid debugging of
// host-controlled audio routing.
this.available$.subscribe((available) => {
this.logger.info("[controlled-output] available devices:", available);
});
this.availableRaw$.subscribe((availableRaw) => {
this.logger.info(
"[controlled-output] available raw devices:",
availableRaw,
);
});
}
}
class VideoInput implements MediaDevice<DeviceLabel, SelectedDevice> {
private logger = rootLogger.getChild("[MediaDevices VideoInput]");
@@ -434,7 +369,14 @@ export class MediaDevices {
AudioOutputDeviceLabel,
SelectedAudioOutputDevice
> = getUrlParams().controlledAudioDevices
? new ControlledAudioOutput(this.usingNames$, this.scope)
? platform == "android"
? new AndroidControlledAudioOutput(
controlledAvailableOutputDevices$,
this.scope,
getUrlParams().callIntent,
window.controls,
)
: new IOSControlledAudioOutput(this.usingNames$, this.scope)
: new AudioOutput(this.usingNames$, this.scope);
public readonly videoInput: MediaDevice<DeviceLabel, SelectedDevice> =

View File

@@ -93,6 +93,7 @@ export const initializeWidget = (
logger.info("Widget API is available");
const api = new WidgetApi(widgetId, parentOrigin);
api.requestCapability(MatrixCapabilities.AlwaysOnScreen);
api.requestCapability(MatrixCapabilities.MSC4039DownloadFile);
// Set up the lazy action emitter, but only for select actions that we
// intend for the app to handle

View File

@@ -7,6 +7,7 @@ Please see LICENSE in the repository root for full details.
import {
loadEnv,
PluginOption,
searchForWorkspaceRoot,
type ConfigEnv,
type UserConfig,
@@ -33,8 +34,7 @@ export default ({
// In future we might be able to do what is needed via code splitting at
// build time.
process.env.VITE_PACKAGE = packageType ?? "full";
const plugins = [
const plugins: PluginOption[] = [
react(),
svgrPlugin({
svgrOptions: {

View File

@@ -6236,8 +6236,8 @@ __metadata:
linkType: hard
"@vector-im/compound-design-tokens@npm:^6.0.0":
version: 6.6.0
resolution: "@vector-im/compound-design-tokens@npm:6.6.0"
version: 6.10.2
resolution: "@vector-im/compound-design-tokens@npm:6.10.2"
peerDependencies:
"@types/react": "*"
react: ^17 || ^18 || ^19.0.0
@@ -6246,13 +6246,13 @@ __metadata:
optional: true
react:
optional: true
checksum: 10c0/93b152dd1de96371f9b6b1f7dbcc381d7ab598031dbc900f52d610f015766c0d4426ae6e47d417e723bfb62d1a53099155b4d788848b78232916ba132c03c2fe
checksum: 10c0/bcac6d79fcfb8cc1356d65dff576bdad217edd0df189a5dea032b0fd57cef335b73ad6d8e395709245bc1c6a8c672a83144ecea48550ca560544d2399af8f2d3
languageName: node
linkType: hard
"@vector-im/compound-web@npm:^8.0.0":
version: 8.3.4
resolution: "@vector-im/compound-web@npm:8.3.4"
version: 8.4.0
resolution: "@vector-im/compound-web@npm:8.4.0"
dependencies:
"@floating-ui/react": "npm:^0.27.0"
"@radix-ui/react-context-menu": "npm:^2.2.16"
@@ -6272,7 +6272,7 @@ __metadata:
peerDependenciesMeta:
"@types/react":
optional: true
checksum: 10c0/44764fa64b5fce2e7181e25b50ee970eda4d921cf650b92bd5e88df0eb60872f3086b8702d18f55c3e39b3751ac19f10bafda8c4306df65c3605bd44b297d95c
checksum: 10c0/31b73555c47b373d4250872bfe863a030b487197bf1198e3cf3a1ec344f2b02f0c72c1513bb598c1cbd7a91d3c6a334d0c8ae37bd7c90d4859c864fc223e059a
languageName: node
linkType: hard