summary | refs | log | tree | commit | diff
path: root/client-web/source
diff options
context:
space:
mode:
Diffstat (limited to 'client-web/source')
-rw-r--r--  client-web/source/preferences/decl.ts   2
-rw-r--r--  client-web/source/resource/track.ts    35
2 files changed, 35 insertions, 2 deletions
diff --git a/client-web/source/preferences/decl.ts b/client-web/source/preferences/decl.ts
index effd885..f3f8e84 100644
--- a/client-web/source/preferences/decl.ts
+++ b/client-web/source/preferences/decl.ts
@@ -32,6 +32,8 @@ export const PREF_DECLS = {
camera_facing_mode: { type: optional(string), possible_values: ["environment", "user"], description: "Prefer user-facing or env-facing camera" },
auto_gain_control: { type: bool, description: "Automatically adjust mic gain" },
echo_cancellation: { type: bool, description: "Cancel echo" },
+ audio_activity_threshold: { type: number, optional: true, default: 0.003, description: "Audio activity threshold" },
+
// TODO differenciate between mic, cam and screen
optional_audio_default_enable: { type: bool, default: true, description: "Enable audio tracks by default" },
optional_video_default_enable: { type: bool, default: false, description: "Enable video tracks by default" },
diff --git a/client-web/source/resource/track.ts b/client-web/source/resource/track.ts
index 7d53522..58157cf 100644
--- a/client-web/source/resource/track.ts
+++ b/client-web/source/resource/track.ts
@@ -62,24 +62,55 @@ export function new_local_track(info: ProvideInfo, track: TrackHandle): LocalRes
}
function create_track_display(track: TrackHandle): HTMLElement {
- const el = document.createElement("div")
const is_video = track.kind == "video"
- const media_el = is_video ? document.createElement("video") : document.createElement("audio")
+ const is_audio = track.kind == "audio"
+
const stream = new MediaStream([track.track])
+
+ const el = document.createElement("div")
+
+ const media_el = is_video
+ ? document.createElement("video")
+ : document.createElement("audio")
+
media_el.srcObject = stream
media_el.classList.add("media")
media_el.autoplay = true
media_el.controls = true
media_el.addEventListener("pause", () => media_el.play())
+
if (track.local) media_el.muted = true
el.append(media_el)
track.addEventListener("ended", () => {
media_el.srcObject = null // TODO // TODO figure out why i wrote todo here
el.remove()
})
+
+ if (is_audio && PREFS.audio_activity_threshold !== undefined) check_volume(stream, vol => {
+ const active = vol > PREFS.audio_activity_threshold
+ if (active != el.classList.contains("audio-active")) {
+ if (active) el.classList.add("audio-active")
+ else el.classList.remove("audio-active")
+ }
+ })
+
return el
}
+function check_volume(track: MediaStream, cb: (vol: number) => void) {
+ const ctx = new AudioContext();
+ const s = ctx.createMediaStreamSource(track)
+ const a = ctx.createAnalyser()
+ s.connect(a)
+ const samples = new Float32Array(a.fftSize);
+ setInterval(() => {
+ a.getFloatTimeDomainData(samples);
+ let sum = 0.0;
+ for (const amplitude of samples) { sum += amplitude * amplitude; }
+ cb(Math.sqrt(sum / samples.length))
+ }, 1000 / 15)
+}
+
export async function create_camera_res() {
log("media", "requesting user media (camera)")
const user_media = await window.navigator.mediaDevices.getUserMedia({