Fix joining calls without audio or video inputs (#9486)

The lobby view was requesting a stream with both video and audio, which fails outright if the system lacks either kind of device. Requesting just one of audio or video is enough to get all device labels.
Robin authored 2022-10-24 09:03:05 -04:00, committed by GitHub
commit daf097e123
parent eafc2d23a7


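The patch boils down to one pattern: enumerate devices first, then open at most one preview stream (audio if any microphone exists, otherwise video) and enumerate again, because several browsers return blank device labels until a stream is held. Below is a minimal, self-contained sketch of that pattern using only the standard navigator.mediaDevices API; it is not the project code, and the helper names (listInputs, getLabelledInputs) are ours.

// Minimal sketch (assumed helper names, not the project's MediaDeviceHandler):
// hold at most one preview stream so that enumerateDevices() returns labels.

async function listInputs(): Promise<{ audio: MediaDeviceInfo[]; video: MediaDeviceInfo[] }> {
    const devices = await navigator.mediaDevices.enumerateDevices();
    return {
        audio: devices.filter(d => d.kind === "audioinput"),
        video: devices.filter(d => d.kind === "videoinput"),
    };
}

async function getLabelledInputs(videoMuted: boolean, videoInputId?: string): Promise<{
    stream: MediaStream | null;
    audio: MediaDeviceInfo[];
    video: MediaDeviceInfo[];
}> {
    let { audio, video } = await listInputs();
    let stream: MediaStream | null = null;

    try {
        if (audio.length > 0) {
            // An audio-only stream is enough to unlock labels, so only request
            // video when it will actually be displayed.
            stream = await navigator.mediaDevices.getUserMedia({
                audio: true,
                video: !videoMuted && video.length > 0 && { deviceId: videoInputId },
            });
        } else if (video.length > 0) {
            // No microphones at all: a video stream is the only way to get labels.
            stream = await navigator.mediaDevices.getUserMedia({ video: { deviceId: videoInputId } });
        }
        // If there are no input devices at all, we simply return empty lists.
    } catch (e) {
        console.error("Failed to get preview stream", e);
    }

    // Labels are typically blank until a stream is held; enumerate again now that we have one.
    if (stream !== null) ({ audio, video } = await listInputs());

    return { stream, audio, video };
}

A caller would then do something like `const { stream, audio, video } = await getLabelledInputs(videoMuted, videoInputId);` and stop the stream's tracks once the preview is no longer needed, which is exactly what the hunks below do with the project's own wrappers.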
@@ -33,7 +33,7 @@ import MatrixClientContext from "../../../contexts/MatrixClientContext";
 import AppTile from "../elements/AppTile";
 import { _t } from "../../../languageHandler";
 import { useAsyncMemo } from "../../../hooks/useAsyncMemo";
-import MediaDeviceHandler, { MediaDeviceKindEnum } from "../../../MediaDeviceHandler";
+import MediaDeviceHandler from "../../../MediaDeviceHandler";
 import { CallStore } from "../../../stores/CallStore";
 import IconizedContextMenu, {
     IconizedContextMenuOption,
@@ -141,36 +141,38 @@ export const Lobby: FC<LobbyProps> = ({ room, joinCallButtonDisabled, joinCallBu
     }, [videoMuted, setVideoMuted]);
 
     const [videoStream, audioInputs, videoInputs] = useAsyncMemo(async () => {
-        let previewStream: MediaStream;
+        let devices = await MediaDeviceHandler.getDevices();
+
+        // We get the preview stream before requesting devices: this is because
+        // we need (in some browsers) an active media stream in order to get
+        // non-blank labels for the devices.
+        let stream: MediaStream | null = null;
         try {
-            // We get the preview stream before requesting devices: this is because
-            // we need (in some browsers) an active media stream in order to get
-            // non-blank labels for the devices. According to the docs, we
-            // need a stream of each type (audio + video) if we want to enumerate
-            // audio & video devices, although this didn't seem to be the case
-            // in practice for me. We request both anyway.
-            // For similar reasons, we also request a stream even if video is muted,
-            // which could be a bit strange but allows us to get the device list
-            // reliably. One option could be to try & get devices without a stream,
-            // then try again with a stream if we get blank deviceids, but... ew.
-            previewStream = await navigator.mediaDevices.getUserMedia({
-                video: { deviceId: videoInputId },
-                audio: { deviceId: MediaDeviceHandler.getAudioInput() },
-            });
+            if (devices.audioinput.length > 0) {
+                // Holding just an audio stream will be enough to get us all device labels, so
+                // if video is muted, don't bother requesting video.
+                stream = await navigator.mediaDevices.getUserMedia({
+                    audio: true,
+                    video: !videoMuted && devices.videoinput.length > 0 && { deviceId: videoInputId },
+                });
+            } else if (devices.videoinput.length > 0) {
+                // We have to resort to a video stream, even if video is supposed to be muted.
+                stream = await navigator.mediaDevices.getUserMedia({ video: { deviceId: videoInputId } });
+            }
         } catch (e) {
             logger.error(`Failed to get stream for device ${videoInputId}`, e);
         }
 
-        const devices = await MediaDeviceHandler.getDevices();
+        // Refresh the devices now that we hold a stream
+        if (stream !== null) devices = await MediaDeviceHandler.getDevices();
 
-        // If video is muted, we don't actually want the stream, so we can get rid of
-        // it now.
+        // If video is muted, we don't actually want the stream, so we can get rid of it now.
         if (videoMuted) {
-            previewStream.getTracks().forEach(t => t.stop());
-            previewStream = undefined;
+            stream?.getTracks().forEach(t => t.stop());
+            stream = null;
         }
 
-        return [previewStream, devices[MediaDeviceKindEnum.AudioInput], devices[MediaDeviceKindEnum.VideoInput]];
+        return [stream, devices.audioinput, devices.videoinput];
     }, [videoInputId, videoMuted], [null, [], []]);
 
     const setAudioInput = useCallback((device: MediaDeviceInfo) => {
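One detail in the hunk above that is easy to misread: the `video` constraint is built with short-circuiting `&&`, so a single getUserMedia call covers both the muted and unmuted cases. A small standalone illustration (the variables here are stand-ins, not the component's state):

// `&&` yields its left operand when that operand is falsy, otherwise its right
// operand, so this is `false` when video is muted or no camera exists, and a
// MediaTrackConstraints object otherwise.
const videoMuted = true;
const videoInputs: MediaDeviceInfo[] = [];
const videoInputId = "default";
const video: boolean | MediaTrackConstraints =
    !videoMuted && videoInputs.length > 0 && { deviceId: videoInputId };
// -> false here; with video unmuted and a camera present it would be { deviceId: "default" }.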
@@ -188,7 +190,7 @@ export const Lobby: FC<LobbyProps> = ({ room, joinCallButtonDisabled, joinCallBu
             videoElement.play();
 
             return () => {
-                videoStream?.getTracks().forEach(track => track.stop());
+                videoStream.getTracks().forEach(track => track.stop());
                 videoElement.srcObject = null;
             };
         }
@@ -358,7 +360,7 @@ const JoinCallView: FC<JoinCallViewProps> = ({ room, resizing, call }) => {
         lobby = <Lobby
             room={room}
             connect={connect}
-            joinCallButtonTooltip={joinCallButtonTooltip}
+            joinCallButtonTooltip={joinCallButtonTooltip ?? undefined}
             joinCallButtonDisabled={joinCallButtonDisabled}
         >
             { facePile }