openvidu-browser: Use Track.contentHint to configure WebRTC Sender

pull/699/head
Juan Navarro 2022-02-03 16:49:01 +01:00
parent f10649b40c
commit b5e645f13d
2 changed files with 81 additions and 72 deletions

View File

@ -27,6 +27,7 @@ import { OpenViduError, OpenViduErrorName } from '../OpenViduInternal/Enums/Open
import { VideoInsertMode } from '../OpenViduInternal/Enums/VideoInsertMode'; import { VideoInsertMode } from '../OpenViduInternal/Enums/VideoInsertMode';
import { OpenViduLogger } from '../OpenViduInternal/Logger/OpenViduLogger'; import { OpenViduLogger } from '../OpenViduInternal/Logger/OpenViduLogger';
import { PlatformUtils } from '../OpenViduInternal/Utils/Platform'; import { PlatformUtils } from '../OpenViduInternal/Utils/Platform';
import { TypeOfVideo } from '../OpenViduInternal/Enums/TypeOfVideo';
/** /**
* @hidden * @hidden
@ -398,6 +399,37 @@ export class Publisher extends StreamManager {
mediaStream.getVideoTracks()[0].enabled = enabled; mediaStream.getVideoTracks()[0].enabled = enabled;
} }
// Set Content Hint on all MediaStreamTracks
for (const track of mediaStream.getAudioTracks()) {
if (!track.contentHint?.length) {
// contentHint for audio: "", "speech", "speech-recognition", "music".
// https://w3c.github.io/mst-content-hint/#audio-content-hints
track.contentHint = "";
logger.info(`Audio track Content Hint set: '${track.contentHint}'`);
}
}
for (const track of mediaStream.getVideoTracks()) {
if (!track.contentHint?.length) {
// contentHint for video: "", "motion", "detail", "text".
// https://w3c.github.io/mst-content-hint/#video-content-hints
switch (this.stream.typeOfVideo) {
case TypeOfVideo.SCREEN:
track.contentHint = "detail";
break;
case TypeOfVideo.CUSTOM:
logger.warn("CUSTOM type video track was provided without Content Hint!");
track.contentHint = "motion";
break;
case TypeOfVideo.CAMERA:
case TypeOfVideo.IPCAM:
default:
track.contentHint = "motion";
break;
}
logger.info(`Video track Content Hint set: '${track.contentHint}'`);
}
}
this.initializeVideoReference(mediaStream); this.initializeVideoReference(mediaStream);
if (!this.stream.displayMyRemote()) { if (!this.stream.displayMyRemote()) {
@ -443,6 +475,7 @@ export class Publisher extends StreamManager {
this.stream.isLocalStreamReadyToPublish = true; this.stream.isLocalStreamReadyToPublish = true;
this.stream.ee.emitEvent('stream-ready-to-publish', []); this.stream.ee.emitEvent('stream-ready-to-publish', []);
} }
return resolve(); return resolve();
}; };
@ -606,7 +639,7 @@ export class Publisher extends StreamManager {
/** /**
* @hidden * @hidden
* *
* To obtain the videoDimensions we wait for the video reference to have enough metadata * To obtain the videoDimensions we wait for the video reference to have enough metadata
* and then try to use the MediaStreamTrack.getSettings() method. If not available, then we * and then try to use the MediaStreamTrack.getSettings() method. If not available, then we
* use the HTMLVideoElement properties videoWidth and videoHeight * use the HTMLVideoElement properties videoWidth and videoHeight

View File

@ -175,76 +175,55 @@ export class WebRtcPeer {
streams: [this.configuration.mediaStream], streams: [this.configuration.mediaStream],
}; };
if (track.kind === "audio") { if (track.kind === "video" && this.configuration.simulcast) {
if ("contentHint" in track) { // Check if the requested size is enough to ask for 3 layers.
// For audio: "", "speech", "speech-recognition", "music". const trackSettings = track.getSettings();
// https://w3c.github.io/mst-content-hint/#audio-content-hints const trackConsts = track.getConstraints();
track.contentHint = "";
logger.info(`[createOffer] Audio track Content Hint set: '${track.contentHint}'`); const trackWidth: number =
} trackSettings.width ??
} else if (track.kind === "video") { (trackConsts.width as ConstrainULongRange).ideal ??
if ("contentHint" in track) { (trackConsts.width as number) ??
// For video: "", "motion", "detail", "text". 0;
// https://w3c.github.io/mst-content-hint/#video-content-hints const trackHeight: number =
if (this.configuration.typeOfVideo === TypeOfVideo.SCREEN) { trackSettings.height ??
track.contentHint = "detail"; (trackConsts.height as ConstrainULongRange).ideal ??
} else { (trackConsts.height as number) ??
track.contentHint = "motion"; 0;
} logger.info(`[createOffer] Video track dimensions: ${trackWidth}x${trackHeight}`);
logger.info(`[createOffer] Video track Content Hint set: '${track.contentHint}'`);
const trackPixels = trackWidth * trackHeight;
let maxLayers = 0;
if (trackPixels >= 960 * 540) {
maxLayers = 3;
} else if (trackPixels >= 480 * 270) {
maxLayers = 2;
} else {
maxLayers = 1;
} }
if (this.configuration.simulcast) { tcInit.sendEncodings = [];
// Check if the requested size is enough to ask for 3 layers. for (let l = 0; l < maxLayers; l++) {
const trackSettings = track.getSettings(); const layerDiv = 2 ** (maxLayers - l - 1);
const trackConsts = track.getConstraints();
const trackWidth: number = const encoding: RTCRtpEncodingParameters = {
trackSettings.width ?? rid: "rDiv" + layerDiv.toString(),
(trackConsts.width as ConstrainULongRange).ideal ??
(trackConsts.width as number) ??
0;
const trackHeight: number =
trackSettings.height ??
(trackConsts.height as ConstrainULongRange).ideal ??
(trackConsts.height as number) ??
0;
logger.info(`[createOffer] Video track dimensions: ${trackWidth}x${trackHeight}`);
const trackPixels = trackWidth * trackHeight; // @ts-ignore -- Property missing from DOM types.
let maxLayers = 0; scalabilityMode: "L1T1",
if (trackPixels >= 960 * 540) { };
maxLayers = 3;
} else if (trackPixels >= 480 * 270) { if (["detail", "text"].includes(track.contentHint)) {
maxLayers = 2; // Prioritize best resolution, for maximum picture detail.
encoding.scaleResolutionDownBy = 1.0;
// @ts-ignore -- Property missing from DOM types.
encoding.maxFramerate = Math.floor(30 / layerDiv);
} else { } else {
maxLayers = 1; encoding.scaleResolutionDownBy = layerDiv;
} }
tcInit.sendEncodings = []; tcInit.sendEncodings.push(encoding);
for (let l = 0; l < maxLayers; l++) {
const layerDiv = 2 ** (maxLayers - l - 1);
const encoding: RTCRtpEncodingParameters = {
rid: "rDiv" + layerDiv.toString(),
// @ts-ignore -- Property missing from DOM types.
scalabilityMode: "L1T1",
};
if (this.configuration.typeOfVideo === TypeOfVideo.SCREEN) {
// Prioritize best resolution, for maximum picture detail.
encoding.scaleResolutionDownBy = 1.0;
// @ts-ignore -- Property missing from DOM types.
encoding.maxFramerate = Math.floor(30 / layerDiv);
// encoding.maxFramerate = (l === 2) ? 30 : Math.floor(30 / (2 * layerDiv)); // TESTING
} else {
encoding.scaleResolutionDownBy = layerDiv;
}
tcInit.sendEncodings.push(encoding);
}
} }
} }
@ -254,19 +233,16 @@ export class WebRtcPeer {
let sendParams = tc.sender.getParameters(); let sendParams = tc.sender.getParameters();
let needSetParams = false; let needSetParams = false;
if (!("degradationPreference" in sendParams)) { if (!sendParams.degradationPreference?.length) {
logger.debug(`[createOffer] RTCRtpSendParameters.degradationPreference attribute not present`); // degradationPreference for video: "balanced", "maintain-framerate", "maintain-resolution".
// Asked about why this might happen. Check it: // https://www.w3.org/TR/2018/CR-webrtc-20180927/#dom-rtcdegradationpreference
// https://groups.google.com/g/discuss-webrtc/c/R8Xug-irfRY if (["detail", "text"].includes(track.contentHint)) {
// For video: "balanced", "maintain-framerate", "maintain-resolution".
if (this.configuration.typeOfVideo === TypeOfVideo.SCREEN) {
sendParams.degradationPreference = "maintain-resolution"; sendParams.degradationPreference = "maintain-resolution";
} else { } else {
sendParams.degradationPreference = "balanced"; sendParams.degradationPreference = "balanced";
} }
logger.debug( logger.info(
`[createOffer] Video sender Degradation Preference set: ${sendParams.degradationPreference}` `[createOffer] Video sender Degradation Preference set: ${sendParams.degradationPreference}`
); );