mirror of https://github.com/vladmandic/human

implement webrtc

parent 83ae207d88
commit 2605c5bcb8
CHANGELOG.md

@@ -1,6 +1,6 @@
 # @vladmandic/human
 
-Version: **1.4.1**
+Version: **1.4.2**
 
 Description: **Human: AI-powered 3D Face Detection & Rotation Tracking, Face Description & Recognition, Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction, Gesture Recognition**
 
 Author: **Vladimir Mandic <mandic00@live.com>**
@@ -9,8 +9,9 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
 
 ## Changelog
 
-### **HEAD -> main** 2021/04/10 mandic00@live.com
+### **1.4.2** 2021/04/12 mandic00@live.com
 
 - added support for multiple instances of human
 - fix typedoc
 - exception handling
|
20
README.md
20
README.md
|
@ -125,6 +125,26 @@ For details, including how to use `Browser ESM` version or `NodeJS` version of `
|
|||
|
||||
<br>
|
||||
|
||||
## Inputs
|
||||
|
||||
`Human` library can process all known input types:
|
||||
|
||||
- `Image`, `ImageData`, `ImageBitmap`, `Canvas`, `OffscreenCanvas`, `Tensor`,
|
||||
- `HTMLImageElement`, `HTMLCanvasElement`, `HTMLVideoElement`, `HTMLMediaElement`
|
||||
|
||||
Additionally, `HTMLVideoElement`, `HTMLMediaElement` can be a standard `<video>` tag that links to:
|
||||
|
||||
- WebCam on user's system
|
||||
- Any supported video type
|
||||
For example: `.mp4`, `.avi`, etc.
|
||||
- Additional video types supported via *HTML5 Media Source Extensions*
|
||||
Live streaming examples:
|
||||
- **HLS** (*HTTP Live Streaming*) using `hls.js`
|
||||
- **DASH** (Dynamic Adaptive Streaming over HTTP) using `dash.js`
|
||||
- **WebRTC** media track
|
||||
|
||||
<br>
|
||||
|
||||
## Example
|
||||
|
||||
Example simple app that uses Human to process video input and
|
||||
|
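For instance, wiring one of the live-streaming sources above into `Human` only requires attaching the stream to a standard `<video>` tag and passing that element to `human.detect()`. A minimal sketch using `hls.js` (the manifest URL is a placeholder):

```js
import Hls from 'hls.js';
import Human from '@vladmandic/human';

const human = new Human();
const video = document.getElementById('video'); // standard <video> element

// attach an HLS live stream to the video tag (manifest URL is a placeholder)
const hls = new Hls();
hls.loadSource('https://example.com/live/stream.m3u8');
hls.attachMedia(video);
video.onloadeddata = () => video.play();

// any playing <video> element is a valid detection input
async function loop() {
  const result = await human.detect(video);
  console.log(result.face, result.body, result.hand, result.gesture);
  requestAnimationFrame(loop);
}
loop();
```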
demo/helpers/gl-bench.js

@@ -4,7 +4,7 @@
 const UICSS = `
   #gl-bench { position: absolute; right: 1rem; bottom: 1rem; z-index:1000; -webkit-user-select: none; -moz-user-select: none; user-select: none; }
-  #gl-bench div { position: relative; display: block; margin: 4px; padding: 0 2px 0 2px; background: darkslategray; border-radius: 0.1rem; cursor: pointer; opacity: 0.9; }
+  #gl-bench div { position: relative; display: block; margin: 4px; padding: 0 2px 0 2px; background: #303030; border-radius: 0.1rem; cursor: pointer; opacity: 0.9; }
   #gl-bench svg { height: 60px; margin: 0 0px 0px 4px; }
   #gl-bench text { font-size: 16px; font-family: 'Lato', 'Segoe UI'; dominant-baseline: middle; text-anchor: middle; }
   #gl-bench .gl-mem { font-size: 12px; fill: white; }
demo/helpers/menu.js

@@ -2,8 +2,8 @@ let instance = 0;
 let CSScreated = false;
 
 let theme = {
-  background: 'darkslategray',
-  hover: 'lightgray',
+  background: '#303030',
+  hover: '#505050',
   itemBackground: 'black',
   itemColor: 'white',
   buttonBackground: 'lightblue',
@@ -19,10 +19,9 @@ function createCSS() {
   if (CSScreated) return;
   const css = `
   :root { --rounded: 0.1rem; }
-  .menu { position: absolute; top: 0rem; right: 0; width: max-content; padding: 0 0.2rem 0 0.2rem; line-height: 1.8rem; z-index: 10;
-    box-shadow: 0 0 8px dimgrey; background: ${theme.background}; border-radius: var(--rounded); border-color: black; border-style: solid; border-width: thin; }
+  .menu { position: absolute; top: 0rem; right: 0; width: max-content; padding: 0 0.2rem 0 0.2rem; line-height: 1.8rem; z-index: 10; background: ${theme.background}; border: none }
 
-  .menu:hover { box-shadow: 0 0 8px ${theme.hover}; }
+  .menu:hover { background: ${theme.hover}; }
   .menu-container { display: block; max-height: 100vh; }
   .menu-container-fadeout { max-height: 0; overflow: hidden; transition: max-height, 0.5s ease; }
   .menu-container-fadein { max-height: 100vh; overflow: hidden; transition: max-height, 0.5s ease; }
@@ -62,7 +61,7 @@ function createCSS() {
   input[type=range]::-webkit-slider-thumb { border: 1px solid #000000; margin-top: 0.05rem; height: 0.9rem; width: 1rem; border-radius: var(--rounded); background: ${theme.rangeBackground}; cursor: pointer; -webkit-appearance: none; }
   input[type=range]::-moz-range-thumb { border: 1px solid #000000; margin-top: 0.05rem; height: 0.9rem; width: 1rem; border-radius: var(--rounded); background: ${theme.rangeBackground}; cursor: pointer; -webkit-appearance: none; }
 
-  .svg-background { fill:darkslategrey; cursor:pointer; opacity: 0.6; }
+  .svg-background { fill:#303030; cursor:pointer; opacity: 0.6; }
   .svg-foreground { fill:white; cursor:pointer; opacity: 0.8; }
   `;
   const el = document.createElement('style');
@@ -88,10 +87,10 @@ class Menu {
     this.menu.id = `menu-${instance}`;
     this.menu.className = 'menu';
     if (position) {
-      if (position.top) this.menu.style.top = position.top;
-      if (position.bottom) this.menu.style.bottom = position.bottom;
-      if (position.left) this.menu.style.left = position.left;
-      if (position.right) this.menu.style.right = position.right;
+      if (position.top) this.menu.style.top = `${position.top}`;
+      if (position.bottom) this.menu.style.bottom = `${position.bottom}`;
+      if (position.left) this.menu.style.left = `${position.left}`;
+      if (position.right) this.menu.style.right = `${position.right}`;
     }
 
     this.container = document.createElement('div');
@@ -118,7 +117,7 @@ class Menu {
 
     this.menu.appendChild(this.container);
     if (typeof parent === 'object') parent.appendChild(this.menu);
-    else document.getElementById(parent).appendChild(this.menu);
+    else document.getElementById(parent)?.appendChild(this.menu);
   }
 
   get newID() {
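The `?.` change above is a small hardening rather than a behavior change on the normal path: `document.getElementById()` returns `null` when no element has the given id, so the optional call turns a missing menu container into a silent no-op instead of an uncaught `TypeError`. For illustration:

```js
const parent = document.getElementById('no-such-id'); // null when the id is absent
// parent.appendChild(node);                          // before: TypeError on null
parent?.appendChild(document.createElement('div'));   // after: no-op, no throw
```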
demo/helpers/webrtc.js (new file)

@@ -0,0 +1,68 @@
+const debug = true;
+
+async function log(...msg) {
+  if (debug) {
+    const dt = new Date();
+    const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
+    // eslint-disable-next-line no-console
+    console.log(ts, 'webrtc', ...msg);
+  }
+}
+
+async function webRTC(server, streamName, elementName) {
+  const suuid = streamName;
+  log('client starting');
+  log(`server: ${server} stream: ${suuid}`);
+  const stream = new MediaStream();
+  const connection = new RTCPeerConnection();
+  connection.oniceconnectionstatechange = () => log('connection', connection.iceConnectionState);
+  connection.onnegotiationneeded = async () => {
+    const offer = await connection.createOffer();
+    await connection.setLocalDescription(offer);
+    const res = await fetch(`${server}/stream/receiver/${suuid}`, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' },
+      body: new URLSearchParams({
+        suuid: `${suuid}`,
+        data: `${btoa(connection.localDescription?.sdp || '')}`,
+      }),
+    });
+    const data = res && res.ok ? await res.text() : '';
+    if (data.length === 0) {
+      log('cannot connect:', server);
+    } else {
+      connection.setRemoteDescription(new RTCSessionDescription({
+        type: 'answer',
+        sdp: atob(data),
+      }));
+      log('negotiation start:', offer);
+    }
+  };
+  connection.ontrack = (event) => {
+    stream.addTrack(event.track);
+    const video = (typeof elementName === 'string') ? document.getElementById(elementName) : elementName;
+    // @ts-ignore
+    if (video instanceof HTMLVideoElement) video.srcObject = stream;
+    else log('element is not a video element:', elementName);
+    // video.onloadeddata = async () => log('resolution:', video.videoWidth, video.videoHeight);
+    log('received track:', event.track);
+  };
+
+  const res = await fetch(`${server}/stream/codec/${suuid}`);
+  const streams = res && res.ok ? await res.json() : [];
+  if (streams.length === 0) log('received no streams');
+  else log('received streams:', streams);
+  for (const s of streams) connection.addTransceiver(s.Type, { direction: 'sendrecv' });
+
+  const channel = connection.createDataChannel(suuid, { maxRetransmits: 10 });
+  channel.onmessage = (e) => log('channel message:', channel.label, 'payload', e.data);
+  channel.onerror = (e) => log('channel error:', channel.label, 'payload', e);
+  // channel.onbufferedamountlow = (e) => log('channel buffering:', channel.label, 'payload', e);
+  channel.onclose = () => log('channel close', channel.label);
+  channel.onopen = () => {
+    log('channel open', channel.label);
+    setInterval(() => channel.send('ping'), 1000); // send ping because PION doesn't handle RTCSessionDescription.close()
+  };
+}
+
+export default webRTC;
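The new helper implements the receiver side of an HTTP-signaled WebRTC session: `onnegotiationneeded` POSTs a base64-encoded SDP offer to `{server}/stream/receiver/{suuid}` and applies the base64-decoded SDP answer from the response, `{server}/stream/codec/{suuid}` lists the available tracks so one transceiver can be added per track, and the 1-second `ping` on the data channel acts as a keep-alive since, per the inline comment, the PION-based server does not handle `RTCSessionDescription.close()`. Usage is a single call; the server address and stream name below are the demo defaults from this commit and assume a matching signaling server is running:

```js
import webRTC from './helpers/webrtc.js';

const video = document.getElementById('video');
// server address and stream name must match the signaling server's configuration
await webRTC('http://localhost:8002', 'reowhite', video);
```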
demo stylesheet

@@ -25,11 +25,11 @@
 .btn-background:hover { opacity: 1; }
 .btn-foreground { fill:white; cursor: pointer; opacity: 0.8; }
 .btn-foreground:hover { opacity: 1; }
-.status { position: absolute; width: 100vw; bottom: 10%; text-align: center; font-size: 4rem; font-weight: 100; text-shadow: 2px 2px darkslategrey; }
+.status { position: absolute; width: 100vw; bottom: 10%; text-align: center; font-size: 4rem; font-weight: 100; text-shadow: 2px 2px #303030; }
 .thumbnail { margin: 8px; box-shadow: 0 0 4px 4px dimgrey; }
 .thumbnail:hover { box-shadow: 0 0 8px 8px dimgrey; filter: grayscale(1); }
 .log { position: absolute; bottom: 0; margin: 0.4rem; font-size: 0.9rem; }
-.menubar { width: 100vw; background: darkslategray; display: flex; justify-content: space-evenly; text-align: center; padding: 8px; cursor: pointer; }
+.menubar { width: 100vw; background: #303030; display: flex; justify-content: space-evenly; text-align: center; padding: 8px; cursor: pointer; }
 .samples-container { display: flex; flex-wrap: wrap; }
 .video { display: none; }
 .canvas { margin: 0 auto; }
demo/index.js

@@ -5,6 +5,7 @@ import Human from '../dist/human.esm.js'; // equivalent of @vladmandic/human
 // import Human from '../dist/human.esm-nobundle.js'; // this requires that tf is loaded manually and bundled before human can be used
 import Menu from './helpers/menu.js';
 import GLBench from './helpers/gl-bench.js';
+import webRTC from './helpers/webrtc.js';
 
 const userConfig = { backend: 'webgl' }; // add any user configuration overrides
 let human;
@@ -42,6 +43,9 @@ const ui = {
   worker: 'index-worker.js',
   samples: ['../assets/sample6.jpg', '../assets/sample1.jpg', '../assets/sample4.jpg', '../assets/sample5.jpg', '../assets/sample3.jpg', '../assets/sample2.jpg'],
   compare: '../assets/sample-me.jpg',
+  useWebRTC: false, // use webrtc as camera source instead of local webcam
+  webRTCServer: 'http://localhost:8002',
+  webRTCStream: 'reowhite',
   console: true, // log messages to browser console
   maxFPSframes: 10, // keep fps history for how many frames
   modelsPreload: true, // preload human models on startup
@@ -52,7 +56,7 @@ const ui = {
   camera: {}, // internal, holds details of webcam
   detectFPS: [], // internal, holds fps values for detection performance
   drawFPS: [], // internal, holds fps values for draw performance
-  buffered: true, // experimental, should output be buffered between frames
+  buffered: true, // should output be buffered between frames
   drawWarmup: false, // debug only, should warmup image processing be displayed on startup
   drawThread: null, // internal, perform draw operations in a separate thread
   detectThread: null, // internal, perform detect operations in a separate thread
@@ -198,6 +202,18 @@ async function setupCamera() {
   const video = document.getElementById('video');
   const canvas = document.getElementById('canvas');
   const output = document.getElementById('log');
+  if (ui.useWebRTC) {
+    status('setting up webrtc connection');
+    try {
+      video.onloadeddata = () => ui.camera = { name: ui.webRTCStream, width: video.videoWidth, height: video.videoHeight, facing: 'default' };
+      await webRTC(ui.webRTCServer, ui.webRTCStream, video);
+    } catch (err) {
+      log(err);
+    } finally {
+      status('');
+    }
+    return '';
+  }
   const live = video.srcObject ? ((video.srcObject.getVideoTracks()[0].readyState === 'live') && (video.readyState > 2) && (!video.paused)) : false;
   let msg = '';
   status('setting up camera');
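Taken together with the new `ui` options, the WebRTC path replaces the `getUserMedia` flow entirely: `setupCamera()` hands the `<video>` element to `webRTC()` and returns early, and camera metadata is recorded once the remote stream delivers its first frame. Enabling it is a configuration-only change (values shown are the defaults added in this commit; they assume a running signaling server):

```js
// in demo/index.js
const ui = {
  // ...other demo options unchanged...
  useWebRTC: true,                       // use webrtc as camera source instead of local webcam
  webRTCServer: 'http://localhost:8002', // signaling server address
  webRTCStream: 'reowhite',              // stream name registered on that server
};
```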
File diff suppressed because one or more lines are too long (10 files)
package.json

@@ -62,8 +62,8 @@
     "@tensorflow/tfjs-node": "^3.3.0",
     "@tensorflow/tfjs-node-gpu": "^3.3.0",
     "@types/node": "^14.14.37",
-    "@typescript-eslint/eslint-plugin": "^4.21.0",
-    "@typescript-eslint/parser": "^4.21.0",
+    "@typescript-eslint/eslint-plugin": "^4.22.0",
+    "@typescript-eslint/parser": "^4.22.0",
     "@vladmandic/pilogger": "^0.2.16",
     "chokidar": "^3.5.1",
     "dayjs": "^1.10.4",
src/config.ts

@@ -149,7 +149,8 @@ const config: Config = {
   // typically not needed
   videoOptimized: true, // perform additional optimizations when input is video,
                         // must be disabled for images
-                        // basically this skips object box boundary detection for every n frames
+                        // automatically disabled for Image, ImageData, ImageBitmap and Tensor inputs
+                        // skips boundary detection for every n frames
                         // while maintaining in-box detection since objects cannot move that fast
   warmup: 'face', // what to use for human.warmup(), can be 'none', 'face', 'full'
                   // warmup pre-initializes all models for faster inference but can take
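The clarified comment is worth illustrating: with `videoOptimized: true`, object box boundary detection runs only every n-th frame and cached boxes are reused in between, which is only valid when consecutive inputs are frames of the same video. A sketch of the two modes, passing the override through the `Human` constructor (the image element id is a placeholder):

```js
import Human from '@vladmandic/human';

// video pipeline: skip boundary detection on most frames, reuse cached boxes
const humanVideo = new Human({ videoOptimized: true });

// unrelated still images: every input must run full boundary detection
// (also auto-disabled for Image, ImageData, ImageBitmap and Tensor inputs)
const humanStill = new Human({ videoOptimized: false });

async function run() {
  const img = document.getElementById('my-image'); // placeholder element id
  const result = await humanStill.detect(img);
  console.log(result.face);
}
run();
```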
wiki (submodule)

@@ -1 +1 @@
-Subproject commit 3539f10bcdd6d6d5b68bbca77969825ff8ffe00d
+Subproject commit bd0cfa7ff3eaf40cb114b45f5b16f88b9d213de8