add dynamic viewport and fix web worker

pull/134/head
Vladimir Mandic 2021-04-05 11:48:24 -04:00
parent 18586f5a9e
commit da8e441ad9
14 changed files with 214 additions and 199 deletions

View File

@@ -1,6 +1,6 @@
# @vladmandic/human # @vladmandic/human
Version: **1.3.3** Version: **1.3.4**
Description: **Human: AI-powered 3D Face Detection, Face Description & Recognition, Body Pose Tracking, Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction & Gesture Recognition** Description: **Human: AI-powered 3D Face Detection, Face Description & Recognition, Body Pose Tracking, Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction & Gesture Recognition**
Author: **Vladimir Mandic <mandic00@live.com>** Author: **Vladimir Mandic <mandic00@live.com>**
@@ -9,9 +9,13 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
## Changelog ## Changelog
### **HEAD -> main** 2021/04/04 mandic00@live.com ### **HEAD -> main** 2021/04/05 mandic00@live.com
### **1.3.4** 2021/04/04 mandic00@live.com
- implement webhint
### **1.3.3** 2021/04/03 mandic00@live.com ### **1.3.3** 2021/04/03 mandic00@live.com
- fix linting and tests - fix linting and tests

View File

@@ -3,7 +3,7 @@
<head> <head>
<title>Human</title> <title>Human</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8"> <meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="viewport" content="width=device-width, shrink-to-fit=yes"> <meta name="viewport" content="width=device-width">
<meta name="keywords" content="Human"> <meta name="keywords" content="Human">
<meta name="application-name" content="Human"> <meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>"> <meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
@@ -19,12 +19,12 @@
body { margin: 0; background: black; color: white; overflow-x: hidden } body { margin: 0; background: black; color: white; overflow-x: hidden }
body::-webkit-scrollbar { display: none; } body::-webkit-scrollbar { display: none; }
hr { width: 100%; } hr { width: 100%; }
.play { position: absolute; width: 250px; height: 250px; z-index: 9; top: 55%; left: 50%; margin-left: -125px; display: none; } .play { position: absolute; width: 250px; height: 250px; z-index: 9; bottom: 15%; left: 50%; margin-left: -125px; display: none; }
.btn-background { fill:grey; cursor: pointer; opacity: 0.6; } .btn-background { fill:grey; cursor: pointer; opacity: 0.6; }
.btn-background:hover { opacity: 1; } .btn-background:hover { opacity: 1; }
.btn-foreground { fill:white; cursor: pointer; opacity: 0.8; } .btn-foreground { fill:white; cursor: pointer; opacity: 0.8; }
.btn-foreground:hover { opacity: 1; } .btn-foreground:hover { opacity: 1; }
.status { position: absolute; width: 100vw; bottom: 15%; text-align: center; font-size: 4rem; font-weight: 100; text-shadow: 2px 2px darkslategrey; } .status { position: absolute; width: 100vw; bottom: 10%; text-align: center; font-size: 4rem; font-weight: 100; text-shadow: 2px 2px darkslategrey; }
.thumbnail { margin: 8px; box-shadow: 0 0 4px 4px dimgrey; } .thumbnail { margin: 8px; box-shadow: 0 0 4px 4px dimgrey; }
.thumbnail:hover { box-shadow: 0 0 8px 8px dimgrey; filter: grayscale(1); } .thumbnail:hover { box-shadow: 0 0 8px 8px dimgrey; filter: grayscale(1); }
.log { position: absolute; bottom: 0; margin: 0.4rem; font-size: 0.9rem; } .log { position: absolute; bottom: 0; margin: 0.4rem; font-size: 0.9rem; }

View File

@@ -189,6 +189,9 @@ async function drawResults(input) {
async function setupCamera() { async function setupCamera() {
if (ui.busy) return null; if (ui.busy) return null;
ui.busy = true; ui.busy = true;
const viewportScale = Math.min(1, Math.round(100 * window.outerWidth / 700) / 100);
log('demo viewport scale:', viewportScale);
document.querySelector('meta[name=viewport]').setAttribute('content', `width=device-width, shrink-to-fit=no; initial-scale=${viewportScale}`);
const video = document.getElementById('video'); const video = document.getElementById('video');
const canvas = document.getElementById('canvas'); const canvas = document.getElementById('canvas');
const output = document.getElementById('log'); const output = document.getElementById('log');
@@ -569,6 +572,7 @@ async function main() {
const res = await human.warmup(userConfig); // this is not required, just pre-warms all models for faster initial inference const res = await human.warmup(userConfig); // this is not required, just pre-warms all models for faster initial inference
if (res && res.canvas && ui.drawWarmup) await drawWarmup(res); if (res && res.canvas && ui.drawWarmup) await drawWarmup(res);
} }
await setupCamera();
status('human: ready'); status('human: ready');
document.getElementById('loader').style.display = 'none'; document.getElementById('loader').style.display = 'none';
document.getElementById('play').style.display = 'block'; document.getElementById('play').style.display = 'block';

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

164
dist/human.esm.js vendored

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

164
dist/human.js vendored

File diff suppressed because one or more lines are too long

4
dist/human.js.map vendored

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

6
dist/human.node.js vendored

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -18,13 +18,13 @@ export function process(input, config): { tensor: tf.Tensor, canvas: OffscreenCa
if (!input) throw new Error('Human: Input is missing'); if (!input) throw new Error('Human: Input is missing');
if ( if (
!(input instanceof tf.Tensor) !(input instanceof tf.Tensor)
&& !(input instanceof Image) && !(typeof Image !== 'undefined' && input instanceof Image)
&& !(input instanceof ImageData) && !(typeof ImageData !== 'undefined' && input instanceof ImageData)
&& !(input instanceof ImageBitmap) && !(typeof ImageBitmap !== 'undefined' && input instanceof ImageBitmap)
&& !(input instanceof HTMLImageElement) && !(typeof HTMLImageElement !== 'undefined' && input instanceof HTMLImageElement)
&& !(input instanceof HTMLVideoElement) && !(typeof HTMLVideoElement !== 'undefined' && input instanceof HTMLVideoElement)
&& !(input instanceof HTMLCanvasElement) && !(typeof HTMLCanvasElement !== 'undefined' && input instanceof HTMLCanvasElement)
&& !(input instanceof OffscreenCanvas) && !(typeof OffscreenCanvas !== 'undefined' && input instanceof OffscreenCanvas)
) { ) {
throw new Error('Human: Input type is not recognized'); throw new Error('Human: Input type is not recognized');
} }
@@ -107,14 +107,21 @@ export function process(input, config): { tensor: tf.Tensor, canvas: OffscreenCa
if (fx) fx = null; if (fx) fx = null;
} }
let pixels; let pixels;
if (outCanvas.data) { if (outCanvas.data) { // if we have data, just convert to tensor
const shape = [outCanvas.height, outCanvas.width, 3]; const shape = [outCanvas.height, outCanvas.width, 3];
pixels = tf.tensor3d(outCanvas.data, shape, 'int32'); pixels = tf.tensor3d(outCanvas.data, shape, 'int32');
} else if ((config.backend === 'webgl') || (outCanvas instanceof ImageData)) { } else if (outCanvas instanceof ImageData) { // if input is imagedata, just use it
// tf kernel-optimized method to get imagedata, also if input is imagedata, just use it
pixels = tf.browser.fromPixels(outCanvas); pixels = tf.browser.fromPixels(outCanvas);
} else { } else if (config.backend === 'webgl' || config.backend === 'humangl') { // tf kernel-optimized method to get imagedata
// cpu and wasm kernel does not implement efficient fromPixels method nor we can use canvas as-is, so we do a silly one more canvas // we can use canvas as-is as it already has a context, so we do a silly one more canvas
const tempCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement('canvas');
tempCanvas.width = targetWidth;
tempCanvas.height = targetHeight;
const tempCtx = tempCanvas.getContext('2d');
tempCtx?.drawImage(outCanvas, 0, 0);
pixels = tf.browser.fromPixels(tempCanvas);
} else { // cpu and wasm kernel does not implement efficient fromPixels method
// we can use canvas as-is as it already has a context, so we do a silly one more canvas
const tempCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement('canvas'); const tempCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement('canvas');
tempCanvas.width = targetWidth; tempCanvas.width = targetWidth;
tempCanvas.height = targetHeight; tempCanvas.height = targetHeight;