mirror of https://github.com/vladmandic/human
add dynamic viewport and fix web worker
parent a306378b3b
commit 1d6c72318b
@@ -1,6 +1,6 @@
 # @vladmandic/human
 
-Version: **1.3.3**
+Version: **1.3.4**
 
 Description: **Human: AI-powered 3D Face Detection, Face Description & Recognition, Body Pose Tracking, Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction & Gesture Recognition**
 
 Author: **Vladimir Mandic <mandic00@live.com>**

@@ -9,9 +9,13 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
 
 ## Changelog
 
-### **HEAD -> main** 2021/04/04 mandic00@live.com
+### **HEAD -> main** 2021/04/05 mandic00@live.com
+
+### **1.3.4** 2021/04/04 mandic00@live.com
+
+- implement webhint
 
 ### **1.3.3** 2021/04/03 mandic00@live.com
 
 - fix linting and tests
@@ -3,7 +3,7 @@
 <head>
   <title>Human</title>
   <meta http-equiv="content-type" content="text/html; charset=utf-8">
-  <meta name="viewport" content="width=device-width, shrink-to-fit=yes">
+  <meta name="viewport" content="width=device-width">
  <meta name="keywords" content="Human">
   <meta name="application-name" content="Human">
   <meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
@@ -19,12 +19,12 @@
   body { margin: 0; background: black; color: white; overflow-x: hidden }
   body::-webkit-scrollbar { display: none; }
   hr { width: 100%; }
-  .play { position: absolute; width: 250px; height: 250px; z-index: 9; top: 55%; left: 50%; margin-left: -125px; display: none; }
+  .play { position: absolute; width: 250px; height: 250px; z-index: 9; bottom: 15%; left: 50%; margin-left: -125px; display: none; }
   .btn-background { fill:grey; cursor: pointer; opacity: 0.6; }
   .btn-background:hover { opacity: 1; }
   .btn-foreground { fill:white; cursor: pointer; opacity: 0.8; }
   .btn-foreground:hover { opacity: 1; }
-  .status { position: absolute; width: 100vw; bottom: 15%; text-align: center; font-size: 4rem; font-weight: 100; text-shadow: 2px 2px darkslategrey; }
+  .status { position: absolute; width: 100vw; bottom: 10%; text-align: center; font-size: 4rem; font-weight: 100; text-shadow: 2px 2px darkslategrey; }
   .thumbnail { margin: 8px; box-shadow: 0 0 4px 4px dimgrey; }
   .thumbnail:hover { box-shadow: 0 0 8px 8px dimgrey; filter: grayscale(1); }
   .log { position: absolute; bottom: 0; margin: 0.4rem; font-size: 0.9rem; }
@@ -189,6 +189,9 @@ async function drawResults(input) {
 async function setupCamera() {
   if (ui.busy) return null;
   ui.busy = true;
+  const viewportScale = Math.min(1, Math.round(100 * window.outerWidth / 700) / 100);
+  log('demo viewport scale:', viewportScale);
+  document.querySelector('meta[name=viewport]').setAttribute('content', `width=device-width, shrink-to-fit=no; initial-scale=${viewportScale}`);
   const video = document.getElementById('video');
   const canvas = document.getElementById('canvas');
   const output = document.getElementById('log');
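The three added lines are the dynamic viewport from the commit title: the demo measures the window width against its 700px reference layout, derives a scale factor capped at 1, and rewrites the viewport meta tag so the UI shrinks proportionally on narrow screens. A minimal standalone sketch of the same technique (applyDynamicViewport is a hypothetical helper name; the 700px reference width is taken from the demo code above):

// Sketch of the dynamic-viewport technique used above; assumes the page
// already contains a <meta name="viewport"> tag. `referenceWidth` mirrors
// the demo's 700px layout width and is an assumption, not a library value.
function applyDynamicViewport(referenceWidth = 700): number {
  // scale down on windows narrower than the reference, never above 1
  const scale = Math.min(1, Math.round(100 * window.outerWidth / referenceWidth) / 100);
  const meta = document.querySelector('meta[name=viewport]');
  if (meta) meta.setAttribute('content', `width=device-width, initial-scale=${scale}`);
  return scale;
}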
@@ -569,6 +572,7 @@ async function main() {
     const res = await human.warmup(userConfig); // this is not required, just pre-warms all models for faster initial inference
     if (res && res.canvas && ui.drawWarmup) await drawWarmup(res);
   }
+  await setupCamera();
   status('human: ready');
   document.getElementById('loader').style.display = 'none';
   document.getElementById('play').style.display = 'block';
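As the inline comment says, warmup is optional: it runs the loaded models once on synthetic input so the first real detect() call does not pay initialization cost. A hedged sketch of that startup sequence outside the demo (the config values here are illustrative, not the demo's userConfig):

import Human from '@vladmandic/human';

// illustrative config; any Human config object works here
const human = new Human({ backend: 'webgl' });

async function start(video: HTMLVideoElement) {
  await human.load();   // download/initialize model weights
  await human.warmup(); // optional pre-warm so the first detect() is fast
  const result = await human.detect(video);
  console.log('faces:', result.face.length);
}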
@@ -18,13 +18,13 @@ export function process(input, config): { tensor: tf.Tensor, canvas: OffscreenCanvas | HTMLCanvasElement } {
   if (!input) throw new Error('Human: Input is missing');
   if (
     !(input instanceof tf.Tensor)
-    && !(input instanceof Image)
-    && !(input instanceof ImageData)
-    && !(input instanceof ImageBitmap)
-    && !(input instanceof HTMLImageElement)
-    && !(input instanceof HTMLVideoElement)
-    && !(input instanceof HTMLCanvasElement)
-    && !(input instanceof OffscreenCanvas)
+    && !(typeof Image !== 'undefined' && input instanceof Image)
+    && !(typeof ImageData !== 'undefined' && input instanceof ImageData)
+    && !(typeof ImageBitmap !== 'undefined' && input instanceof ImageBitmap)
+    && !(typeof HTMLImageElement !== 'undefined' && input instanceof HTMLImageElement)
+    && !(typeof HTMLVideoElement !== 'undefined' && input instanceof HTMLVideoElement)
+    && !(typeof HTMLCanvasElement !== 'undefined' && input instanceof HTMLCanvasElement)
+    && !(typeof OffscreenCanvas !== 'undefined' && input instanceof OffscreenCanvas)
   ) {
     throw new Error('Human: Input type is not recognized');
   }
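This hunk is the web worker fix: inside a worker, DOM constructors such as Image or HTMLCanvasElement do not exist, so a bare instanceof check throws a ReferenceError before validation even runs. Prefixing each check with a typeof guard makes an absent type simply fail the test instead. A condensed sketch of the pattern (isRecognizedInput is a hypothetical helper; the library performs these checks inline):

import * as tf from '@tensorflow/tfjs';

// In a worker, `x instanceof HTMLImageElement` throws ReferenceError because the
// constructor is not defined there. `typeof C !== 'undefined'` short-circuits
// the instanceof, so the check degrades to `false` instead of throwing.
function isRecognizedInput(input: unknown): boolean {
  return input instanceof tf.Tensor
    || (typeof ImageData !== 'undefined' && input instanceof ImageData)
    || (typeof ImageBitmap !== 'undefined' && input instanceof ImageBitmap)
    || (typeof HTMLImageElement !== 'undefined' && input instanceof HTMLImageElement)
    || (typeof HTMLVideoElement !== 'undefined' && input instanceof HTMLVideoElement)
    || (typeof HTMLCanvasElement !== 'undefined' && input instanceof HTMLCanvasElement)
    || (typeof OffscreenCanvas !== 'undefined' && input instanceof OffscreenCanvas);
}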
@@ -107,14 +107,21 @@ export function process(input, config): { tensor: tf.Tensor, canvas: OffscreenCanvas | HTMLCanvasElement } {
     if (fx) fx = null;
   }
   let pixels;
-  if (outCanvas.data) {
+  if (outCanvas.data) { // if we have data, just convert to tensor
     const shape = [outCanvas.height, outCanvas.width, 3];
     pixels = tf.tensor3d(outCanvas.data, shape, 'int32');
-  } else if ((config.backend === 'webgl') || (outCanvas instanceof ImageData)) {
-    // tf kernel-optimized method to get imagedata, also if input is imagedata, just use it
+  } else if (outCanvas instanceof ImageData) { // if input is imagedata, just use it
     pixels = tf.browser.fromPixels(outCanvas);
-  } else {
-    // cpu and wasm kernel does not implement efficient fromPixels method nor we can use canvas as-is, so we do a silly one more canvas
+  } else if (config.backend === 'webgl' || config.backend === 'humangl') { // tf kernel-optimized method to get imagedata
+    // we cannot use canvas as-is as it already has a context, so we do a silly one more canvas
+    const tempCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement('canvas');
+    tempCanvas.width = targetWidth;
+    tempCanvas.height = targetHeight;
+    const tempCtx = tempCanvas.getContext('2d');
+    tempCtx?.drawImage(outCanvas, 0, 0);
+    pixels = tf.browser.fromPixels(tempCanvas);
+  } else { // cpu and wasm kernel does not implement efficient fromPixels method
+    // we cannot use canvas as-is as it already has a context, so we do a silly one more canvas
     const tempCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement('canvas');
     tempCanvas.width = targetWidth;
     tempCanvas.height = targetHeight;
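The restructured branching separates three conversion routes: a raw pixel buffer feeds tf.tensor3d directly, ImageData is accepted by tf.browser.fromPixels as-is, and canvases (on both the webgl/humangl and cpu/wasm paths) are first drawn onto a fresh temporary canvas because the processed canvas's context is already claimed. A condensed sketch of that decision tree (tensorFromOutput is a hypothetical helper; the real logic lives inline in process):

import * as tf from '@tensorflow/tfjs';

// Hypothetical helper condensing the branches above: choose the cheapest
// available route from processed output to an int32 RGB tensor.
function tensorFromOutput(out: any, width: number, height: number): tf.Tensor3D {
  if (out.data) {
    // raw pixel buffer already available: build the tensor directly
    return tf.tensor3d(out.data, [height, width, 3], 'int32');
  }
  if (typeof ImageData !== 'undefined' && out instanceof ImageData) {
    // fromPixels accepts ImageData without any extra copy
    return tf.browser.fromPixels(out);
  }
  // canvas path: its context is already taken, so draw onto one more canvas
  const temp: any = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(width, height) : document.createElement('canvas');
  temp.width = width;
  temp.height = height;
  temp.getContext('2d')?.drawImage(out, 0, 0);
  return tf.browser.fromPixels(temp);
}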