mirror of https://github.com/vladmandic/human

commit 15db4b4b46 (parent 6cb2aeaa16): compatibility notes

README.md: 12 changes
@@ -229,7 +229,8 @@ config = {
   scoped: false, // enable scoped runs
   // some models *may* have memory leaks, this wraps everything in a local scope at a cost of performance
   // typically not needed
-  filter: {
+  videoOptimized: true, // perform additional optimizations when input is video, must be disabled for images
+  filter: { // note: image filters are only available in Browser environments and not in NodeJS as they require WebGL for processing
   enabled: true, // enable image pre-processing filters
   return: true, // return processed canvas imagedata in result
   brightness: 0, // range: -1 (darken) to 1 (lighten)
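The new `videoOptimized` flag is the compatibility switch this commit introduces: it must be turned off when the input is a still image. A minimal usage sketch (the element id and file layout are hypothetical; `human.detect` and the config shape are taken from the hunks below):

```js
// minimal sketch, assuming the esm build in ../dist/human.esm.js as imported by the demo worker below
import human from '../dist/human.esm.js';

const config = {
  videoOptimized: false,                    // input below is a static image, not a video stream
  filter: { enabled: true, brightness: 0 }, // browser-only: filters require WebGL
};

async function run() {
  const img = document.getElementById('sample'); // hypothetical element id
  const result = await human.detect(img, config);
  console.log(result.face, result.performance); // result.performance is documented above
}
run();
```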
@@ -432,6 +433,15 @@ For performance details, see output of `result.performance` object during runtime
 
+<hr>
+
+## Limitations
+
+`Human` library can be used in any modern Browser or NodeJS environment, but there are several items to be aware of:
+
+- **NodeJS**: Due to a missing feature in `tfjs-node`, only some models are available <https://github.com/tensorflow/tfjs/issues/4066>
+- **Browser**: `filters` module cannot be used when using web workers <https://github.com/phoboslab/WebGLImageFilter/issues/27>
+
 <hr>
 
 ## Credits
 
 - Face Detection: [**MediaPipe BlazeFace**](https://drive.google.com/file/d/1f39lSzU5Oq-j_OXgS67KfN5wNsoeAZ4V/view)
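Given the NodeJS limitation above, a hedged sketch of NodeJS usage (the file name is hypothetical; `tf.node.decodeImage` is the standard tfjs-node decoder; in NodeJS the input must already be a tensor, per the `sanity()` check later in this diff):

```js
const tf = require('@tensorflow/tfjs-node');
const fs = require('fs');
const human = require('@vladmandic/human'); // resolves to dist/human.cjs per package.json

async function main() {
  const buffer = fs.readFileSync('sample.jpg'); // hypothetical input file
  // decode to a tensor and add a batch dimension; the exact expected shape is
  // an assumption, the diff only shows that Node input must be a tensor
  const tensor = tf.node.decodeImage(buffer, 3).expandDims(0);
  const result = await human.detect(tensor, { videoOptimized: false });
  console.log(result.face);
  tensor.dispose(); // free tensor memory
}
main();
```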
@@ -7,6 +7,7 @@ export default {
   scoped: false, // enable scoped runs
   // some models *may* have memory leaks, this wraps everything in a local scope at a cost of performance
   // typically not needed
+  videoOptimized: true, // perform additional optimizations when input is video, must be disabled for images
   filter: {
   enabled: true, // enable image pre-processing filters
   return: true, // return processed canvas imagedata in result
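The `scoped` option is only described here as wrapping each run in a local scope; the library's actual mechanism is not shown in this diff. A plausible sketch using the public tfjs scope API:

```js
// assumption: scoped runs are implemented with tf.engine() scopes, which
// auto-dispose intermediate tensors created between startScope and endScope
const tf = require('@tensorflow/tfjs');

function scopedRun(fn) {
  tf.engine().startScope();
  try {
    return fn();            // any tensors leaked by fn() stay in this scope
  } finally {
    tf.engine().endScope(); // disposes scope-local tensors; costs performance per run
  }
}
```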
@@ -28,6 +28,7 @@ const ui = {
 const config = {
   backend: 'webgl', // if you want to use 'wasm' backend, enable script load of tf and tf-backend-wasm in index.html
   filter: { enabled: true, brightness: 0, contrast: 0, sharpness: 0, blur: 0, saturation: 0, hue: 0, negative: false, sepia: false, vintage: false, kodachrome: false, technicolor: false, polaroid: false, pixelate: 0 },
+  videoOptimized: true,
   face: {
     enabled: true,
     detector: { maxFaces: 10, skipFrames: 10, minConfidence: 0.5, iouThreshold: 0.3, scoreThreshold: 0.7 },
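The `backend` comment above points at a manual script load for wasm; a hedged sketch of what that switch could look like (the CDN URLs are illustrative; `tf.setBackend` and `tf.ready` are the standard tfjs calls):

```js
// index.html would load, per the comment above, something like:
//   <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs/dist/tf.min.js"></script>
//   <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm/dist/tf-backend-wasm.js"></script>
config.backend = 'wasm';
await tf.setBackend('wasm'); // tf is the global exposed by the script load
await tf.ready();            // wait for the wasm binary to initialize
```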
@@ -13,7 +13,7 @@ async function drawFace(result, canvas, ui, triangulation) {
     // silly hack since fillText does not support new line
     const labels = [];
     if (face.agConfidence) labels.push(`${Math.trunc(100 * face.agConfidence)}% ${face.gender || ''}`);
-    if (face.age) labels.push(`age:${face.age || ''}`);
+    if (face.age) labels.push(`age: ${face.age || ''}`);
     if (face.iris) labels.push(`iris: ${face.iris}`);
     if (face.emotion && face.emotion[0]) labels.push(`${Math.trunc(100 * face.emotion[0].score)}% ${face.emotion[0].emotion}`);
     ctx.fillStyle = ui.baseLabel;
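The "silly hack" comment refers to canvas `fillText` having no newline support: the `labels` array presumably gets drawn one entry per line, roughly as below (`box` and `lineHeight` are hypothetical names, not from this diff):

```js
// draw each collected label on its own line below the face box
labels.forEach((label, i) => ctx.fillText(label, box.x, box.y + (i + 1) * lineHeight));
```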
@@ -1,6 +1,7 @@
 import human from '../dist/human.esm.js';
 
 let config;
+let busy = false;
 
 const log = (...msg) => {
   // eslint-disable-next-line no-console
@@ -8,6 +9,8 @@ const log = (...msg) => {
 };
 
 onmessage = async (msg) => {
+  if (busy) return;
+  busy = true;
   // worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
   const image = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
   config = msg.data.config;
@@ -19,4 +22,5 @@ onmessage = async (msg) => {
     log('Worker thread error:', err.message);
   }
   postMessage(result);
+  busy = false;
 };
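The commented-out `worker.postMessage` line preserved in the hunk above documents the main-thread side of this protocol; together with the new `busy` flag, the worker now simply drops frames that arrive while a detection is still running. A reconstruction of the sender (worker path and element handling are assumptions):

```js
const worker = new Worker('demo/worker.js', { type: 'module' }); // path is an assumption

function sendFrame(video, canvas, config) {
  const ctx = canvas.getContext('2d');
  ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
  const image = ctx.getImageData(0, 0, canvas.width, canvas.height);
  // transfer the pixel buffer instead of copying it into the worker
  worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
}

worker.onmessage = (msg) => {
  // msg.data is the result object posted back by the worker above
  console.log(msg.data);
};
```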
@@ -5685,6 +5685,7 @@ var require_config = __commonJS((exports2) => {
       backend: "webgl",
       console: true,
       scoped: false,
+      videoOptimized: true,
       filter: {
         enabled: true,
         return: true,
@@ -5776,7 +5777,7 @@ var require_config = __commonJS((exports2) => {
 var require_package = __commonJS((exports2, module2) => {
   module2.exports = {
     name: "@vladmandic/human",
-    version: "0.3.8",
+    version: "0.3.9",
     description: "human: 3D Face Detection, Iris Tracking and Age & Gender Prediction",
     sideEffects: false,
     main: "dist/human.cjs",
@@ -5813,7 +5814,7 @@ var require_package = __commonJS((exports2, module2) => {
       rimraf: "^3.0.2"
     },
     scripts: {
-      start: "node --trace-warnings --unhandled-rejections=strict --trace-uncaught --no-deprecation demo/node.js",
+      start: "node --trace-warnings --unhandled-rejections=strict --trace-uncaught --no-deprecation src/node.js",
       lint: "eslint src/*.js demo/*.js",
       "build-iife": "esbuild --bundle --platform=browser --sourcemap --target=esnext --format=iife --minify --external:fs --global-name=human --metafile=dist/human.json --outfile=dist/human.js src/human.js",
       "build-esm-bundle": "esbuild --bundle --platform=browser --sourcemap --target=esnext --format=esm --minify --external:fs --metafile=dist/human.esm.json --outfile=dist/human.esm.js src/human.js",
@@ -5906,15 +5907,6 @@ function mergeDeep(...objects) {
 function sanity(input) {
   if (!input)
     return "input is not defined";
-  if (!(input instanceof tf.Tensor) || tf.ENV.flags.IS_BROWSER && (input instanceof ImageData || input instanceof HTMLImageElement || input instanceof HTMLCanvasElement || input instanceof HTMLVideoElement || input instanceof HTMLMediaElement)) {
-    const width = input.naturalWidth || input.videoWidth || input.width || input.shape && input.shape[1] > 0;
-    if (!width || width === 0)
-      return "input is empty";
-  }
-  if (tf.ENV.flags.IS_BROWSER && (input instanceof HTMLVideoElement || input instanceof HTMLMediaElement)) {
-    if (input.readyState && input.readyState <= 2)
-      return "input is not ready";
-  }
   if (tf.ENV.flags.IS_NODE && !(input instanceof tf.Tensor)) {
     return "input must be a tensor";
   }
@@ -5957,14 +5949,14 @@ function tfImage(input) {
   let filtered;
   if (tf.ENV.flags.IS_BROWSER && config.filter.enabled && !(input instanceof tf.Tensor)) {
     const width = input.naturalWidth || input.videoWidth || input.width || input.shape && input.shape[1] > 0;
-    const height = input.naturalHeight || input.videoHeight || input.Height || input.shape && input.shape[2] > 0;
-    if (!offscreenCanvas) {
-      offscreenCanvas = document.createElement("canvas");
-      offscreenCanvas.width = width;
-      offscreenCanvas.height = height;
-    }
+    const height = input.naturalHeight || input.videoHeight || input.height || input.shape && input.shape[2] > 0;
+    if (!offscreenCanvas)
+      offscreenCanvas = new OffscreenCanvas(width, height);
     const ctx = offscreenCanvas.getContext("2d");
-    ctx.drawImage(input, 0, 0, width, height, 0, 0, offscreenCanvas.width, offscreenCanvas.height);
+    if (input instanceof ImageData)
+      ctx.putImageData(input, 0, 0);
+    else
+      ctx.drawImage(input, 0, 0, width, height, 0, 0, offscreenCanvas.width, offscreenCanvas.height);
     if (!fx)
       fx = new fxImage.Canvas();
     else
@@ -6015,8 +6007,9 @@ async function detect(input, userConfig = {}) {
   const perf = {};
   let timeStamp;
   timeStamp = now();
-  const shouldOverride = tf.ENV.flags.IS_NODE || tf.ENV.flags.IS_BROWSER && !(input instanceof HTMLVideoElement || input instanceof HTMLMediaElement);
-  config = mergeDeep(defaults, userConfig, shouldOverride ? override : {});
+  config = mergeDeep(defaults, userConfig);
+  if (!config.videoOptimized)
+    config = mergeDeep(config, override);
   perf.config = Math.trunc(now() - timeStamp);
   timeStamp = now();
   state = "check";
@@ -1,11 +1,11 @@
 {
   "inputs": {
     "config.js": {
-      "bytes": 5828,
+      "bytes": 5942,
       "imports": []
     },
     "package.json": {
-      "bytes": 2635,
+      "bytes": 2634,
       "imports": []
     },
     "src/emotion/emotion.js": {
@@ -116,7 +116,7 @@
       "imports": []
     },
     "src/human.js": {
-      "bytes": 11161,
+      "bytes": 10488,
       "imports": [
         {
           "path": "src/facemesh/facemesh.js"
@@ -260,7 +260,7 @@
     "dist/human.cjs.map": {
       "imports": [],
       "inputs": {},
-      "bytes": 251682
+      "bytes": 250765
     },
     "dist/human.cjs": {
       "imports": [],
@@ -350,16 +350,16 @@
       "bytesInOutput": 20197
     },
     "config.js": {
-      "bytesInOutput": 2173
+      "bytesInOutput": 2199
     },
     "package.json": {
-      "bytesInOutput": 2778
+      "bytesInOutput": 2777
     },
     "src/human.js": {
-      "bytesInOutput": 9977
+      "bytesInOutput": 9246
     }
   },
-  "bytes": 154404
+  "bytes": 153698
   }
 }
}
@@ -1,11 +1,11 @@
 {
   "inputs": {
     "config.js": {
-      "bytes": 5828,
+      "bytes": 5942,
       "imports": []
     },
     "package.json": {
-      "bytes": 2635,
+      "bytes": 2634,
       "imports": []
     },
     "src/emotion/emotion.js": {
@@ -116,7 +116,7 @@
       "imports": []
     },
     "src/human.js": {
-      "bytes": 11161,
+      "bytes": 10488,
       "imports": [
         {
           "path": "src/facemesh/facemesh.js"
@@ -260,7 +260,7 @@
     "dist/human.esm-nobundle.js.map": {
      "imports": [],
       "inputs": {},
-      "bytes": 227481
+      "bytes": 226578
     },
     "dist/human.esm-nobundle.js": {
       "imports": [],
@@ -350,16 +350,16 @@
       "bytesInOutput": 11088
     },
     "config.js": {
-      "bytesInOutput": 1306
+      "bytesInOutput": 1324
     },
     "package.json": {
-      "bytesInOutput": 2305
+      "bytesInOutput": 2304
     },
     "src/human.js": {
-      "bytesInOutput": 5719
+      "bytesInOutput": 5180
     }
   },
-  "bytes": 81206
+  "bytes": 80684
   }
 }
}
@@ -1,7 +1,7 @@
 {
   "inputs": {
     "config.js": {
-      "bytes": 5828,
+      "bytes": 5942,
       "imports": []
     },
     "node_modules/@tensorflow/tfjs-backend-cpu/dist/tf-backend-cpu.node.js": {
@@ -149,7 +149,7 @@
       ]
     },
     "package.json": {
-      "bytes": 2635,
+      "bytes": 2634,
       "imports": []
     },
     "src/emotion/emotion.js": {
@@ -291,7 +291,7 @@
       "imports": []
     },
     "src/human.js": {
-      "bytes": 11161,
+      "bytes": 10488,
       "imports": [
         {
           "path": "node_modules/@tensorflow/tfjs/dist/tf.node.js"
@@ -468,7 +468,7 @@
     "dist/human.esm.js.map": {
       "imports": [],
       "inputs": {},
-      "bytes": 4988533
+      "bytes": 4987630
     },
     "dist/human.esm.js": {
       "imports": [],
@@ -615,16 +615,16 @@
       "bytesInOutput": 11089
     },
     "config.js": {
-      "bytesInOutput": 1307
+      "bytesInOutput": 1325
     },
     "package.json": {
-      "bytesInOutput": 2306
+      "bytesInOutput": 2305
     },
     "src/human.js": {
-      "bytesInOutput": 5876
+      "bytesInOutput": 5335
     }
   },
-  "bytes": 1118154
+  "bytes": 1117630
   }
 }
}
@@ -1,7 +1,7 @@
 {
   "inputs": {
     "config.js": {
-      "bytes": 5828,
+      "bytes": 5942,
       "imports": []
     },
     "node_modules/@tensorflow/tfjs-backend-cpu/dist/tf-backend-cpu.node.js": {
@@ -149,7 +149,7 @@
       ]
     },
     "package.json": {
-      "bytes": 2635,
+      "bytes": 2634,
       "imports": []
     },
     "src/emotion/emotion.js": {
@@ -291,7 +291,7 @@
       "imports": []
     },
     "src/human.js": {
-      "bytes": 11161,
+      "bytes": 10488,
       "imports": [
         {
           "path": "node_modules/@tensorflow/tfjs/dist/tf.node.js"
@@ -468,7 +468,7 @@
     "dist/human.js.map": {
       "imports": [],
       "inputs": {},
-      "bytes": 4988533
+      "bytes": 4987630
     },
     "dist/human.js": {
       "imports": [],
@@ -615,16 +615,16 @@
       "bytesInOutput": 11089
     },
     "config.js": {
-      "bytesInOutput": 1307
+      "bytesInOutput": 1325
     },
     "package.json": {
-      "bytesInOutput": 2306
+      "bytesInOutput": 2305
    },
     "src/human.js": {
-      "bytesInOutput": 5876
+      "bytesInOutput": 5335
     }
   },
-  "bytes": 1118163
+  "bytes": 1117639
   }
 }
}
src/human.js: 22 changes
@@ -74,15 +74,6 @@ function mergeDeep(...objects) {
 
 function sanity(input) {
   if (!input) return 'input is not defined';
-  if (!(input instanceof tf.Tensor)
-    || (tf.ENV.flags.IS_BROWSER
-    && (input instanceof ImageData || input instanceof HTMLImageElement || input instanceof HTMLCanvasElement || input instanceof HTMLVideoElement || input instanceof HTMLMediaElement))) {
-    const width = input.naturalWidth || input.videoWidth || input.width || (input.shape && (input.shape[1] > 0));
-    if (!width || (width === 0)) return 'input is empty';
-  }
-  if (tf.ENV.flags.IS_BROWSER && (input instanceof HTMLVideoElement || input instanceof HTMLMediaElement)) {
-    if (input.readyState && (input.readyState <= 2)) return 'input is not ready';
-  }
   if (tf.ENV.flags.IS_NODE && !(input instanceof tf.Tensor)) {
     return 'input must be a tensor';
   }
@@ -127,15 +118,18 @@ function tfImage(input) {
   let filtered;
   if (tf.ENV.flags.IS_BROWSER && config.filter.enabled && !(input instanceof tf.Tensor)) {
     const width = input.naturalWidth || input.videoWidth || input.width || (input.shape && (input.shape[1] > 0));
-    const height = input.naturalHeight || input.videoHeight || input.Height || (input.shape && (input.shape[2] > 0));
-    // if (!offscreenCanvas) offscreenCanvas = new OffscreenCanvas(width, height);
+    const height = input.naturalHeight || input.videoHeight || input.height || (input.shape && (input.shape[2] > 0));
+    if (!offscreenCanvas) offscreenCanvas = new OffscreenCanvas(width, height);
+    /*
     if (!offscreenCanvas) {
       offscreenCanvas = document.createElement('canvas');
       offscreenCanvas.width = width;
       offscreenCanvas.height = height;
     }
+    */
     const ctx = offscreenCanvas.getContext('2d');
-    ctx.drawImage(input, 0, 0, width, height, 0, 0, offscreenCanvas.width, offscreenCanvas.height);
+    if (input instanceof ImageData) ctx.putImageData(input, 0, 0);
+    else ctx.drawImage(input, 0, 0, width, height, 0, 0, offscreenCanvas.width, offscreenCanvas.height);
     if (!fx) fx = new fxImage.Canvas();
     else fx.reset();
     fx.addFilter('brightness', config.filter.brightness); // must have at least one filter enabled
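Note that `OffscreenCanvas` is not available in every browser, which is presumably why the `document.createElement('canvas')` path above is kept commented out rather than deleted. A hedged feature-check fallback (not part of this commit):

```js
// fall back to a regular canvas where OffscreenCanvas is unsupported
const canvas = (typeof OffscreenCanvas !== 'undefined')
  ? new OffscreenCanvas(width, height)
  : Object.assign(document.createElement('canvas'), { width, height });
```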
@@ -173,8 +167,8 @@ async function detect(input, userConfig = {}) {
   let timeStamp;
 
   timeStamp = now();
-  const shouldOverride = tf.ENV.flags.IS_NODE || (tf.ENV.flags.IS_BROWSER && !((input instanceof HTMLVideoElement) || (input instanceof HTMLMediaElement)));
-  config = mergeDeep(defaults, userConfig, shouldOverride ? override : {});
+  config = mergeDeep(defaults, userConfig);
+  if (!config.videoOptimized) config = mergeDeep(config, override);
   perf.config = Math.trunc(now() - timeStamp);
 
   // sanity checks
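The net effect of this last hunk: the library no longer sniffs the input type to decide whether to apply the `override` settings; the caller's `videoOptimized` flag decides. A hedged usage sketch (element names are hypothetical):

```js
// per-frame optimizations stay on for video; still images get full re-detection
const fromVideo = await human.detect(videoElement, { videoOptimized: true });
const fromImage = await human.detect(imageElement, { videoOptimized: false });
```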