mirror of https://github.com/vladmandic/human
added node build and demo
parent b19e6372c8
commit 11137f4523
README.md
@@ -5,6 +5,8 @@
**Package**: <https://www.npmjs.com/package/@vladmandic/human>
**Live Demo**: <https://vladmandic.github.io/human/demo/demo-esm.html>
+
+**Compatible with Browser, WebWorker and NodeJS** execution!

*Suggestions are welcome!*

<hr>

@@ -88,13 +90,15 @@ You also need to install and include `tfjs` in your project

Install with:

```shell
npm install @tensorflow/tfjs @vladmandic/human
+npm install @tensorflow/tfjs-node @vladmandic/human
```

And then use with:

```js
import * as tf from '@tensorflow/tfjs';
import human from '@vladmandic/human';
+const tf = require('@tensorflow/tfjs-node');
+const human = require('@vladmandic/human');
```

*See limitations for NodeJS usage under `demo`*

### Weights

@@ -108,10 +112,20 @@ If your application resides in a different folder, modify `modelPath` property i

Demos are included in `/demo`:

-- `demo-esm`: Demo using ESM module
-- `demo-iife`: Demo using IIFE module
+Browser:
+
+- `demo-esm`: Demo using Browser with ESM module
+- `demo-iife`: Demo using Browser with IIFE module
+- `demo-webworker`: Demo using Browser with ESM module and Web Workers
+
+*All three browser demos are identical; they just illustrate different ways to load and work with the `Human` library*

-Both demos are identical, they just illustrate different ways to load `Human` library
+
+NodeJS:
+
+- `demo-node`: Demo using NodeJS with CJS module
+
+This is a very simple demo: although the `Human` library is compatible with NodeJS execution and can load images and models from the local filesystem, the `tfjs-node` backend does not implement some functions required to execute certain models.
+
+Currently only body pose detection works; the face and hand models are not supported.
+See `tfjs-node` issue <https://github.com/tensorflow/tfjs/issues/4066> for details.

<hr>

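For reference, the new NodeJS demo takes an input and an output image path (its own argument check prints `Parameters: <input image> <output image>`). A minimal invocation sketch, with placeholder file names:

```shell
node demo/demo-node.js samples/face.jpg samples/face-output.jpg
# or via the "start" script added to package.json in this commit:
npm start -- samples/face.jpg samples/face-output.jpg
```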
@@ -219,34 +233,34 @@ Result of `human.detect()` is a single object that includes data for all enable

```js
result = {
  face:            // <array of detected objects>
  [
    {
-     confidence:  // <number>
-     box:         // <array [x, y, width, height]>
-     mesh:        // <array of 3D points [x, y, z]> (468 base points & 10 iris points)
-     annotations: // <list of object { landmark: array of points }> (32 base annotated landmarks & 2 iris annotations)
-     iris:        // <number> (relative distance of iris to camera; multiply by focal length to get actual distance)
-     age:         // <number> (estimated age)
-     gender:      // <string> (male or female)
+     confidence,  // <number>
+     box,         // <array [x, y, width, height]>
+     mesh,        // <array of 3D points [x, y, z]> (468 base points & 10 iris points)
+     annotations, // <list of object { landmark: array of points }> (32 base annotated landmarks & 2 iris annotations)
+     iris,        // <number> (relative distance of iris to camera; multiply by focal length to get actual distance)
+     age,         // <number> (estimated age)
+     gender,      // <string> (male or female)
    }
  ],
  body:            // <array of detected objects>
  [
    {
-     score:       // <number>
-     keypoints:   // <array of 2D landmarks [ score, landmark, position [x, y] ]> (17 annotated landmarks)
+     score,       // <number>
+     keypoints,   // <array of 2D landmarks [ score, landmark, position [x, y] ]> (17 annotated landmarks)
    }
  ],
  hand:            // <array of detected objects>
  [
    {
-     confidence:  // <number>
-     box:         // <array [x, y, width, height]>
-     landmarks:   // <array of 3D points [x, y, z]> (21 points)
-     annotations: // <array of 3D landmarks [ landmark: <array of points> ]> (5 annotated landmarks)
+     confidence,  // <number>
+     box,         // <array [x, y, width, height]>
+     landmarks,   // <array of 3D points [x, y, z]> (21 points)
+     annotations, // <array of 3D landmarks [ landmark: <array of points> ]> (5 annotated landmarks)
    }
  ]
}
```

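For context, a minimal sketch of consuming this result object in the browser; the element id and config values are illustrative, not taken from this commit:

```js
import human from '@vladmandic/human';

// user config is merged with library defaults inside detect()
const config = { face: { enabled: true }, body: { enabled: true }, hand: { enabled: true } };

async function run() {
  const input = document.getElementById('video'); // video, image or canvas element
  const result = await human.detect(input, config);
  for (const face of result.face) console.log('face', face.box, 'age:', face.age, 'gender:', face.gender, 'iris:', face.iris);
  for (const body of result.body) console.log('body score:', body.score, 'keypoints:', body.keypoints.length);
  for (const hand of result.hand) console.log('hand', hand.box, 'landmarks:', hand.landmarks.length);
}

run();
```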
@@ -286,4 +300,6 @@ Library can also be used on mobile devices

## Todo

- Improve detection of smaller faces
- Tweak default parameters
- Verify age/gender models
- Make it work with multiple hands

@@ -215,9 +215,11 @@ function setupGUI() {
  settings = QuickSettings.create(10, 10, 'Settings', document.getElementById('main'));
  settings.addRange('FPS', 0, 100, 0, 1);
  settings.addBoolean('Pause', false, (val) => {
-   if (val) document.getElementById('video').pause();
-   else document.getElementById('video').play();
-   runHumanDetect();
+   const video = document.getElementById('video');
+   const canvas = document.getElementById('canvas');
+   if (val) video.pause();
+   else video.play();
+   runHumanDetect(video, canvas);
  });
  settings.addHTML('line1', '<hr>'); settings.hideTitle('line1');
  settings.addBoolean('Draw Boxes', false);

@@ -283,10 +285,10 @@ async function setupCamera() {
  video.srcObject = stream;
  return new Promise((resolve) => {
    video.onloadedmetadata = () => {
-     resolve(video);
+     video.width = video.videoWidth;
+     video.height = video.videoHeight;
+     video.play();
+     resolve(video);
    };
  });
}

demo/demo-node.js (new file)

@@ -0,0 +1,68 @@
const fs = require('fs');
const process = require('process');
const console = require('console');
const tf = require('@tensorflow/tfjs-node');
const human = require('..'); // this would be '@vladmandic/human'

const logger = new console.Console({
  stdout: process.stdout,
  stderr: process.stderr,
  ignoreErrors: true,
  groupIndentation: 2,
  inspectOptions: {
    showHidden: true,
    depth: 5,
    colors: true,
    showProxy: true,
    maxArrayLength: 1024,
    maxStringLength: 10240,
    breakLength: 200,
    compact: 64,
    sorted: false,
    getters: true,
  },
});

const config = {
  face: {
    enabled: false,
    detector: { modelPath: 'file://models/blazeface/model.json', inputSize: 128, maxFaces: 10, skipFrames: 5, minConfidence: 0.8, iouThreshold: 0.3, scoreThreshold: 0.75 },
    mesh: { enabled: true, modelPath: 'file://models/facemesh/model.json', inputSize: 192 },
    iris: { enabled: true, modelPath: 'file://models/iris/model.json', inputSize: 192 },
    age: { enabled: true, modelPath: 'file://models/ssrnet-age/imdb/model.json', inputSize: 64, skipFrames: 5 },
    gender: { enabled: true, modelPath: 'file://models/ssrnet-gender/imdb/model.json' },
  },
  body: { enabled: true, modelPath: 'file://models/posenet/model.json', inputResolution: 257, outputStride: 16, maxDetections: 5, scoreThreshold: 0.75, nmsRadius: 20 },
  hand: {
    enabled: false,
    inputSize: 256,
    skipFrames: 5,
    minConfidence: 0.8,
    iouThreshold: 0.3,
    scoreThreshold: 0.75,
    detector: { anchors: 'file://models/handdetect/anchors.json', modelPath: 'file://models/handdetect/model.json' },
    skeleton: { modelPath: 'file://models/handskeleton/model.json' },
  },
};

async function detect(input, output) {
  await tf.setBackend('tensorflow');
  await tf.ready();
  logger.info('TFJS Flags:', tf.env().features);
  logger.log('Loading:', input);
  const buffer = fs.readFileSync(input);
  const image = tf.node.decodeImage(buffer);
  logger.log('Processing:', image.shape);
  const result = await human.detect(image, config);
  logger.log(result);
  // Draw detected data and save processed image
  logger.log('Saving:', output);
}

async function main() {
  if (process.argv.length !== 4) logger.error('Parameters: <input image> <output image>');
  else if (!fs.existsSync(process.argv[2])) logger.error(`File not found: ${process.argv[2]}`);
  else detect(process.argv[2], process.argv[3]);
}

main();

demo-webworker demo (new file)

@@ -0,0 +1,22 @@
import human from '../dist/human.esm.js';

onmessage = async (msg) => {
  const result = await human.detect(msg.data.image, msg.data.config);
  postMessage(result);
};

/*
  web workers are finicky:
  - cannot pass an HTMLImage or HTMLVideo element to a web worker, so we need to pass a canvas instead
  - a canvas can execute transferControlToOffscreen() and then becomes an OffscreenCanvas which can be passed to a worker, but...
    a canvas that already has a rendering context cannot be transferred (basically, after the first time getContext() is executed on it)

  which means that if we pass the main canvas that results will be rendered on,
  then all operations on it must happen inside the web worker and we cannot touch it in the main thread at all.
  doable, but... how do we paint a video frame on it before we pass it?

  and if we instead create a new offscreen canvas that we drew the video frame on, pass its imageData and return results from the worker,
  then there is the overhead of creating it and it ends up being slower than executing in the main thread
*/

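To make the trade-off described in the comment concrete, here is a minimal sketch of the main-thread side that draws a video frame onto a temporary canvas and posts its ImageData to the worker above. The worker path, element id and config are illustrative assumptions, not taken from this commit:

```js
// main-thread sketch under the assumptions stated above
const worker = new Worker('demo-webworker.js', { type: 'module' }); // hypothetical path; a module worker is needed for the import above
const video = document.getElementById('video'); // hypothetical element id
const scratch = document.createElement('canvas'); // temporary canvas used only to grab frame pixels

function detectFrame(config) {
  scratch.width = video.videoWidth;
  scratch.height = video.videoHeight;
  const ctx = scratch.getContext('2d');
  ctx.drawImage(video, 0, 0, scratch.width, scratch.height);
  const image = ctx.getImageData(0, 0, scratch.width, scratch.height);
  worker.postMessage({ image, config }); // ImageData is structured-cloneable
}

worker.onmessage = (msg) => {
  console.log(msg.data);  // the result object documented in the README
  detectFrame({});        // request detection for the next frame
};

video.onplay = () => detectFrame({});
```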
package.json
@@ -3,7 +3,7 @@
  "version": "0.2.8",
  "description": "human: 3D Face Detection, Iris Tracking and Age & Gender Prediction",
  "sideEffects": false,
- "main": "src/index.js",
+ "main": "dist/human.node.js",
  "module": "dist/human.esm.js",
  "browser": "dist/human.js",
  "author": "Vladimir Mandic <mandic00@live.com>",

@@ -20,7 +20,8 @@
    "url": "git+https://github.com/vladmandic/human.git"
  },
  "dependencies": {
-   "@tensorflow/tfjs": "^2.6.0"
+   "@tensorflow/tfjs": "^2.6.0",
+   "@tensorflow/tfjs-node": "^2.6.0"
  },
  "devDependencies": {
    "esbuild": "^0.7.13",

@@ -33,10 +34,12 @@
    "rimraf": "^3.0.2"
  },
  "scripts": {
-   "lint": "eslint src/*",
-   "build": "rimraf dist/ && npm run build-esm && npm run build-iife",
-   "build-esm": "esbuild --bundle --platform=browser --sourcemap --target=esnext --format=esm --minify --outfile=dist/human.esm.js src/index.js",
-   "build-iife": "esbuild --bundle --platform=browser --sourcemap --target=esnext --format=iife --minify --global-name=human --outfile=dist/human.js src/index.js"
+   "start": "node --trace-warnings --trace-uncaught --no-deprecation demo/demo-node.js",
+   "lint": "eslint src/*.js demo/*.js",
+   "build": "rimraf dist/ && npm run build-esm && npm run build-iife && npm run build-node",
+   "build-esm": "esbuild --bundle --platform=browser --sourcemap --target=esnext --format=esm --minify --external:fs --outfile=dist/human.esm.js src/index.js",
+   "build-iife": "esbuild --bundle --platform=browser --sourcemap --target=esnext --format=iife --minify --external:fs --global-name=human --outfile=dist/human.js src/index.js",
+   "build-node": "esbuild --bundle --platform=node --sourcemap --target=esnext --format=cjs --external:@tensorflow --outfile=dist/human.node.js src/index.js"
  },
  "keywords": [
    "tensorflowjs",

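With these scripts in place, the typical local workflow looks roughly like this (a sketch; the bundle names come from the esbuild commands above):

```shell
npm run lint     # eslint over src/ and demo/
npm run build    # cleans dist/ and produces dist/human.esm.js, dist/human.js and dist/human.node.js
npm start        # runs demo/demo-node.js (expects <input image> <output image> arguments)
```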
@@ -17,9 +17,13 @@ async function loadHandPoseModel(url) {
// of bounding boxes, each of which is assigned a score during prediction. The
// anchors define the coordinates of these boxes.
async function loadAnchors(url) {
- return tf.util
-   .fetch(url)
-   .then((d) => d.json());
+ if (tf.env().features.IS_NODE) {
+   // eslint-disable-next-line global-require
+   const fs = require('fs');
+   const data = await fs.readFileSync(url.replace('file://', ''));
+   return JSON.parse(data);
+ }
+ return tf.util.fetch(url).then((d) => d.json());
}

/**

src/index.js
@@ -31,53 +31,55 @@ function mergeDeep(...objects) {
}

async function detect(input, userConfig) {
-  const config = mergeDeep(defaults, userConfig);
+  // eslint-disable-next-line no-async-promise-executor
+  return new Promise(async (resolve) => {
+    const config = mergeDeep(defaults, userConfig);

-  // load models if enabled
-  if (config.body.enabled && !models.posenet) models.posenet = await posenet.load(config.body);
-  if (config.hand.enabled && !models.handpose) models.handpose = await handpose.load(config.hand);
-  if (config.face.enabled && !models.facemesh) models.facemesh = await facemesh.load(config.face);
-  if (config.face.age.enabled) await ssrnet.loadAge(config);
-  if (config.face.gender.enabled) await ssrnet.loadGender(config);
+    // load models if enabled
+    if (config.face.age.enabled) await ssrnet.loadAge(config);
+    if (config.face.gender.enabled) await ssrnet.loadGender(config);
+    if (config.body.enabled && !models.posenet) models.posenet = await posenet.load(config.body);
+    if (config.hand.enabled && !models.handpose) models.handpose = await handpose.load(config.hand);
+    if (config.face.enabled && !models.facemesh) models.facemesh = await facemesh.load(config.face);

-  tf.engine().startScope();
+    tf.engine().startScope();

-  // run posenet
-  let poseRes = [];
-  if (config.body.enabled) poseRes = await models.posenet.estimateMultiplePoses(input, config.body);
+    // run posenet
+    let poseRes = [];
+    if (config.body.enabled) poseRes = await models.posenet.estimateMultiplePoses(input, config.body);

-  // run handpose
-  let handRes = [];
-  if (config.hand.enabled) handRes = await models.handpose.estimateHands(input, config.hand);
+    // run handpose
+    let handRes = [];
+    if (config.hand.enabled) handRes = await models.handpose.estimateHands(input, config.hand);

-  // run facemesh, includes blazeface and iris
-  const faceRes = [];
-  if (config.face.enabled) {
-    const faces = await models.facemesh.estimateFaces(input, config.face);
-    for (const face of faces) {
-      // run ssr-net age & gender, inherits face from blazeface
-      const ssrdata = (config.face.age.enabled || config.face.gender.enabled) ? await ssrnet.predict(face.image, config) : {};
-      face.image.dispose();
-      // iris: array[ bottom, left, top, right, center ]
-      const iris = (face.annotations.leftEyeIris && face.annotations.rightEyeIris)
-        ? Math.max(face.annotations.leftEyeIris[3][0] - face.annotations.leftEyeIris[1][0], face.annotations.rightEyeIris[3][0] - face.annotations.rightEyeIris[1][0])
-        : 0;
-      faceRes.push({
-        confidence: face.confidence,
-        box: face.box,
-        mesh: face.mesh,
-        annotations: face.annotations,
-        age: ssrdata.age,
-        gender: ssrdata.gender,
-        iris: (iris !== 0) ? Math.trunc(100 * 11.7 / iris) / 100 : 0,
-      });
-    }
-  }
+    // run facemesh, includes blazeface and iris
+    const faceRes = [];
+    if (config.face.enabled) {
+      const faces = await models.facemesh.estimateFaces(input, config.face);
+      for (const face of faces) {
+        // run ssr-net age & gender, inherits face from blazeface
+        const ssrdata = (config.face.age.enabled || config.face.gender.enabled) ? await ssrnet.predict(face.image, config) : {};
+        face.image.dispose();
+        // iris: array[ bottom, left, top, right, center ]
+        const iris = (face.annotations.leftEyeIris && face.annotations.rightEyeIris)
+          ? Math.max(face.annotations.leftEyeIris[3][0] - face.annotations.leftEyeIris[1][0], face.annotations.rightEyeIris[3][0] - face.annotations.rightEyeIris[1][0])
+          : 0;
+        faceRes.push({
+          confidence: face.confidence,
+          box: face.box,
+          mesh: face.mesh,
+          annotations: face.annotations,
+          age: ssrdata.age,
+          gender: ssrdata.gender,
+          iris: (iris !== 0) ? Math.trunc(100 * 11.7 / iris) / 100 : 0,
+        });
+      }
+    }

-  tf.engine().endScope();
+    tf.engine().endScope();

-  // combine results
-  return { face: faceRes, body: poseRes, hand: handRes };
+    // combine results
+    resolve({ face: faceRes, body: poseRes, hand: handRes });
+  });
}

exports.detect = detect;

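Since `detect()` now resolves through an explicitly constructed Promise, callers can use either `await` or `.then()`. A brief NodeJS-flavored sketch; the input file name is a placeholder and, as noted earlier, only body detection currently works under `tfjs-node`:

```js
const fs = require('fs');
const tf = require('@tensorflow/tfjs-node');
const human = require('@vladmandic/human');

// face and hand disabled to match the NodeJS limitations noted in the README;
// in NodeJS the default model paths may need the file:// overrides shown in demo-node.js above
const image = tf.node.decodeImage(fs.readFileSync('input.jpg'));
human.detect(image, { face: { enabled: false }, hand: { enabled: false } })
  .then((result) => console.log(result.body));
```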