add node-video sample

pull/134/head
Vladimir Mandic 2021-05-16 23:55:08 -04:00
parent c81273ef45
commit e49050a0db
5 changed files with 179 additions and 8 deletions

@ -11,6 +11,7 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
### **HEAD -> main** 2021/05/11 mandic00@live.com
- add node-webcam demo
- fix node build and update model signatures
### **1.8.4** 2021/05/11 mandic00@live.com

@ -75,7 +75,8 @@ Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) fo
## Options
As presented in the demo application...
> [demo/index.html](demo/index.html)
![Options visible in demo](assets/screenshot-menu.png)
@ -86,26 +87,32 @@ As presented in the demo application...
<br>
**Training image:**
> [demo/index.html](demo/index.html?image=%22../assets/human-sample-upper.jpg%22)
![Example Training Image](assets/screenshot-sample.png)
**Using static images:**
> [demo/index.html](demo/index.html?images=true)
![Example Using Image](assets/screenshot-images.jpg)
**Live WebCam view:**
> [demo/index.html](demo/index.html)
![Example Using WebCam](assets/screenshot-webcam.jpg)
**Face Similarity Matching:**
> [demo/facematch.html](demo/facematch.html)
![Face Matching](assets/screenshot-facematch.jpg)
**Face3D OpenGL Rendering:**
> [demo/face3d.html](demo/face3d.html)
![Face3D Rendering](assets/screenshot-face3d.jpg)
**468-Point Face Mesh Details:**
(view in full resolution to see keypoints)
![FaceMesh](assets/facemesh.png)
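All of the demos above drive the same library through the same API; as a point of reference, here is a minimal sketch of using Human directly in a browser page (the bundle path, models path, and element id below are assumptions for illustration, not part of this commit):

```js
// minimal browser usage sketch: run detection on every video frame
import Human from '../dist/human.esm.js'; // assumed path to the esm bundle

const human = new Human({ modelBasePath: '../models/' }); // assumed models location
const video = document.getElementById('video'); // assumed <video> element with an active stream

async function loop() {
  const result = await human.detect(video); // run all enabled models on the current frame
  console.log(result.face, result.gesture); // use results as needed
  requestAnimationFrame(loop); // schedule processing of the next frame
}
loop();
```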

@ -43,14 +43,10 @@ const ui = {
  columns: 2, // when processing sample images create this many columns
  useWorker: false, // use web workers for processing
  worker: 'index-worker.js',
  samples: ['../assets/sample6.jpg', '../assets/sample1.jpg', '../assets/sample4.jpg', '../assets/sample5.jpg', '../assets/sample3.jpg', '../assets/sample2.jpg'],
  compare: '../assets/sample-me.jpg',
  useWebRTC: false, // use webrtc as camera source instead of local webcam
  webRTCServer: 'http://localhost:8002',
  webRTCStream: 'reowhite',
  maxFPSframes: 10, // keep fps history for how many frames
  modelsPreload: true, // preload human models on startup
  modelsWarmup: true, // warmup human models on startup
  // internal variables
  busy: false, // internal camera busy flag
  menuWidth: 0, // internal
@ -67,6 +63,61 @@ const ui = {
  bench: true, // show gl fps benchmark window
  lastFrame: 0, // time of last frame processing
  viewportSet: false, // internal, has custom viewport been set
  // webrtc
  useWebRTC: false, // use webrtc as camera source instead of local webcam
  webRTCServer: 'http://localhost:8002',
  webRTCStream: 'reowhite',
  // sample images
  compare: '../assets/sample-me.jpg', // base image for face compare
  samples: [
    '../assets/sample6.jpg',
    '../assets/sample1.jpg',
    '../assets/sample4.jpg',
    '../assets/sample5.jpg',
    '../assets/sample3.jpg',
    '../assets/sample2.jpg',
  ],
  /*
  ui.samples = [
    '../private/daz3d/daz3d-brianna.jpg',
    '../private/daz3d/daz3d-chiyo.jpg',
    '../private/daz3d/daz3d-cody.jpg',
    '../private/daz3d/daz3d-drew-01.jpg',
    '../private/daz3d/daz3d-drew-02.jpg',
    '../private/daz3d/daz3d-ella-01.jpg',
    '../private/daz3d/daz3d-ella-02.jpg',
    '../private/daz3d/daz3d-_emotions01.jpg',
    '../private/daz3d/daz3d-_emotions02.jpg',
    '../private/daz3d/daz3d-_emotions03.jpg',
    '../private/daz3d/daz3d-_emotions04.jpg',
    '../private/daz3d/daz3d-_emotions05.jpg',
    '../private/daz3d/daz3d-gillian.jpg',
    '../private/daz3d/daz3d-ginnifer.jpg',
    '../private/daz3d/daz3d-hye-01.jpg',
    '../private/daz3d/daz3d-hye-02.jpg',
    '../private/daz3d/daz3d-kaia.jpg',
    '../private/daz3d/daz3d-karen.jpg',
    '../private/daz3d/daz3d-kiaria-01.jpg',
    '../private/daz3d/daz3d-kiaria-02.jpg',
    '../private/daz3d/daz3d-lilah-01.jpg',
    '../private/daz3d/daz3d-lilah-02.jpg',
    '../private/daz3d/daz3d-lilah-03.jpg',
    '../private/daz3d/daz3d-lila.jpg',
    '../private/daz3d/daz3d-lindsey.jpg',
    '../private/daz3d/daz3d-megah.jpg',
    '../private/daz3d/daz3d-selina-01.jpg',
    '../private/daz3d/daz3d-selina-02.jpg',
    '../private/daz3d/daz3d-snow.jpg',
    '../private/daz3d/daz3d-sunshine.jpg',
    '../private/daz3d/daz3d-taia.jpg',
    '../private/daz3d/daz3d-tuesday-01.jpg',
    '../private/daz3d/daz3d-tuesday-02.jpg',
    '../private/daz3d/daz3d-tuesday-03.jpg',
    '../private/daz3d/daz3d-zoe.jpg',
  ];
  */
};
// global variables
@ -657,6 +708,18 @@ async function main() {
  document.getElementById('loader').style.display = 'none';
  document.getElementById('play').style.display = 'block';
  for (const m of Object.values(menu)) m.hide();
  if (params.has('image')) {
    const image = JSON.parse(params.get('image'));
    log('overriding image:', image);
    ui.samples = [image];
    await detectSampleImages();
  }
  if (params.has('images')) {
    log('overriding images list:', JSON.parse(params.get('images')));
    await detectSampleImages();
  }
}
window.onload = main;
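Note that `params` is not defined in this hunk; presumably it is a `URLSearchParams` instance built earlier in demo/index.js, along the lines of this sketch:

```js
// hypothetical sketch of how params is presumably constructed elsewhere in the file
const params = new URLSearchParams(window.location.search);
```

Since the value is passed through `JSON.parse`, the URL must carry a quoted string, which is why the README links use `?image=%22../assets/human-sample-upper.jpg%22` (`%22` is a URL-encoded `"`).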

demo/node-video.js Normal file

@ -0,0 +1,83 @@
/*
uses ffmpeg to process video input and output a stream of motion jpeg images, which are then parsed for frame start/end markers by pipe2jpeg
each frame triggers an event with a jpeg buffer that can then be decoded and passed to human for processing
if you want to process frames at specific intervals, set the output fps to some value
if you want to process a live input stream, set the real-time flag and set the input as required
*/
const spawn = require('child_process').spawn;
const log = require('@vladmandic/pilogger');
const tf = require('@tensorflow/tfjs-node');
const Pipe2Jpeg = require('pipe2jpeg');
const Human = require('@vladmandic/human').default;
let count = 0; // processed frame counter
let busy = false; // busy flag so frames are skipped while a detect call is in progress
const inputFile = './test.mp4';
const humanConfig = {
  backend: 'tensorflow',
  modelBasePath: 'file://node_modules/@vladmandic/human/models/',
  debug: false,
  videoOptimized: true,
  async: true,
  filter: { enabled: false },
  face: {
    enabled: true,
    detector: { enabled: true, rotation: false },
    mesh: { enabled: true },
    iris: { enabled: true },
    description: { enabled: true },
    emotion: { enabled: true },
  },
  hand: { enabled: false },
  body: { enabled: false },
  object: { enabled: false },
};
const human = new Human(humanConfig);
const pipe2jpeg = new Pipe2Jpeg();
const ffmpegParams = [
  '-loglevel', 'quiet',
  // input
  // '-re', // optional: process video in real-time instead of as fast as possible
  '-i', `${inputFile}`, // input file
  // output
  '-an', // drop audio
  '-c:v', 'mjpeg', // use motion jpeg as output encoder
  '-pix_fmt', 'yuvj422p', // typical for mp4, may need different settings for some videos
  '-f', 'image2pipe', // pipe images as output
  // '-vf', 'fps=5,scale=800:600', // optional video filter, e.g. process at a fixed 5 fps or resize to a specific resolution
  'pipe:1', // output to stdout, which is then captured by pipe2jpeg
];
async function process(jpegBuffer) {
  if (busy) return; // skip this frame if a previous one is still processing
  busy = true;
  const decoded = tf.node.decodeJpeg(jpegBuffer, 3); // decode jpeg buffer to a raw rgb tensor
  const tensor = tf.expandDims(decoded, 0); // almost all tf models use the first dimension as the batch number, so add it
  decoded.dispose();
  log.state('input frame:', ++count, 'size:', jpegBuffer.length, 'decoded shape:', tensor.shape);
  const res = await human.detect(tensor);
  log.data('gesture', JSON.stringify(res.gesture));
  // add any further result processing here
  tensor.dispose(); // must dispose tensor to avoid a memory leak
  busy = false;
}
async function main() {
  log.header();
  await human.tf.ready();
  // models are loaded on first use; optionally pre-load them here with human.load()
  log.info('human:', human.version);
  pipe2jpeg.on('jpeg', (jpegBuffer) => process(jpegBuffer));
  const ffmpeg = spawn('ffmpeg', ffmpegParams, { stdio: ['ignore', 'pipe', 'ignore'] });
  ffmpeg.on('error', (error) => log.error('ffmpeg error:', error));
  ffmpeg.on('exit', (code, signal) => log.info('ffmpeg exit', code, signal));
  ffmpeg.stdout.pipe(pipe2jpeg);
}
main();
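The header comment mentions a real-time streaming variant; a hedged sketch of what the ffmpeg parameters could look like for a live input (the RTSP URL is a placeholder, not part of this commit):

```js
// hypothetical parameter list for processing a live stream instead of a local file
const liveStreamParams = [
  '-loglevel', 'quiet',
  '-re', // real-time flag, per the header comment above
  '-i', 'rtsp://user:password@camera.local/stream', // placeholder live input
  '-an', // drop audio
  '-c:v', 'mjpeg', // motion jpeg output encoder
  '-vf', 'fps=5,scale=800:600', // sample at a fixed 5 fps and downscale before detection
  '-f', 'image2pipe', // pipe images to stdout
  'pipe:1',
];
```

Passing a list like this in place of `ffmpegParams` should be the only change the sample needs, since pipe2jpeg only sees the resulting mjpeg stream.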

@ -15,3 +15,20 @@
2021-05-11 15:08:10 INFO:  Generate types: ["src/human.ts"]
2021-05-11 15:08:14 INFO:  Update Change log: ["/home/vlado/dev/human/CHANGELOG.md"]
2021-05-11 15:08:14 INFO:  Generate TypeDocs: ["src/human.ts"]
2021-05-16 23:54:29 INFO:  @vladmandic/human version 1.8.4
2021-05-16 23:54:29 INFO:  User: vlado Platform: linux Arch: x64 Node: v16.0.0
2021-05-16 23:54:29 INFO:  Build: file startup all type: production config: {"minifyWhitespace":true,"minifyIdentifiers":true,"minifySyntax":true}
2021-05-16 23:54:29 STATE: Build for: node type: tfjs: {"imports":1,"importBytes":39,"outputBytes":1284,"outputFiles":"dist/tfjs.esm.js"}
2021-05-16 23:54:30 STATE: Build for: node type: node: {"imports":35,"importBytes":413382,"outputBytes":372832,"outputFiles":"dist/human.node.js"}
2021-05-16 23:54:30 STATE: Build for: nodeGPU type: tfjs: {"imports":1,"importBytes":43,"outputBytes":1292,"outputFiles":"dist/tfjs.esm.js"}
2021-05-16 23:54:30 STATE: Build for: nodeGPU type: node: {"imports":35,"importBytes":413390,"outputBytes":372836,"outputFiles":"dist/human.node-gpu.js"}
2021-05-16 23:54:30 STATE: Build for: nodeWASM type: tfjs: {"imports":1,"importBytes":81,"outputBytes":1359,"outputFiles":"dist/tfjs.esm.js"}
2021-05-16 23:54:30 STATE: Build for: nodeWASM type: node: {"imports":35,"importBytes":413457,"outputBytes":372908,"outputFiles":"dist/human.node-wasm.js"}
2021-05-16 23:54:30 STATE: Build for: browserNoBundle type: tfjs: {"imports":1,"importBytes":2488,"outputBytes":1394,"outputFiles":"dist/tfjs.esm.js"}
2021-05-16 23:54:30 STATE: Build for: browserNoBundle type: esm: {"imports":35,"importBytes":413492,"outputBytes":229716,"outputFiles":"dist/human.esm-nobundle.js"}
2021-05-16 23:54:30 STATE: Build for: browserBundle type: tfjs: {"modules":1274,"moduleBytes":4114813,"imports":7,"importBytes":2488,"outputBytes":1111318,"outputFiles":"dist/tfjs.esm.js"}
2021-05-16 23:54:31 STATE: Build for: browserBundle type: iife: {"imports":35,"importBytes":1523416,"outputBytes":1337370,"outputFiles":"dist/human.js"}
2021-05-16 23:54:31 STATE: Build for: browserBundle type: esm: {"imports":35,"importBytes":1523416,"outputBytes":1337362,"outputFiles":"dist/human.esm.js"}
2021-05-16 23:54:31 INFO:  Generate types: ["src/human.ts"]
2021-05-16 23:54:36 INFO:  Update Change log: ["/home/vlado/dev/human/CHANGELOG.md"]
2021-05-16 23:54:36 INFO:  Generate TypeDocs: ["src/human.ts"]