mirror of https://github.com/vladmandic/human
add node-webcam demo
parent 374a5a15c1
commit 5d762aa93e
@@ -1,11 +1,10 @@
 // @ts-nocheck
 const fs = require('fs');
 // eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require
 const log = require('@vladmandic/pilogger');

 // workers actually import tfjs and human modules
 // eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require
 // eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
 const tf = require('@tensorflow/tfjs-node');
 const Human = require('../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;

@@ -38,7 +37,7 @@ const myConfig = {
 // you can add any pre-processing here such as resizing, etc.
 async function image(img) {
   const buffer = fs.readFileSync(img);
-  const tensor = tf.tidy(() => tf.node.decodeImage(buffer).toFloat().expandDims());
+  const tensor = human.tf.tidy(() => human.tf.node.decodeImage(buffer).toFloat().expandDims());
   return tensor;
 }

@@ -65,10 +64,10 @@ async function main() {
     log.data('Worker received message:', process.pid, msg); // generic log
   });

-  // wait until tf is ready
-  await tf.ready();
   // create instance of human
   human = new Human(myConfig);
+  // wait until tf is ready
+  await human.tf.ready();
   // pre-load models
   log.state('Worker: PID:', process.pid, `TensorFlow/JS ${human.tf.version_core} Human ${human.version} Backend: ${human.tf.getBackend()}`);
   await human.load();
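Note: the swap above is the pattern this commit applies across all demos: instead of calling the separately imported `tf`, everything goes through `human.tf`, the tfjs instance bundled with the Human library, so tensor ops and the library share a single engine. A minimal sketch of the pattern, assuming the `@vladmandic/human` package entry resolves to the node build (`human.tf`, `version_core`, and `getBackend()` all appear in the diff itself):

    // sketch: use Human's bundled tfjs instance instead of a second import
    const tf = require('@tensorflow/tfjs-node'); // still required first so the node backend registers
    const Human = require('@vladmandic/human').default;
    const human = new Human({ backend: 'tensorflow' });
    async function init() {
      await human.tf.ready(); // same engine the library itself uses
      console.log('tfjs', human.tf.version_core, 'backend', human.tf.getBackend());
    }
    init();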
@@ -0,0 +1,60 @@
+const util = require('util');
+const log = require('@vladmandic/pilogger');
+const nodeWebCam = require('node-webcam');
+// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
+const tf = require('@tensorflow/tfjs-node');
+// load specific version of Human library that matches TensorFlow mode
+const Human = require('../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
+
+// options for node-webcam
+const optionsCamera = {
+  callbackReturn: 'buffer', // return the raw buffer that `fswebcam` writes to disk, with no additional processing, so it's fastest
+  saveShots: false, // don't save processed frame to disk; note that a temp file is still created by fswebcam, hence the recommendation for tmpfs
+};
+
+// options for human
+const optionsHuman = {
+  backend: 'tensorflow',
+  modelBasePath: 'file://node_modules/@vladmandic/human/models/',
+};
+
+const camera = nodeWebCam.create(optionsCamera);
+const capture = util.promisify(camera.capture.bind(camera)); // bind so promisify doesn't detach `this` from the webcam instance
+const human = new Human(optionsHuman);
+const results = [];
+
+const buffer2tensor = (buffer) => human.tf.tidy(() => { // tidy auto-releases intermediate tensors
+  const decode = human.tf.node.decodeImage(buffer, 3);
+  let expand;
+  if (decode.shape[2] === 4) { // input is in rgba format, need to convert to rgb
+    const channels = human.tf.split(decode, 4, 2); // split rgba to channels
+    const rgb = human.tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb and ignore alpha
+    expand = human.tf.reshape(rgb, [1, decode.shape[0], decode.shape[1], 3]); // move extra dim from the end of tensor and use it as batch number instead
+  } else {
+    expand = human.tf.expandDims(decode, 0); // input is rgb so use as-is
+  }
+  const cast = human.tf.cast(expand, 'float32');
+  return cast;
+});
+
+async function process() {
+  // trigger next frame every 5 sec
+  // triggered here before actual capture and detection since we assume it will complete in less than 5sec
+  // so it's as close as possible to a real 5sec interval and not 5sec + detection time
+  // if there is a chance detection takes longer than the loop trigger, the trigger should move to the end of the function instead
+  setTimeout(() => process(), 5000);
+
+  const buffer = await capture(); // gets the (default) jpeg data from webcam
+  const tensor = buffer2tensor(buffer); // create tensor from image buffer
+  const res = await human.detect(tensor); // run detection
+
+  // do whatever here with res
+  // or just append it to the results array that will contain all processed results over time
+  results.push(res);
+
+  // alternatively to triggering every 5sec, simply trigger next frame as fast as possible
+  // setImmediate(() => process());
+}
+
+log.header();
+process();
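For reference, a hedged sketch of consuming the accumulated `results` array; the `face`, `body`, and `hand` fields follow Human's documented result object, and `summarize` is a hypothetical helper, not part of the demo. Note also that the demo never disposes `tensor` inside the loop, so a long run will slowly grow tensor memory; calling `human.tf.dispose(tensor)` after `human.detect()`, as the other demo in this commit does, avoids that.

    // hypothetical helper: one-line summary per captured frame
    function summarize() {
      for (const [i, res] of results.entries()) {
        log.data(`frame ${i}: ${res.face?.length || 0} face(s) ${res.body?.length || 0} body(s) ${res.hand?.length || 0} hand(s)`);
      }
    }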
@@ -4,6 +4,7 @@ const process = require('process');
 const fetch = require('node-fetch').default;

+// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
 // eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
 const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');

 // load specific version of Human library that matches TensorFlow mode

@@ -38,10 +39,10 @@ const myConfig = {
 };

 async function init() {
-  // wait until tf is ready
-  await tf.ready();
   // create instance of human
   human = new Human(myConfig);
+  // wait until tf is ready
+  await human.tf.ready();
   // pre-load models
   log.info('Human:', human.version);
   log.info('Active Configuration', human.config);
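Besides `human.load()`, the library also exposes `human.warmup()`, which runs a dummy inference so the first real `detect()` call isn't slowed down by kernel compilation and memory allocation; a hedged sketch of extending `init()` with it:

    async function init() {
      human = new Human(myConfig);
      await human.tf.ready();
      await human.load();   // pre-load models as in the hunk above
      await human.warmup(); // optional: dummy inference to pay the first-run cost upfront
    }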
@@ -66,7 +67,7 @@ async function detect(input) {
   // decode image using tfjs-node so we don't need external dependencies
   // can also be done using canvas.js or some other 3rd party image library
   if (!buffer) return {};
-  const tensor = tf.tidy(() => {
+  const tensor = human.tf.tidy(() => {
     const decode = human.tf.node.decodeImage(buffer, 3);
     let expand;
     if (decode.shape[2] === 4) { // input is in rgba format, need to convert to rgb

@@ -92,7 +93,7 @@ async function detect(input) {
   }

   // dispose image tensor as we no longer need it
-  tf.dispose(tensor);
+  human.tf.dispose(tensor);

   // print data to console
   log.data('Results:');
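Since every call to `detect()` here creates one input tensor, disposing it keeps the tensor count flat; a small sketch of verifying that with `tf.memory()`, which is part of the tfjs core API:

    // sanity check: numTensors should be stable across iterations when dispose() is called
    async function checkLeak(buffer) {
      const before = human.tf.memory().numTensors;
      const tensor = human.tf.tidy(() => human.tf.node.decodeImage(buffer, 3).toFloat().expandDims());
      await human.detect(tensor);
      human.tf.dispose(tensor);
      log.data('tensors:', human.tf.memory().numTensors, 'was:', before);
    }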