human/demo/nodejs/node-webcam.js

/**
 * Human demo for NodeJS
 *
 * Unsupported sample that uses the external utility `fswebcam` (via the `node-webcam` package)
 * to capture a snapshot from an attached webcam at regular intervals and process it using Human
 *
 * Note that [node-webcam](https://www.npmjs.com/package/node-webcam) is not part of Human dependencies and must be installed manually
 * A working version of `fswebcam` must be present on the system
 */
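
// a minimal way to run this demo, assuming it is started from the repository root
// with `node-webcam` installed (e.g. `npm install node-webcam`) and a working `fswebcam` binary available on the path:
//   node demo/nodejs/node-webcam.js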
let initial = true; // remember if this is the first run to print additional details
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const nodeWebCam = require('node-webcam'); // eslint-disable-line import/no-unresolved, node/no-missing-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
// options for node-webcam
const tempFile = 'webcam-snap'; // node-webcam requires writing the snapshot to a file; it is recommended to use tmpfs to avoid excessive disk writes
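// for example, on a typical linux system the temp file could be pointed at a tmpfs mount such as '/dev/shm/webcam-snap' (illustrative path)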
const optionsCamera = {
  callbackReturn: 'buffer', // return the raw image buffer that `fswebcam` writes to disk, with no additional processing, so it's the fastest option
  saveShots: false, // don't save the processed frame to disk; note that a temp file is still created by fswebcam, hence the tmpfs recommendation
};
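// node-webcam accepts further options such as capture resolution, image quality and device selection;
// anything not set above is left at node-webcam defaults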
const camera = nodeWebCam.create(optionsCamera);
// options for human
const optionsHuman = {
  modelBasePath: 'file://models/',
};
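// Human accepts many more configuration options (individual module toggles, backend selection, etc.);
// anything not set above is left at Human defaults, only the model location is overridden here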
const human = new Human.Human(optionsHuman);
function buffer2tensor(buffer) {
  return human.tf.tidy(() => {
    if (!buffer) return null;
    const decode = human.tf.node.decodeImage(buffer, 3);
    let expand;
    if (decode.shape[2] === 4) { // input is in rgba format, need to convert to rgb
      const channels = human.tf.split(decode, 4, 2); // split rgba into individual channels
      const rgb = human.tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb and ignore alpha
      expand = human.tf.reshape(rgb, [1, decode.shape[0], decode.shape[1], 3]); // reshape rgb and add a leading batch dimension
    } else {
      expand = human.tf.expandDims(decode, 0); // input is rgb so use as-is, just add a leading batch dimension
    }
    const cast = human.tf.cast(expand, 'float32');
    return cast;
  });
}
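// for example, a 1280x720 jpeg snapshot decodes to shape [720, 1280, 3],
// so buffer2tensor() returns a float32 tensor of shape [1, 720, 1280, 3], which human.detect() accepts directly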
async function detect() {
  // trigger next frame every 5 sec
  // the timer is started here, before the actual capture and detection, on the assumption that they complete in under 5 sec,
  // so the interval stays as close as possible to a real 5 sec instead of 5 sec plus detection time
  // if there is a chance that detection takes longer than the loop interval, the trigger should be moved to the end of the function instead
  setTimeout(() => detect(), 5000);

  camera.capture(tempFile, (err, data) => { // gets the (default) jpeg data from the webcam
    if (err) {
      log.error('error capturing webcam:', err);
    } else {
      const tensor = buffer2tensor(data); // create tensor from image buffer
      if (initial) log.data('input tensor:', tensor?.shape);
      human.detect(tensor) // eslint-disable-line promise/no-promise-in-callback
        .then((result) => {
          if (result && result.face && result.face.length > 0) {
            for (let i = 0; i < result.face.length; i++) {
              const face = result.face[i];
              const emotion = face.emotion?.length > 0 ? face.emotion.reduce((prev, curr) => (prev.score > curr.score ? prev : curr)) : null; // pick the highest-scoring emotion
              log.data(`detected face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion?.score} emotion:${emotion?.emotion} iris:${face.iris}`);
            }
          } else {
            log.data(' Face: N/A');
          }
          human.tf.dispose(tensor); // dispose the input tensor to avoid accumulating memory in a long-running loop
          return result;
        })
        .catch((error) => log.error('human detect error:', error));
    }
    initial = false;
  });

  // alternatively to triggering every 5 sec, simply trigger the next frame as fast as possible
  // setImmediate(() => detect());
}
async function main() {
  log.info('human:', human.version, 'tf:', tf.version_core);
  camera.list((list) => {
    log.data('detected camera:', list);
  });
  await human.load();
  detect();
}
log.header();
main();