mirror of https://github.com/vladmandic/human
update node-webcam
parent 7d3915cf2a
commit 26b3fa28cf
@@ -6,8 +6,7 @@
  * Working version of fswebcam must be present on the system
 */

-const util = require('util');
-const process = require('process');
+let initial = true; // remember if this is the first run to print additional details
 const log = require('@vladmandic/pilogger');
 // eslint-disable-next-line node/no-missing-require
 const nodeWebCam = require('node-webcam');
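A working fswebcam binary is the one external prerequisite: node-webcam shells out to it for every capture on linux (on Debian/Ubuntu it is typically installed via the distribution package manager, e.g. apt install fswebcam), which is also why each snapshot passes through a file on disk before being read back as a buffer.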
@@ -18,6 +17,7 @@ const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
 const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;

 // options for node-webcam
+const tempFile = 'webcam-snap'; // node-webcam requires writing the snapshot to a file; tmpfs is recommended to avoid excessive disk writes
 const optionsCamera = {
   callbackReturn: 'buffer', // return whatever `fswebcam` writes to disk with no additional processing, so it is fastest
   saveShots: false, // don't save the processed frame to disk; note that the temp file is still created by fswebcam, hence the tmpfs recommendation
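For context, a minimal sketch of how these options are typically wired into node-webcam; the `output` and `device` values and the log line are illustrative assumptions, not part of this diff:

    const nodeWebCam = require('node-webcam');

    const optionsCamera = {
      callbackReturn: 'buffer', // hand back the raw jpeg bytes that fswebcam wrote to disk
      saveShots: false,         // don't keep a copy of the processed frame
      output: 'jpeg',           // assumption: fswebcam output format
      device: false,            // assumption: use the default system camera
    };
    const camera = nodeWebCam.create(optionsCamera); // wraps fswebcam on linux
    camera.capture('webcam-snap', (err, data) => {   // 'webcam-snap' matches tempFile above
      if (!err) console.log('captured', data.length, 'bytes'); // data is a Buffer because of callbackReturn
    });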
@@ -31,35 +31,23 @@ const optionsHuman = {
 };
 const human = new Human(optionsHuman);

-const results = [];
-const list = util.promisify(camera.list);
-const capture = util.promisify(camera.capture);
-
-async function init() {
-  try {
-    const found = await list();
-    log.data('Camera data:', found);
-  } catch {
-    log.error('Could not access camera');
-    process.exit(1);
-  }
-}
-
-const buffer2tensor = human.tf.tidy((buffer) => {
-  if (!buffer) return null;
-  const decode = human.tf.node.decodeImage(buffer, 3);
-  let expand;
-  if (decode.shape[2] === 4) { // input is in rgba format, need to convert to rgb
-    const channels = human.tf.split(decode, 4, 2); // split rgba into channels
-    const rgb = human.tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb and ignore alpha
-    expand = human.tf.reshape(rgb, [1, decode.shape[0], decode.shape[1], 3]); // move extra dim from the end of tensor and use it as batch number instead
-  } else {
-    expand = human.tf.expandDims(decode, 0); // input is rgb so use as-is
-  }
-  const cast = human.tf.cast(expand, 'float32');
-  return cast;
-});
+function buffer2tensor(buffer) {
+  return human.tf.tidy(() => {
+    if (!buffer) return null;
+    const decode = human.tf.node.decodeImage(buffer, 3);
+    let expand;
+    if (decode.shape[2] === 4) { // input is in rgba format, need to convert to rgb
+      const channels = human.tf.split(decode, 4, 2); // split rgba into channels
+      const rgb = human.tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb and ignore alpha
+      expand = human.tf.reshape(rgb, [1, decode.shape[0], decode.shape[1], 3]); // move extra dim from the end of tensor and use it as batch number instead
+    } else {
+      expand = human.tf.expandDims(decode, 0); // input is rgb so use as-is
+    }
+    const cast = human.tf.cast(expand, 'float32');
+    return cast;
+  });
+}

 async function detect() {
   // trigger next frame every 5 sec
   // triggered here before actual capture and detection since we assume it will complete in less than 5 sec
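The rewritten `buffer2tensor` wraps `tf.tidy` inside a regular function because `tf.tidy(fn)` runs `fn` immediately with no arguments and disposes every intermediate tensor created inside it, keeping only the returned one; the previous form passed a callback that expected a `buffer` argument tidy never supplies. A minimal sketch of the tidy contract, with illustrative names:

    const tf = require('@tensorflow/tfjs-node');

    function double(input) {
      return tf.tidy(() => {                // the closure runs synchronously, right here
        const twice = tf.add(input, input); // intermediate tensor, disposed when tidy returns
        return tf.cast(twice, 'float32');   // the returned tensor survives the tidy scope
      });
    }

    const t = tf.tensor([1, 2, 3]);
    console.log(double(t).arraySync());     // [2, 4, 6]; `twice` was already disposed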
@@ -67,21 +55,36 @@ async function detect() {
   // if there is a chance of a race scenario where detection takes longer than the loop trigger, then the trigger should be at the end of the function instead
   setTimeout(() => detect(), 5000);

-  const buffer = await capture(); // gets the (default) jpeg data from webcam
-  const tensor = buffer2tensor(buffer); // create tensor from image buffer
-  if (tensor) {
-    const res = await human.detect(tensor); // run detection
-
-    // do whatever here with the res
-    // or just append it to results array that will contain all processed results over time
-    results.push(res);
-  }
+  camera.capture(tempFile, (err, data) => { // gets the (default) jpeg data from webcam
+    if (err) {
+      log.error('error capturing webcam:', err);
+    } else {
+      const tensor = buffer2tensor(data); // create tensor from image buffer
+      if (initial) log.data('input tensor:', tensor.shape);
+      // eslint-disable-next-line promise/no-promise-in-callback
+      human.detect(tensor).then((result) => {
+        if (result && result.face && result.face.length > 0) {
+          for (let i = 0; i < result.face.length; i++) {
+            const face = result.face[i];
+            const emotion = face.emotion.reduce((prev, curr) => (prev.score > curr.score ? prev : curr)); // pick the highest-scoring emotion
+            log.data(`detected face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} iris:${face.iris}`);
+          }
+        } else {
+          log.data(' Face: N/A');
+        }
+      });
+    }
+    initial = false;
+  });

   // alternatively to triggering every 5 sec, simply trigger the next frame as fast as possible
   // setImmediate(() => detect());
 }

 async function main() {
-  await init();
+  camera.list((list) => {
+    log.data('detected camera:', list);
+  });
+  await human.load();
   detect();
 }
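The race-scenario comment can be made concrete: if detection could outlast the 5-second interval, the reschedule belongs after detection completes rather than before the capture. A sketch of that variant, reusing the demo's identifiers (`detectSafe` is a hypothetical name, not in the commit):

    function detectSafe() {
      camera.capture(tempFile, (err, data) => {
        if (err) {
          log.error('error capturing webcam:', err);
          setTimeout(detectSafe, 5000); // still reschedule after a failed capture
          return;
        }
        const tensor = buffer2tensor(data);
        if (!tensor) {
          setTimeout(detectSafe, 5000); // nothing decodable this round, try again later
          return;
        }
        human.detect(tensor).then((result) => {
          log.data('faces detected:', result.face.length);
          human.tf.dispose(tensor);     // release the input tensor once detection is done
          setTimeout(detectSafe, 5000); // the next frame can never overlap this one
        });
      });
    }

Note also that awaiting human.load() in main() fetches the models once up front, so the first detect() call is not slowed by lazy model loading.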