added blazeface back and front models

pull/280/head
Vladimir Mandic 2020-10-15 20:20:37 -04:00
parent 699ab9f309
commit 03db7fd8da
5 changed files with 20 additions and 21 deletions

View File

@@ -218,12 +218,13 @@ human.defaults = {
face: {
enabled: true, // controls if the specified module is enabled (note: a module is not loaded until it is required)
detector: {
modelPath: '../models/blazeface/model.json', // path to specific pre-trained model
modelPath: '../models/blazeface/tfhub/model.json', // can be 'tfhub', 'front' or 'back'
inputSize: 128, // 128 for tfhub and front models, 256 for back
maxFaces: 10, // how many faces we are trying to analyze; limiting the number in busy scenes results in higher performance
skipFrames: 10, // how many frames to skip before re-running bounding box detection
minConfidence: 0.8, // threshold for discarding a prediction
minConfidence: 0.5, // threshold for discarding a prediction
iouThreshold: 0.3, // threshold for deciding whether boxes overlap too much in non-maximum suppression
scoreThreshold: 0.75, // threshold for deciding when to remove boxes based on score in non-maximum suppression
scoreThreshold: 0.7, // threshold for deciding when to remove boxes based on score in non-maximum suppression
},
mesh: {
enabled: true,
@@ -235,12 +236,12 @@ human.defaults = {
},
age: {
enabled: true,
modelPath: '../models/ssrnet-imdb-age/model.json',
modelPath: '../models/ssrnet-age/imdb/model.json', // can be 'imdb' or 'wiki'
skipFrames: 10, // how many frames to skip before re-running bounding box detection
},
gender: {
enabled: true,
modelPath: '../models/ssrnet-imdb-gender/model.json',
modelPath: '../models/ssrnet-gender/imdb/model.json', // can be 'imdb' or 'wiki'
},
emotion: {
enabled: true,
@@ -254,15 +255,15 @@ human.defaults = {
enabled: true,
modelPath: '../models/posenet/model.json',
maxDetections: 5, // how many poses we are trying to analyze; limiting the number in busy scenes results in higher performance
scoreThreshold: 0.75, // threshold for deciding when to remove boxes based on score in non-maximum suppression
scoreThreshold: 0.7, // threshold for deciding when to remove boxes based on score in non-maximum suppression
nmsRadius: 20, // radius for deciding points are too close in non-maximum suppression
},
hand: {
enabled: true,
skipFrames: 10, // how many frames to skip before re-running bounding box detection
minConfidence: 0.8, // threshold for discarding a prediction
minConfidence: 0.5, // threshold for discarding a prediction
iouThreshold: 0.3, // threshold for deciding whether boxes overlap too much in non-maximum suppression
scoreThreshold: 0.75, // threshold for deciding when to remove boxes based on score in non-maximum suppression
scoreThreshold: 0.7, // threshold for deciding when to remove boxes based on score in non-maximum suppression
detector: {
anchors: '../models/handdetect/anchors.json',
modelPath: '../models/handdetect/model.json',
@@ -361,14 +362,14 @@ For example, on a desktop with a low-end nVidia GTX1050 it can perform multiple
Performance per module:
- Enabled all: 10 FPS
- Face Detect: 80 FPS
- Face Detect: 80 FPS (standalone)
- Face Geometry: 30 FPS (includes face detect)
- Face Iris: 25 FPS (includes face detect and face geometry)
- Age: 60 FPS (includes face detect)
- Gender: 60 FPS (includes face detect)
- Emotion: 60 FPS (includes face detect)
- Hand: 40 FPS
- Body: 50 FPS
- Hand: 40 FPS (standalone)
- Body: 50 FPS (standalone)
The library can also be used on mobile devices
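As a rough illustration of the configuration changes in this file, the sketch below overrides the new defaults per call to select the larger 'back' detector. It is not taken from this commit: the import path, the exact '../models/blazeface/back/model.json' location, and the assumption that detect() merges the passed config over human.defaults are all unverified here.

import human from '../dist/human.esm.js'; // assumed ESM build path

const config = {
  face: {
    detector: {
      modelPath: '../models/blazeface/back/model.json', // assumed path for the 'back' variant
      inputSize: 256,       // per the comment above, 256 for the back model
      minConfidence: 0.5,   // new default in this commit
      scoreThreshold: 0.7,  // new default in this commit
    },
  },
};

async function run(input) {
  const result = await human.detect(input, config); // per-call config, as in the demo code below
  console.log(result);
}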

View File

@@ -13,8 +13,7 @@ onmessage = async (msg) => {
config = msg.data.config;
let result = {};
try {
// result = await human.detect(image, config);
result = {};
result = await human.detect(image, config);
} catch (err) {
result.error = err.message;
log('Worker thread error:', err.message);

View File

@@ -60,7 +60,7 @@ async function drawFace(result, canvas) {
const labelIris = face.iris ? `iris: ${face.iris}` : '';
const labelEmotion = face.emotion && face.emotion[0] ? `emotion: ${Math.trunc(100 * face.emotion[0].score)}% ${face.emotion[0].emotion}` : '';
ctx.fillStyle = ui.baseLabel;
ctx.fillText(`face ${labelAgeGender} ${labelIris} ${labelEmotion}`, face.box[0] + 2, face.box[1] + 22, face.box[2]);
ctx.fillText(`${Math.trunc(100 * face.confidence)}% face ${labelAgeGender} ${labelIris} ${labelEmotion}`, face.box[0] + 2, face.box[1] + 22);
ctx.stroke();
ctx.lineWidth = 1;
if (face.mesh) {
@@ -238,7 +238,7 @@ function webWorker(input, image, canvas) {
log('Creating worker thread');
worker = new Worker('demo-esm-webworker.js', { type: 'module' });
// after receiving message from webworker, parse&draw results and send new frame for processing
worker.addEventListener('message', async (msg) => drawResults(input, msg.data, canvas));
worker.addEventListener('message', (msg) => drawResults(input, msg.data, canvas));
}
// pass image data as arraybuffer to worker by reference to avoid copy
worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
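For context on the two demo changes above, the worker counterpart of this listener (demo-esm-webworker.js, partially shown in an earlier hunk) rebuilds the ImageData from the transferred buffer, runs detection, and posts the result back. This is a hedged sketch: only the detect call and the error handling come from the diff; the import path, the ImageData reconstruction, and the postMessage reply are assumptions.

import human from '../dist/human.esm.js'; // assumed ESM build path

onmessage = async (msg) => {
  // rebuild ImageData from the ArrayBuffer that the main thread transferred by reference
  const image = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
  const config = msg.data.config;
  let result = {};
  try {
    result = await human.detect(image, config); // re-enabled by this commit
  } catch (err) {
    result.error = err.message;
  }
  postMessage(result); // the main-thread listener above draws this and posts the next frame
};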

View File

@@ -2,8 +2,9 @@ export default {
face: {
enabled: true, // refers to the detector, but since all other face modules rely on it, this acts as a global switch
detector: {
modelPath: '../models/blazeface/model.json',
inputSize: 128, // fixed value
modelPath: '../models/blazeface/tfhub/model.json', // can be blazeface-front or blazeface-back
anchorSize: 128, // fixed regardless of model
inputSize: 128, // determined by the model: 128 for front and tfhub, 256 for back
maxFaces: 10, // maximum number of faces detected in the input, should be set to the minimum number for performance
skipFrames: 10, // how many frames to go without running the bounding box detector
minConfidence: 0.5, // threshold for discarding a prediction

View File

@@ -79,17 +79,15 @@ async function detect(input, userConfig) {
// run posenet
timeStamp = performance.now();
let poseRes = [];
tf.engine().startScope();
if (config.body.enabled) poseRes = await models.posenet.estimatePoses(input, config.body);
const poseRes = config.body.enabled ? await models.posenet.estimatePoses(input, config.body) : [];
tf.engine().endScope();
perf.body = Math.trunc(performance.now() - timeStamp);
// run handpose
timeStamp = performance.now();
let handRes = [];
tf.engine().startScope();
if (config.hand.enabled) handRes = await models.handpose.estimateHands(input, config.hand);
const handRes = config.hand.enabled ? await models.handpose.estimateHands(input, config.hand) : [];
tf.engine().endScope();
perf.hand = Math.trunc(performance.now() - timeStamp);
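The index.js change keeps the tf.engine() scope around each model call but replaces the let-plus-if pattern with a const ternary. A minimal sketch of that pattern follows, with a hypothetical model.estimate() standing in for estimatePoses()/estimateHands():

import * as tf from '@tensorflow/tfjs';

async function runScoped(model, input, enabled) {
  tf.engine().startScope(); // tensors allocated from here on are tracked by this scope
  const res = enabled ? await model.estimate(input) : []; // const ternary instead of let + if
  tf.engine().endScope(); // intermediate tensors created during the call are disposed here
  // posenet/handpose results are plain JS objects, so endScope() does not touch them
  return res;
}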