mirror of https://github.com/vladmandic/human

commit 03db7fd8da (parent 699ab9f309)
added blazeface back and front models

README.md: 23 changed lines
@@ -218,12 +218,13 @@ human.defaults = {
   face: {
     enabled: true, // controls if specified module is enabled (note: module is not loaded until it is required)
     detector: {
-      modelPath: '../models/blazeface/model.json', // path to specific pre-trained model
+      modelPath: '../models/blazeface/tfhub/model.json', // can be 'tfhub', 'front' or 'back'
+      inputSize: 128, // 128 for tfhub and front models, 256 for back
       maxFaces: 10, // how many faces are we trying to analyze. limiting number in busy scenes will result in higher performance
       skipFrames: 10, // how many frames to skip before re-running bounding box detection
-      minConfidence: 0.8, // threshold for discarding a prediction
+      minConfidence: 0.5, // threshold for discarding a prediction
       iouThreshold: 0.3, // threshold for deciding whether boxes overlap too much in non-maximum suppression
-      scoreThreshold: 0.75, // threshold for deciding when to remove boxes based on score in non-maximum suppression
+      scoreThreshold: 0.7, // threshold for deciding when to remove boxes based on score in non-maximum suppression
     },
     mesh: {
       enabled: true,
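The new `modelPath` and `inputSize` options travel together: the tfhub and front variants expect 128×128 input, the back variant 256×256. A minimal sketch of selecting the back model at detection time — assuming the config object passed to `human.detect()` merges over `human.defaults`, and assuming the back model's path follows the same pattern as the tfhub one (the exact path is not spelled out in this hunk):

```js
// sketch: switch to the blazeface back model (path and merge behavior assumed)
const config = {
  face: {
    detector: {
      modelPath: '../models/blazeface/back/model.json', // assumed 'back' path
      inputSize: 256, // back model input size; tfhub and front use 128
    },
  },
};
const result = await human.detect(image, config); // inside an async function
```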
@@ -235,12 +236,12 @@ human.defaults = {
     },
     age: {
       enabled: true,
-      modelPath: '../models/ssrnet-imdb-age/model.json',
+      modelPath: '../models/ssrnet-age/imdb/model.json', // can be 'imdb' or 'wiki'
       skipFrames: 10, // how many frames to skip before re-running bounding box detection
     },
     gender: {
       enabled: true,
-      modelPath: '../models/ssrnet-imdb-gender/model.json',
+      modelPath: '../models/ssrnet-gender/imdb/model.json', // can be 'imdb' or 'wiki'
     },
     emotion: {
       enabled: true,
@@ -254,15 +255,15 @@ human.defaults = {
       enabled: true,
       modelPath: '../models/posenet/model.json',
       maxDetections: 5, // how many faces are we trying to analyze. limiting number in busy scenes will result in higher performance
-      scoreThreshold: 0.75, // threshold for deciding when to remove boxes based on score in non-maximum suppression
+      scoreThreshold: 0.7, // threshold for deciding when to remove boxes based on score in non-maximum suppression
       nmsRadius: 20, // radius for deciding points are too close in non-maximum suppression
     },
     hand: {
       enabled: true,
       skipFrames: 10, // how many frames to skip before re-running bounding box detection
-      minConfidence: 0.8, // threshold for discarding a prediction
+      minConfidence: 0.5, // threshold for discarding a prediction
       iouThreshold: 0.3, // threshold for deciding whether boxes overlap too much in non-maximum suppression
-      scoreThreshold: 0.75, // threshold for deciding when to remove boxes based on score in non-maximum suppression
+      scoreThreshold: 0.7, // threshold for deciding when to remove boxes based on score in non-maximum suppression
       detector: {
         anchors: '../models/handdetect/anchors.json',
         modelPath: '../models/handdetect/model.json',
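For orientation, `iouThreshold` and `scoreThreshold` correspond to the standard non-maximum-suppression parameters in TensorFlow.js. The following is an illustration of that API only, not the library's internal code; the box and score values are made up:

```js
import * as tf from '@tensorflow/tfjs';

async function nmsDemo() {
  const boxes = tf.tensor2d([[0, 0, 1, 1], [0, 0, 0.9, 0.9]]); // [y1, x1, y2, x2]
  const scores = tf.tensor1d([0.9, 0.72]);
  const keep = await tf.image.nonMaxSuppressionAsync(
    boxes, scores,
    10,   // maxFaces / maxDetections: cap on boxes kept
    0.3,  // iouThreshold: overlap above this suppresses the lower-scored box
    0.7,  // scoreThreshold: boxes scoring below this are dropped outright
  );
  console.log(await keep.data()); // indices of surviving boxes
}
```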
@@ -361,14 +362,14 @@ For example, on a desktop with a low-end nVidia GTX1050 it can perform multiple
 Performance per module:

 - Enabled all: 10 FPS
-- Face Detect: 80 FPS
+- Face Detect: 80 FPS (standalone)
 - Face Geometry: 30 FPS (includes face detect)
 - Face Iris: 25 FPS (includes face detect and face geometry)
 - Age: 60 FPS (includes face detect)
 - Gender: 60 FPS (includes face detect)
 - Emotion: 60 FPS (includes face detect)
-- Hand: 40 FPS
-- Body: 50 FPS
+- Hand: 40 FPS (standalone)
+- Body: 50 FPS (standalone)

 Library can also be used on mobile devices
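Given those numbers, the cheapest way to hit a frame-rate target is to disable modules you do not need. A sketch, assuming the same config-override mechanism used by `human.detect()` elsewhere in this commit:

```js
// sketch: face detection only, all downstream modules off
// (assumes this object merges over human.defaults)
const fastConfig = {
  face: {
    enabled: true,
    mesh: { enabled: false },
    age: { enabled: false },
    gender: { enabled: false },
    emotion: { enabled: false },
  },
  body: { enabled: false },
  hand: { enabled: false },
};
const result = await human.detect(image, fastConfig); // inside an async function
```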
@@ -13,8 +13,7 @@ onmessage = async (msg) => {
   config = msg.data.config;
   let result = {};
   try {
-    // result = await human.detect(image, config);
-    result = {};
+    result = await human.detect(image, config);
   } catch (err) {
     result.error = err.message;
     log('Worker thread error:', err.message);
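For context, this handler lives in the demo's web worker (`demo-esm-webworker.js`, per the `new Worker(...)` call later in this commit), and the image it detects on has to be rebuilt from the transferred pixel buffer. A sketch of the surrounding handler, assuming the `{ image, width, height, config }` message shape used by `worker.postMessage()` below and assuming `human.detect()` accepts an `ImageData`:

```js
// sketch of the full onmessage handler (message shape assumed from
// the postMessage call in the webWorker() hunk below)
onmessage = async (msg) => {
  // rebuild ImageData from the transferred ArrayBuffer
  const image = new ImageData(
    new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
  config = msg.data.config;
  let result = {};
  try {
    result = await human.detect(image, config);
  } catch (err) {
    result.error = err.message;
  }
  postMessage(result); // send results back to the main thread
};
```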
@@ -60,7 +60,7 @@ async function drawFace(result, canvas) {
     const labelIris = face.iris ? `iris: ${face.iris}` : '';
     const labelEmotion = face.emotion && face.emotion[0] ? `emotion: ${Math.trunc(100 * face.emotion[0].score)}% ${face.emotion[0].emotion}` : '';
     ctx.fillStyle = ui.baseLabel;
-    ctx.fillText(`face ${labelAgeGender} ${labelIris} ${labelEmotion}`, face.box[0] + 2, face.box[1] + 22, face.box[2]);
+    ctx.fillText(`${Math.trunc(100 * face.confidence)}% face ${labelAgeGender} ${labelIris} ${labelEmotion}`, face.box[0] + 2, face.box[1] + 22);
     ctx.stroke();
     ctx.lineWidth = 1;
     if (face.mesh) {
@@ -238,7 +238,7 @@ function webWorker(input, image, canvas) {
     log('Creating worker thread');
     worker = new Worker('demo-esm-webworker.js', { type: 'module' });
     // after receiving message from webworker, parse&draw results and send new frame for processing
-    worker.addEventListener('message', async (msg) => drawResults(input, msg.data, canvas));
+    worker.addEventListener('message', (msg) => drawResults(input, msg.data, canvas));
   }
   // pass image data as arraybuffer to worker by reference to avoid copy
   worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
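Transferring `image.data.buffer` detaches it on the sending side, so a fresh `ImageData` has to be captured for every frame. A sketch of producing that `image` argument from a video element; the `grabFrame` helper is illustrative, not part of the commit:

```js
// sketch: capture the current video frame as ImageData so its backing
// ArrayBuffer can be transferred to the worker without a copy
function grabFrame(video, canvas) {
  const ctx = canvas.getContext('2d');
  ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
  return ctx.getImageData(0, 0, canvas.width, canvas.height);
}
// usage: webWorker(video, grabFrame(video, canvas), canvas);
```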
@@ -2,8 +2,9 @@ export default {
   face: {
     enabled: true, // refers to detector, but since all other face modules rely on detector, it should be a global
     detector: {
-      modelPath: '../models/blazeface/model.json',
-      inputSize: 128, // fixed value
+      modelPath: '../models/blazeface/tfhub/model.json', // can be blazeface-front or blazeface-back
+      anchorSize: 128, // fixed regardless of model
+      inputSize: 128, // fixed value: 128 for front and tfhub and 256 for back
       maxFaces: 10, // maximum number of faces detected in the input, should be set to the minimum number for performance
       skipFrames: 10, // how many frames to go without running the bounding box detector
       minConfidence: 0.5, // threshold for discarding a prediction
@@ -79,17 +79,15 @@ async function detect(input, userConfig) {

   // run posenet
   timeStamp = performance.now();
-  let poseRes = [];
   tf.engine().startScope();
-  if (config.body.enabled) poseRes = await models.posenet.estimatePoses(input, config.body);
+  const poseRes = config.body.enabled ? await models.posenet.estimatePoses(input, config.body) : [];
   tf.engine().endScope();
   perf.body = Math.trunc(performance.now() - timeStamp);

   // run handpose
   timeStamp = performance.now();
-  let handRes = [];
   tf.engine().startScope();
-  if (config.hand.enabled) handRes = await models.handpose.estimateHands(input, config.hand);
+  const handRes = config.hand.enabled ? await models.handpose.estimateHands(input, config.hand) : [];
   tf.engine().endScope();
   perf.hand = Math.trunc(performance.now() - timeStamp);
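The `tf.engine().startScope()` / `endScope()` pair makes TensorFlow.js track and dispose every intermediate tensor allocated inside the scope, which is what keeps a long-running detect loop from leaking memory. A standalone illustration of the mechanism (not the library's code):

```js
import * as tf from '@tensorflow/tfjs';

// tensors created inside the scope are disposed at endScope(), so
// repeated inference does not grow tf.memory().numTensors
tf.engine().startScope();
const a = tf.zeros([1024, 1024]);   // intermediate allocation
const b = a.square();               // another intermediate
console.log('inside scope:', tf.memory().numTensors);
tf.engine().endScope();
console.log('after scope:', tf.memory().numTensors); // intermediates released
```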