mirror of https://github.com/vladmandic/human
added debugging and versioning
parent 567966a162
commit ff386b6e64

README.md (66 lines changed)

@@ -197,8 +197,9 @@ or if you want to use promises

 Additionally, `Human` library exposes several classes:

 ```js
-human.defaults // default configuration object
-human.models // dynamically maintained object of any loaded models
+human.config // access to the configuration object, normally set as a parameter to detect()
+human.defaults // read-only view of the default configuration object
+human.models // dynamically maintained list of all loaded models
 human.tf // instance of tfjs used by human
 ```

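As a point of reference, a minimal sketch of how these exposed properties might be inspected from a browser ESM context; the import path mirrors the demo code further down and should be treated as illustrative:

```js
import human from '../dist/human.esm.js';

// defaults is a read-only view of the built-in configuration
console.log('default backend:', human.defaults.backend);

// models is populated lazily, so entries stay null until the corresponding module has run
const loaded = Object.keys(human.models).filter((name) => human.models[name]);
console.log('loaded models:', loaded);

// the bundled tfjs instance is reachable directly, e.g. to query the active backend
console.log('tfjs backend:', human.tf.getBackend());
```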
@@ -212,15 +213,17 @@ Note that user object and default configuration are merged using deep-merge, so

 ```js
 human.defaults = {
+  console: true, // enable debugging output to console
+  backend: 'webgl', // select tfjs backend to use
   face: {
-    enabled: true,
+    enabled: true, // controls if specified module is enabled (note: module is not loaded until it is required)
     detector: {
-      modelPath: '../models/blazeface/model.json',
-      maxFaces: 10,
-      skipFrames: 10,
-      minConfidence: 0.8,
-      iouThreshold: 0.3,
-      scoreThreshold: 0.75,
+      modelPath: '../models/blazeface/model.json', // path to specific pre-trained model
+      maxFaces: 10, // how many faces are we trying to analyze. limiting number in busy scenes will result in higher performance
+      skipFrames: 10, // how many frames to skip before re-running bounding box detection
+      minConfidence: 0.8, // threshold for discarding a prediction
+      iouThreshold: 0.3, // threshold for deciding whether boxes overlap too much in non-maximum suppression
+      scoreThreshold: 0.75, // threshold for deciding when to remove boxes based on score in non-maximum suppression
     },
     mesh: {
       enabled: true,
@@ -233,7 +236,7 @@ human.defaults = {
     age: {
       enabled: true,
       modelPath: '../models/ssrnet-imdb-age/model.json',
-      skipFrames: 10,
+      skipFrames: 10, // how many frames to skip before re-running bounding box detection
     },
     gender: {
       enabled: true,
@@ -241,25 +244,25 @@ human.defaults = {
     },
     emotion: {
       enabled: true,
-      minConfidence: 0.5,
-      skipFrames: 10,
-      useGrayscale: true,
+      minConfidence: 0.5, // threshold for discarding a prediction
+      skipFrames: 10, // how many frames to skip before re-running bounding box detection
+      useGrayscale: true, // convert color input to grayscale before processing or use single channels when color input is not supported
       modelPath: '../models/emotion/model.json',
     },
   },
   body: {
     enabled: true,
     modelPath: '../models/posenet/model.json',
-    maxDetections: 5,
-    scoreThreshold: 0.75,
-    nmsRadius: 20,
+    maxDetections: 5, // how many people are we trying to analyze. limiting number in busy scenes will result in higher performance
+    scoreThreshold: 0.75, // threshold for deciding when to remove boxes based on score in non-maximum suppression
+    nmsRadius: 20, // radius for deciding whether points are too close in non-maximum suppression
   },
   hand: {
     enabled: true,
-    skipFrames: 10,
-    minConfidence: 0.8,
-    iouThreshold: 0.3,
-    scoreThreshold: 0.75,
+    skipFrames: 10, // how many frames to skip before re-running bounding box detection
+    minConfidence: 0.8, // threshold for discarding a prediction
+    iouThreshold: 0.3, // threshold for deciding whether boxes overlap too much in non-maximum suppression
+    scoreThreshold: 0.75, // threshold for deciding when to remove boxes based on score in non-maximum suppression
     detector: {
       anchors: '../models/handdetect/anchors.json',
       modelPath: '../models/handdetect/model.json',
@@ -271,17 +274,6 @@ human.defaults = {
 };
 ```

-Where:
-- `enabled`: controls if specified module is enabled (note: module is not loaded until it is required)
-- `modelPath`: path to specific pre-trained model weights
-- `maxFaces`, `maxDetections`: how many faces or people are we trying to analyze. limiting number in busy scenes will result in higher performance
-- `skipFrames`: how many frames to skip before re-running bounding box detection (e.g., face position does not move fast within a video, so it's ok to use previously detected face position and just run face geometry analysis)
-- `minConfidence`: threshold for discarding a prediction
-- `iouThreshold`: threshold for deciding whether boxes overlap too much in non-maximum suppression
-- `scoreThreshold`: threshold for deciding when to remove boxes based on score in non-maximum suppression
-- `useGrayscale`: convert color input to grayscale before processing or use single channels when color input is not supported
-- `nmsRadius`: radius for deciding whether points are too close in non-maximum suppression
-
 <hr>

 ## Outputs

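Since user options are deep-merged over `human.defaults`, a caller only needs to specify the fields it wants to change. A minimal sketch, assuming an `image` input and an async context are already available:

```js
// everything not listed here falls back to the corresponding value in human.defaults
const userConfig = {
  console: true,                      // turn on debug logging
  backend: 'webgl',
  face: { age: { enabled: false } },  // disable just the age module
  hand: { skipFrames: 5 },            // override a single nested value
};
const result = await human.detect(image, userConfig);
```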
@@ -290,6 +282,7 @@ Result of `human.detect()` is a single object that includes data for all enable

 ```js
 result = {
+  version: // <string> version string of the human library
   face: // <array of detected objects>
   [
     {
@@ -325,13 +318,7 @@ result = {
       emotion, // <string> 'angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral'
     }
   ],
-}
-```
-
-Additionally, `result` object includes internal performance data - total time spent and time per module (measured in ms):
-
-```js
-result.performance = {
+  performance = { // performance data of last execution for each module, measured in milliseconds
     body,
     hand,
     face,
@@ -339,6 +326,7 @@ Additionally, `result` object includes internal performance data - total time sp
     emotion,
     total,
   }
+}
 ```

 <hr>

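To make the output shape concrete, a short sketch of consuming the result object documented above (again assuming `image`, `config`, and an async context are defined elsewhere):

```js
const result = await human.detect(image, config);

// result.face is an array with one entry per detected face
for (const face of result.face) {
  console.log('detected emotion:', face.emotion);
}

// per-module timings are reported in milliseconds
console.log('total detection time:', result.performance.total, 'ms');
```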
@@ -402,3 +390,5 @@ Library can also be used on mobile devices

 - Tweak default parameters and factorization for age/gender/emotion
 - Verify age/gender models
+- Face scaling
+- NSFW

demo-esm-webworker.js

@@ -1,6 +1,20 @@
 import human from '../dist/human.esm.js';

+let config;
+
+const log = (...msg) => {
+  // eslint-disable-next-line no-console
+  if (config.console) console.log(...msg);
+};
+
 onmessage = async (msg) => {
-  const result = await human.detect(msg.data.image, msg.data.config);
+  config = msg.data.config;
+  let result = {};
+  try {
+    result = await human.detect(msg.data.image, config);
+  } catch (err) {
+    result.error = err.message;
+    log('Worker thread error:', err.message);
+  }
   postMessage(result);
 };

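The worker above expects a message carrying `image` and `config` and posts back either the detection result or an object with an `error` field. A condensed sketch of the main-thread side (the full version lives in the demo's `webWorker()` function; names here are illustrative):

```js
// main thread: send the current frame to the worker and handle its reply
const worker = new Worker('demo-esm-webworker.js', { type: 'module' });
worker.addEventListener('message', (msg) => {
  if (msg.data.error) console.log('worker error:', msg.data.error);
  else console.log('worker result:', msg.data);
});

// assumes `canvas` holds the current video frame and `config` is defined as in the demo
const ctx = canvas.getContext('2d');
const image = ctx.getImageData(0, 0, canvas.width, canvas.height);
worker.postMessage({ image, config });
```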

demo-esm.js

@@ -1,9 +1,8 @@
-/* global tf, QuickSettings */
+/* global QuickSettings */

 import human from '../dist/human.esm.js';

 const ui = {
-  backend: 'webgl',
   baseColor: 'rgba(255, 200, 255, 0.3)',
   baseLabel: 'rgba(255, 200, 255, 0.8)',
   baseFont: 'small-caps 1.2rem "Segoe UI"',
@@ -11,6 +10,8 @@ const ui = {
 };

 const config = {
+  backend: 'webgl',
+  console: true,
   face: {
     enabled: true,
     detector: { maxFaces: 10, skipFrames: 10, minConfidence: 0.5, iouThreshold: 0.3, scoreThreshold: 0.7 },
@@ -37,31 +38,10 @@ function str(...msg) {
   return line;
 }

-async function setupTF(input) {
-  // pause video if running before changing backend
-  const live = input.srcObject ? ((input.srcObject.getVideoTracks()[0].readyState === 'live') && (input.readyState > 2) && (!input.paused)) : false;
-  if (live) await input.pause();
-
-  // if user explicitly loaded tfjs, override one used in human library
-  if (window.tf) human.tf = window.tf;
-
-  // checks for wasm backend
-  if (ui.backend === 'wasm') {
-    if (!window.tf) {
-      document.getElementById('log').innerText = 'Error: WASM Backend is not loaded, enable it in HTML file';
-      ui.backend = 'webgl';
-    } else {
-      human.tf = window.tf;
-      tf.env().set('WASM_HAS_SIMD_SUPPORT', false);
-      tf.env().set('WASM_HAS_MULTITHREAD_SUPPORT', true);
-    }
-  }
-  await human.tf.setBackend(ui.backend);
-  await human.tf.ready();
-
-  // continue video if it was previously running
-  if (live) await input.play();
-}
+const log = (...msg) => {
+  // eslint-disable-next-line no-console
+  if (config.console) console.log(...msg);
+};

 async function drawFace(result, canvas) {
   const ctx = canvas.getContext('2d');
@@ -234,15 +214,15 @@ async function drawResults(input, result, canvas) {
   const engine = await human.tf.engine();
   const memory = `${engine.state.numBytes.toLocaleString()} bytes ${engine.state.numDataBuffers.toLocaleString()} buffers ${engine.state.numTensors.toLocaleString()} tensors`;
   const gpu = engine.backendInstance.numBytesInGPU ? `GPU: ${engine.backendInstance.numBytesInGPU.toLocaleString()} bytes` : '';
-  const log = document.getElementById('log');
-  log.innerText = `
-    TFJS Version: ${human.tf.version_core} | Backend: {human.tf.getBackend()} | Memory: ${memory} ${gpu}
+  document.getElementById('log').innerText = `
+    TFJS Version: ${human.tf.version_core} | Backend: ${human.tf.getBackend()} | Memory: ${memory} ${gpu}
     Performance: ${str(result.performance)} | Object size: ${(str(result)).length.toLocaleString()} bytes
   `;
 }

 async function webWorker(input, image, canvas) {
   if (!worker) {
+    log('Creating worker thread');
     // create new webworker
     worker = new Worker('demo-esm-webworker.js', { type: 'module' });
     // after receiving message from webworker, parse&draw results and send new frame for processing
@@ -270,14 +250,19 @@ async function runHumanDetect(input, canvas) {
       // perform detection
       await webWorker(input, data, canvas);
     } else {
-      const result = await human.detect(input, config);
+      let result = {};
+      try {
+        result = await human.detect(input, config);
+      } catch (err) {
+        log('Error during execution:', err.message);
+      }
       await drawResults(input, result, canvas);
       if (input.readyState) requestAnimationFrame(() => runHumanDetect(input, canvas)); // immediate loop
     }
   }
 }

-function setupUI(input) {
+function setupUI() {
   // add all variables to ui control panel
   settings = QuickSettings.create(10, 10, 'Settings', document.getElementById('main'));
   const style = document.createElement('style');
@@ -304,10 +289,7 @@ function setupUI(input) {
     }
     runHumanDetect(video, canvas);
   });
-  settings.addDropDown('Backend', ['webgl', 'wasm', 'cpu'], async (val) => {
-    ui.backend = val.value;
-    await setupTF(input);
-  });
+  settings.addDropDown('Backend', ['webgl', 'wasm', 'cpu'], async (val) => config.backend = val.value);
   settings.addHTML('title', 'Enabled Models'); settings.hideTitle('title');
   settings.addBoolean('Face Detect', config.face.enabled, (val) => config.face.enabled = val);
   settings.addBoolean('Face Mesh', config.face.mesh.enabled, (val) => config.face.mesh.enabled = val);
@@ -362,6 +344,7 @@ async function setupCanvas(input) {

 // eslint-disable-next-line no-unused-vars
 async function setupCamera() {
+  log('Setting up camera');
   // setup webcam. note that navigator.mediaDevices requires that page is accessed via https
   const video = document.getElementById('video');
   if (!navigator.mediaDevices) {
@@ -396,17 +379,22 @@ async function setupImage() {
 }

 async function main() {
+  log('Human starting ...');
+
+  // setup ui control panel
+  await setupUI();
   // setup webcam
   const input = await setupCamera();
   // or setup image
   // const input = await setupImage();
   // setup output canvas from input object
   await setupCanvas(input);

+  const msg = `Human ready: version: ${human.version} TensorFlow/JS version: ${human.tf.version_core}`;
+  document.getElementById('log').innerText = msg;
+  log(msg);
+
   // run actual detection. if input is video, it will run in a loop else it will run only once
-  // setup ui control panel
-  await setupUI(input);
-  // initialize tensorflow
-  await setupTF(input);
   // runHumanDetect(video, canvas);
 }
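With `setupTF()` gone, backend selection flows entirely through the configuration object: the dropdown only updates `config.backend`, and `src/index.js` (below) calls `tf.setBackend()` inside `detect()` when the configured backend differs from the active one. A condensed sketch of that flow, assuming `config`, `input`, and `log` as defined in this demo:

```js
// switching backend no longer needs an explicit setup step
config.backend = 'wasm'; // e.g. chosen from the Backend dropdown
// (the wasm backend still has to be loaded on the page for this to succeed)
const result = await human.detect(input, config); // the switch happens lazily inside detect()
log('active backend:', human.tf.getBackend());
log('performance:', result.performance);
```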

src/index.js (36 lines changed)

@@ -5,6 +5,9 @@ const emotion = require('./emotion/emotion.js');
 const posenet = require('./posenet/posenet.js');
 const handpose = require('./handpose/handpose.js');
 const defaults = require('./config.js').default;
+const app = require('../package.json');

+let config;
+
 // object that contains all initialized models
 const models = {
@@ -17,6 +20,11 @@ const models = {
   emotion: null,
 };

+const log = (...msg) => {
+  // eslint-disable-next-line no-console
+  if (config.console) console.log(...msg);
+};
+
 // helper function that performs deep merge of multiple objects so it allows full inheritance with overrides
 function mergeDeep(...objects) {
   const isObject = (obj) => obj && typeof obj === 'object';
@@ -39,7 +47,24 @@ function mergeDeep(...objects) {
 async function detect(input, userConfig) {
   // eslint-disable-next-line no-async-promise-executor
   return new Promise(async (resolve) => {
-    const config = mergeDeep(defaults, userConfig);
+    config = mergeDeep(defaults, userConfig);
+
+    // check number of loaded models
+    const loadedModels = Object.values(models).filter((a) => a).length;
+    if (loadedModels === 0) log('Human library starting');
+
+    // configure backend
+    if (tf.getBackend() !== config.backend) {
+      log('Human library setting backend:', config.backend);
+      await tf.setBackend(config.backend);
+      await tf.ready();
+    }
+    // explicitly enable depthwiseconv since it's disabled by default due to issues with large shaders
+    let savedWebglPackDepthwiseConvFlag;
+    if (tf.getBackend() === 'webgl') {
+      savedWebglPackDepthwiseConvFlag = tf.env().get('WEBGL_PACK_DEPTHWISECONV');
+      tf.env().set('WEBGL_PACK_DEPTHWISECONV', true);
+    }

     // load models if enabled
     if (config.face.enabled && !models.facemesh) models.facemesh = await facemesh.load(config.face);
@@ -49,13 +74,6 @@ async function detect(input, userConfig) {
     if (config.face.enabled && config.face.gender.enabled && !models.gender) models.gender = await ssrnet.loadGender(config);
     if (config.face.enabled && config.face.emotion.enabled && !models.emotion) models.emotion = await emotion.load(config);

-    // explicitly enable depthwiseconv since it's disabled by default due to issues with large shaders
-    let savedWebglPackDepthwiseConvFlag;
-    if (tf.getBackend() === 'webgl') {
-      savedWebglPackDepthwiseConvFlag = tf.env().get('WEBGL_PACK_DEPTHWISECONV');
-      tf.env().set('WEBGL_PACK_DEPTHWISECONV', true);
-    }
-
     const perf = {};
     let timeStamp;

@@ -122,9 +140,11 @@ async function detect(input, userConfig) {

 exports.detect = detect;
 exports.defaults = defaults;
+exports.config = config;
 exports.models = models;
 exports.facemesh = facemesh;
 exports.ssrnet = ssrnet;
 exports.posenet = posenet;
 exports.handpose = handpose;
 exports.tf = tf;
+exports.version = app.version;
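For completeness, a sketch of a CommonJS consumer matching the exports above; the require path and the shape of the input are assumptions for illustration, not part of this commit:

```js
const human = require('./src/index.js'); // or the published bundle entry point

async function run(input) {              // input: image tensor or pixel source accepted by detect()
  console.log('human version:', human.version);
  const result = await human.detect(input, { console: true, backend: 'cpu' });
  console.log('performance:', result.performance);
  return result;
}
```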