update default configuration

master
Vladimir Mandic 2021-10-23 09:41:20 -04:00
parent 97e86c65f6
commit 20ac74ed7d
1 changed file with 45 additions and 31 deletions

@@ -32,13 +32,13 @@ Below is full output of `human.defaults` object
Any property can be overridden by passing a user object to `human.detect()`
Note that the user object and the default configuration are merged using deep-merge, so you do not need to redefine the entire configuration (see the sketch below)
-All configuration details can be changed in real-time!
+All configuration details can be changed in real-time
```js
const config: Config = {
backend: '', // select tfjs backend to use, leave empty to use default backend
-// for browsers it can be `webgl`, `wasm`, `cpu`, or `humangl` which is a custom version of webgl
-// for nodejs it can be `tensorflow`, `wasm` or `cpu`
+// for browser environments: 'webgl', 'wasm', 'cpu', or 'humangl' (which is a custom version of webgl)
+// for nodejs environments: 'tensorflow', 'wasm', 'cpu'
// default set to `humangl` for browsers and `tensorflow` for nodejs
modelBasePath: '', // base path for all models
// default set to `../models/` for browsers and `file://models/` for nodejs
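// A minimal sketch of the override behaviour described above, assuming the standard
// `Human` constructor and `human.detect()` exported by '@vladmandic/human'
// (the `video` element is a hypothetical input source):
import { Human } from '@vladmandic/human';
const human = new Human({ backend: 'wasm', modelBasePath: '../models/' }); // partial config, deep-merged with the defaults listed here
const video = document.getElementById('video'); // hypothetical <video> element used as detection input
const result = await human.detect(video, { face: { detector: { maxDetected: 5 } } }); // per-call override, also deep-merged
console.log(result.face.length); // number of faces found with the merged configuration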
@@ -50,10 +50,10 @@ const config: Config = {
// warmup pre-initializes all models for faster inference but can take
// significant time on startup
// only used for `webgl` and `humangl` backends
-cacheSensitivity: 0.75, // cache sensitivity
+cacheSensitivity: 0.70, // cache sensitivity
// values 0..1 where 0.01 means reset cache if input changed more than 1%
// set to 0 to disable caching
-skipFrame: false, // internal & dynamic
+skipAllowed: false, // internal & dynamic
filter: { // run input through image filters before inference
// image filters run with near-zero latency as they are executed on the GPU
enabled: true, // enable image pre-processing filters
@@ -93,13 +93,12 @@ const config: Config = {
rotation: true, // use best-guess rotated face image or just box with rotation as-is
// false means higher performance, but incorrect mesh mapping if face angle is above 20 degrees
// this parameter is not valid in nodejs
-maxDetected: 15, // maximum number of faces detected in the input
+maxDetected: 1, // maximum number of faces detected in the input
// should be set to the minimum number for performance
-skipFrames: 15, // how many max frames to go without re-running the face bounding box detector
+skipFrames: 99, // how many max frames to go without re-running the face bounding box detector
// only used when cacheSensitivity is not zero
+skipTime: 2500, // how many ms to go without re-running the face bounding box detector
+// only used when cacheSensitivity is not zero
-// e.g., if model is running st 25 FPS, we can re-use existing bounding
-// box for updated face analysis as the head probably hasn't moved much
-// in short time (10 * 1/25 = 0.25 sec)
minConfidence: 0.2, // threshold for discarding a prediction
iouThreshold: 0.1, // amount of overlap between two detected objects before one object is removed
return: false, // return extracted face as tensor
@@ -117,27 +116,33 @@ const config: Config = {
// can be either absolute path or relative to modelBasePath
},
+emotion: {
+enabled: true,
+minConfidence: 0.1, // threshold for discarding a prediction
+skipFrames: 99, // how many max frames to go without re-running the detector
+// only used when cacheSensitivity is not zero
+skipTime: 1500, // how many ms to go without re-running the detector
+// only used when cacheSensitivity is not zero
+modelPath: 'emotion.json', // face emotion model, can be absolute path or relative to modelBasePath
+},
description: {
enabled: true, // to improve accuracy of face description extraction it is
// recommended to enable detector.rotation and mesh.enabled
modelPath: 'faceres.json', // face description model
// can be either absolute path or relative to modelBasePath
-skipFrames: 11, // how many max frames to go without re-running the detector
+skipFrames: 99, // how many max frames to go without re-running the detector
// only used when cacheSensitivity is not zero
+skipTime: 3000, // how many ms to go without re-running the detector
+// only used when cacheSensitivity is not zero
minConfidence: 0.1, // threshold for discarding a prediction
},
-emotion: {
-enabled: true,
-minConfidence: 0.1, // threshold for discarding a prediction
-skipFrames: 17, // how max many frames to go without re-running the detector
-// only used when cacheSensitivity is not zero
-modelPath: 'emotion.json', // face emotion model, can be absolute path or relative to modelBasePath
-},
antispoof: {
enabled: false,
-skipFrames: 14, // how max many frames to go without re-running the detector
+skipFrames: 99, // how many max frames to go without re-running the detector
// only used when cacheSensitivity is not zero
+skipTime: 4000, // how many ms to go without re-running the detector
+// only used when cacheSensitivity is not zero
modelPath: 'antispoof.json', // face antispoof model
// can be either absolute path or relative to modelBasePath
@@ -148,30 +153,37 @@ const config: Config = {
enabled: true,
modelPath: 'movenet-lightning.json', // body model, can be absolute path or relative to modelBasePath
// can be 'posenet', 'blazepose', 'efficientpose', 'movenet-lightning', 'movenet-thunder'
-maxDetected: 1, // maximum number of people detected in the input
+detector: {
+modelPath: '', // optional body detector
+},
+maxDetected: -1, // maximum number of people detected in the input
// should be set to the minimum number for performance
-// only valid for posenet as other models detects single pose
-minConfidence: 0.2, // threshold for discarding a prediction
+// only valid for posenet and movenet-multipose as other models detect a single pose
+// set to -1 to autodetect based on number of detected faces
+minConfidence: 0.3, // threshold for discarding a prediction
skipFrames: 1, // how many max frames to go without re-running the detector
// only used when cacheSensitivity is not zero
+skipTime: 200, // how many ms to go without re-running the detector
+// only used when cacheSensitivity is not zero
},
hand: {
enabled: true,
rotation: true, // use best-guess rotated hand image or just box with rotation as-is
// false means higher performance, but incorrect finger mapping if hand is inverted
-skipFrames: 18, // how many max frames to go without re-running the hand bounding box detector
// only valid for `handdetect` variation
+skipFrames: 99, // how many max frames to go without re-running the hand bounding box detector
// only used when cacheSensitivity is not zero
-// e.g., if model is running st 25 FPS, we can re-use existing bounding
-// box for updated hand skeleton analysis as the hand probably
-// hasn't moved much in short time (10 * 1/25 = 0.25 sec)
-minConfidence: 0.8, // threshold for discarding a prediction
+skipTime: 2000, // how many ms to go without re-running the hand bounding box detector
+// only used when cacheSensitivity is not zero
+minConfidence: 0.50, // threshold for discarding a prediction
iouThreshold: 0.2, // amount of overlap between two detected objects before one object is removed
-maxDetected: 1, // maximum number of hands detected in the input
+maxDetected: -1, // maximum number of hands detected in the input
// should be set to the minimum number for performance
+// set to -1 to autodetect based on number of detected faces
landmarks: true, // detect hand landmarks or just hand boundary box
detector: {
-modelPath: 'handdetect.json', // hand detector model, can be absolute path or relative to modelBasePath
+modelPath: 'handtrack.json', // hand detector model, can be absolute path or relative to modelBasePath
+// can be 'handdetect' or 'handtrack'
},
skeleton: {
@@ -186,7 +198,9 @@ const config: Config = {
minConfidence: 0.2, // threshold for discarding a prediction
iouThreshold: 0.4, // amount of overlap between two detected objects before one object is removed
maxDetected: 10, // maximum number of objects detected in the input
-skipFrames: 19, // how many max frames to go without re-running the detector
+skipFrames: 99, // how many max frames to go without re-running the detector
+// only used when cacheSensitivity is not zero
+skipTime: 1000, // how many ms to go without re-running the object detector
// only used when cacheSensitivity is not zero
},
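// A minimal sketch of the caching and auto-detection knobs used throughout this
// configuration, assuming the same `Human` constructor as in the sketch above
// (the numeric values are illustrative, not recommended defaults):
const realtime = new Human({
  cacheSensitivity: 0, // 0 disables caching entirely, so every frame is fully analyzed
  face: { detector: { maxDetected: 1 } }, // analyze only the most prominent face
});
const cached = new Human({
  cacheSensitivity: 0.7, // reset cached results only if the input changes by more than 70%
  face: { detector: { skipFrames: 10, skipTime: 500 } }, // go at most 10 frames or 500 ms without re-running the face detector
  body: { maxDetected: -1 }, // -1 derives the limit from the number of detected faces
  hand: { maxDetected: -1 }, // same auto-detection for hands
});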