update documentation and models

master
Vladimir Mandic 2021-09-22 15:53:25 -04:00
parent d293f4a20b
commit a0497b6d14
2 changed files with 30 additions and 20 deletions

@@ -36,19 +36,24 @@ All configuration details can be changed in real-time!
```js
const config: Config = {
backend: 'webgl', // select tfjs backend to use, leave empty to use default backend
// can be 'webgl', 'wasm', 'cpu', or 'humangl' which is a custom version of webgl
modelBasePath: '../models/', // base path for all models
wasmPath: '../node_modules/@tensorflow/tfjs-backend-wasm/dist/', // path for wasm binaries, only used for backend: wasm
backend: '', // select tfjs backend to use, leave empty to use default backend
// for browsers it can be `webgl`, `wasm`, `cpu`, or `humangl` which is a custom version of webgl
// for nodejs it can be `tensorflow`, `wasm` or `cpu`
// default set to `humangl` for browsers and `tensorflow` for nodejs
modelBasePath: '', // base path for all models
// default set to `../models/` for browsers and `file://models/` for nodejs
wasmPath: '', // path for wasm binaries, only used for backend: wasm
// default set to download from jsdelivr during Human class instantiation
debug: true, // print additional status messages to console
async: true, // execute enabled models in parallel
warmup: 'full', // what to use for human.warmup(), can be 'none', 'face', 'full'
// warmup pre-initializes all models for faster inference but can take
// significant time on startup
// only used for `webgl` and `humangl` backends
cacheSensitivity: 0.005, // cache sensitivity
cacheSensitivity: 0.75, // cache sensitivity
// values 0..1 where 0.01 means reset cache if input changed more than 1%
// set to 0 to disable caching
skipFrame: false, // internal & dynamic
filter: { // run input through image filters before inference
// image filters run with near-zero latency as they are executed on the GPU
enabled: true, // enable image pre-processing filters
@@ -85,12 +90,12 @@ const config: Config = {
// (note: module is not loaded until it is required)
detector: {
modelPath: 'blazeface.json', // detector model, can be absolute path or relative to modelBasePath
rotation: false, // use best-guess rotated face image or just box with rotation as-is
rotation: true, // use best-guess rotated face image or just box with rotation as-is
// false means higher performance, but incorrect mesh mapping if face angle is above 20 degrees
// this parameter is not valid in nodejs
maxDetected: 10, // maximum number of faces detected in the input
maxDetected: 15, // maximum number of faces detected in the input
// should be set to the minimum number for performance
skipFrames: 21, // how many max frames to go without re-running the face bounding box detector
skipFrames: 15, // how many max frames to go without re-running the face bounding box detector
// only used when cacheSensitivity is not zero
// e.g., if model is running at 25 FPS, we can re-use existing bounding
// box for updated face analysis as the head probably hasn't moved much
@@ -98,7 +103,7 @@ const config: Config = {
minConfidence: 0.2, // threshold for discarding a prediction
iouThreshold: 0.1, // amount of overlap between two detected objects before one object is removed
return: false, // return extracted face as tensor
// in which case user is responsible for disposing the tensor
// in which case user is responsible for disposing the tensor
},
mesh: {
@@ -117,7 +122,7 @@ const config: Config = {
// recommended to enable detector.rotation and mesh.enabled
modelPath: 'faceres.json', // face description model
// can be either absolute path or relative to modelBasePath
skipFrames: 31, // how many max frames to go without re-running the detector
skipFrames: 11, // how many max frames to go without re-running the detector
// only used when cacheSensitivity is not zero
minConfidence: 0.1, // threshold for discarding a prediction
},
@@ -125,7 +130,7 @@ const config: Config = {
emotion: {
enabled: true,
minConfidence: 0.1, // threshold for discarding a prediction
skipFrames: 32, // how many max frames to go without re-running the detector
skipFrames: 17, // how many max frames to go without re-running the detector
// only used when cacheSensitivity is not zero
modelPath: 'emotion.json', // face emotion model, can be absolute path or relative to modelBasePath
},
@@ -138,25 +143,28 @@ const config: Config = {
maxDetected: 1, // maximum number of people detected in the input
// should be set to the minimum number for performance
// only valid for posenet as other models detect a single pose
minConfidence: 0.1, // threshold for discarding a prediction
},
minConfidence: 0.2, // threshold for discarding a prediction
skipFrames: 1, // how many max frames to go without re-running the detector
// only used when cacheSensitivity is not zero
},
hand: {
enabled: true,
rotation: false, // use best-guess rotated hand image or just box with rotation as-is
rotation: true, // use best-guess rotated hand image or just box with rotation as-is
// false means higher performance, but incorrect finger mapping if hand is inverted
skipFrames: 32, // how many max frames to go without re-running the hand bounding box detector
skipFrames: 18, // how many max frames to go without re-running the hand bounding box detector
// only used when cacheSensitivity is not zero
// e.g., if model is running at 25 FPS, we can re-use existing bounding
// box for updated hand skeleton analysis as the hand probably
// hasn't moved much in a short time (10 * 1/25 = 0.4 sec)
minConfidence: 0.1, // threshold for discarding a prediction
iouThreshold: 0.1, // amount of overlap between two detected objects before one object is removed
maxDetected: 2, // maximum number of hands detected in the input
minConfidence: 0.8, // threshold for discarding a prediction
iouThreshold: 0.2, // amount of overlap between two detected objects before one object is removed
maxDetected: 1, // maximum number of hands detected in the input
// should be set to the minimum number for performance
landmarks: true, // detect hand landmarks or just hand bounding box
detector: {
modelPath: 'handdetect.json', // hand detector model, can be absolute path or relative to modelBasePath
// can be 'handdetect' or 'handtrack'
},
skeleton: {
modelPath: 'handskeleton.json', // hand skeleton model, can be absolute path or relative to modelBasePath
@@ -170,7 +178,7 @@ const config: Config = {
minConfidence: 0.2, // threshold for discarding a prediction
iouThreshold: 0.4, // amount of overlap between two detected objects before one object is removed
maxDetected: 10, // maximum number of objects detected in the input
skipFrames: 41, // how many max frames to go without re-running the detector
skipFrames: 19, // how many max frames to go without re-running the detector
// only used when cacheSensitivity is not zero
},
@@ -182,8 +190,8 @@ const config: Config = {
// remove background or replace it with user-provided background
modelPath: 'selfie.json', // experimental: segmentation model, can be absolute path or relative to modelBasePath
// can be 'selfie' or 'meet'
blur: 8, // blur segmentation output by n pixels for a more realistic image
},
};
```
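
Since every value is optional and can be overridden per call, a minimal usage sketch might look like the following (assumes the standard `Human` constructor and `detect()` API; the values shown are illustrative, not recommendations):

```js
import Human from '@vladmandic/human';

// construct with a partial config; anything not specified falls back to library defaults
const human = new Human({ backend: 'humangl', cacheSensitivity: 0.75 });

async function run(input) {
  await human.load();   // optional: pre-loads enabled models
  await human.warmup(); // optional: warms up models for a faster first inference
  // passing a partial config to detect() changes settings in real-time for that call only
  const result = await human.detect(input, { face: { detector: { rotation: true } } });
  console.log(result.face, result.body, result.hand);
}
```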

@@ -94,6 +94,7 @@ but can be switched on-the-fly due to standardized output implementation
| MoveNet-Thunder | 158K | movenet-thunder.json | 12M | movenet-thunder.bin | 178 |
| MoveNet-MultiPose | 235K | movenet-multipose.json | 9.1M | movenet-multipose.bin | 303 |
| Google Selfie | 82K | selfie.json | 208K | selfie.bin | 136 |
| Hand Tracking | 605K | handtrack.json | 2.9M | handtrack.bin | 619 |
<br>
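
The new hand tracking model listed above can be selected through the same configuration object; a minimal sketch (model file name taken from the table, resolved relative to the default `modelBasePath`):

```js
import Human from '@vladmandic/human';

// switch the hand detector from the default 'handdetect' model to the new 'handtrack' model
const human = new Human({
  hand: {
    enabled: true,
    detector: { modelPath: 'handtrack.json' },
  },
});
```
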
@@ -119,6 +120,7 @@ but can be switched on-the-fly due to standardized output implementation
- Face Embedding: [**BecauseofAI MobileFace**](https://github.com/becauseofAI/MobileFace)
- Object Detection: [**NanoDet**](https://github.com/RangiLyu/nanodet)
- Object Detection: [**MB3-CenterNet**](https://github.com/610265158/mobilenetv3_centernet)
- Hand Tracking: [**HandTracking**](https://github.com/victordibia/handtracking)
- Image Filters: [**WebGLImageFilter**](https://github.com/phoboslab/WebGLImageFilter)
- Pinto Model Zoo: [**Pinto**](https://github.com/PINTO0309/PINTO_model_zoo)