build and docs cleanup

pull/356/head
Vladimir Mandic 2021-10-29 15:55:20 -04:00
parent a710ef88ec
commit f3411437a0
7 changed files with 113 additions and 177 deletions

CHANGELOG.md

@@ -9,11 +9,12 @@
 ## Changelog
+### **HEAD -> main** 2021/10/28 mandic00@live.com
 ### **2.4.3** 2021/10/28 mandic00@live.com
+- additional human.performance counters
+### **origin/main** 2021/10/27 mandic00@live.com
 ### **2.4.2** 2021/10/27 mandic00@live.com

README.md

@@ -58,8 +58,8 @@ Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) app
 - [**Home**](https://github.com/vladmandic/human/wiki)
 - [**Installation**](https://github.com/vladmandic/human/wiki/Install)
 - [**Usage & Functions**](https://github.com/vladmandic/human/wiki/Usage)
-- [**Configuration Details**](https://github.com/vladmandic/human/wiki/Configuration)
-- [**Output Details**](https://github.com/vladmandic/human/wiki/Outputs)
+- [**Configuration Details**](https://github.com/vladmandic/human/wiki/Config)
+- [**Result Details**](https://github.com/vladmandic/human/wiki/Result)
 - [**Caching & Smoothing**](https://github.com/vladmandic/human/wiki/Caching)
 - [**Face Recognition & Face Description**](https://github.com/vladmandic/human/wiki/Embedding)
 - [**Gesture Recognition**](https://github.com/vladmandic/human/wiki/Gesture)

TODO.md

@@ -14,21 +14,20 @@
 - TFLite Models: <https://js.tensorflow.org/api_tflite/0.0.1-alpha.4/>
 - Body segmentation: `robust-video-matting`
-<br><hr><br>
-## Known Issues
-### Type Definitions
-- `tfjs.esm.d.ts` missing namespace `OptimizerConstructors`
-- exports from `match` are marked as private
 #### WebGPU
 Experimental support only until support is officially added in Chromium
 - Performance issues:
   <https://github.com/tensorflow/tfjs/issues/5689>
+<br><hr><br>
+## Known Issues
+- `tfjs.esm.d.ts` missing namespace `OptimizerConstructors`
+- exports from `match` are marked as private
+<br>
 ### Face Detection
 Enhanced rotation correction for face detection is not working in NodeJS due to missing kernel op in TFJS
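The usual workaround for this NodeJS limitation is configuration-side; a hedged sketch (not part of this commit) that disables rotation correction via the `face.detector.rotation` option documented in the config.ts diff below:

```ts
import Human from '@vladmandic/human';

// sketch: rotation correction relies on a kernel op missing in tfjs-node,
// so turn it off explicitly when running server-side
const human = new Human({
  backend: 'tensorflow',                   // nodejs default backend
  face: { detector: { rotation: false } }, // avoid the unsupported code path
});
```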


@@ -44,9 +44,9 @@ async function webCam() {
   dom.canvas.width = dom.video.videoWidth;
   dom.canvas.height = dom.video.videoHeight;
   const track = stream.getVideoTracks()[0];
-  const capabilities = track.getCapabilities();
-  const settings = track.getSettings();
-  const constraints = track.getConstraints();
+  const capabilities = track.getCapabilities ? track.getCapabilities() : "";
+  const settings = track.getSettings ? track.getSettings() : "";
+  const constraints = track.getConstraints ? track.getConstraints() : "";
   log("video:", dom.video.videoWidth, dom.video.videoHeight, track.label, { stream, track, settings, constraints, capabilities });
   dom.canvas.onclick = () => {
     if (dom.video.paused)
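The new guards exist because `MediaStreamTrack.getCapabilities()` is not implemented in every browser (Firefox, notably, shipped without it for a long time). A standalone sketch of the same feature-detection pattern, independent of this demo's `dom` helpers:

```ts
// sketch: probe optional MediaStreamTrack methods before calling them
function inspectTrack(stream: MediaStream) {
  const track = stream.getVideoTracks()[0];
  const capabilities = typeof track.getCapabilities === 'function' ? track.getCapabilities() : {};
  const settings = typeof track.getSettings === 'function' ? track.getSettings() : {};
  const constraints = typeof track.getConstraints === 'function' ? track.getConstraints() : {};
  console.log('video track:', track.label, { capabilities, settings, constraints });
}
```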

package.json

@@ -55,6 +55,7 @@
     "tensorflow"
   ],
   "devDependencies": {
+    "@tensorflow/tfjs": "^3.11.0",
     "@tensorflow/tfjs-backend-cpu": "^3.11.0",
     "@tensorflow/tfjs-backend-wasm": "^3.11.0",
     "@tensorflow/tfjs-backend-webgl": "^3.11.0",
@@ -63,9 +64,8 @@
     "@tensorflow/tfjs-core": "^3.11.0",
     "@tensorflow/tfjs-data": "^3.11.0",
     "@tensorflow/tfjs-layers": "^3.11.0",
-    "@tensorflow/tfjs-node-gpu": "^3.11.0",
     "@tensorflow/tfjs-node": "^3.11.0",
-    "@tensorflow/tfjs": "^3.11.0",
+    "@tensorflow/tfjs-node-gpu": "^3.11.0",
     "@types/node": "^16.11.6",
     "@typescript-eslint/eslint-plugin": "^5.2.0",
     "@typescript-eslint/parser": "^5.2.0",
@@ -74,21 +74,19 @@
     "canvas": "^2.8.0",
     "dayjs": "^1.10.7",
     "esbuild": "^0.13.10",
+    "eslint": "8.1.0",
     "eslint-config-airbnb-base": "^14.2.1",
     "eslint-plugin-html": "^6.2.0",
     "eslint-plugin-import": "^2.25.2",
     "eslint-plugin-json": "^3.1.0",
     "eslint-plugin-node": "^11.1.0",
     "eslint-plugin-promise": "^5.1.1",
-    "eslint": "8.1.0",
-    "long": "4",
+    "long": "4.0.0",
     "node-fetch": "^3.0.0",
     "rimraf": "^3.0.2",
     "seedrandom": "^3.0.5",
     "tslib": "^2.3.1",
     "typedoc": "0.22.7",
     "typescript": "4.4.4"
-  },
-  "dependencies": {
   }
 }
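Two substantive changes hide in the alphabetical reordering: `eslint` merely moves to its sorted position, but the `long` pin changes from `"4"` to `"4.0.0"`. Under npm semver rules a bare major like `"4"` is a range matching any `4.x.x` release, while a plain version string such as `"4.0.0"` matches only that exact version. A quick check using the `semver` package (an assumed extra dependency, not part of this diff) illustrates the difference:

```ts
import semver from 'semver';

console.log(semver.satisfies('4.5.1', '4'));     // true  - "4" accepts any 4.x.x
console.log(semver.satisfies('4.5.1', '4.0.0')); // false - a bare version is an exact match
console.log(semver.satisfies('4.0.0', '4.0.0')); // true
```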

src/config.ts

@@ -182,43 +182,46 @@ export interface GestureConfig {
  */
 export interface Config {
   /** Backend used for TFJS operations
-   * Valid build-in backends are:
-   * - Browser: `cpu`, `wasm`, `webgl`, `humangl`
+   * valid built-in backends are:
+   * - Browser: `cpu`, `wasm`, `webgl`, `humangl`, `webgpu`
    * - NodeJS: `cpu`, `wasm`, `tensorflow`
-   *
-   * Experimental:
-   * - Browser: `webgpu` - requires custom build of `tfjs-backend-webgpu`
-   *
-   * Defaults: `humangl` for browser and `tensorflow` for nodejs
+   * default: `humangl` for browser and `tensorflow` for nodejs
    */
   backend: '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu',
-  // backend: string;
   /** Path to *.wasm files if backend is set to `wasm`
-   * - if not set, auto-detects to link to CDN `jsdelivr` when running in browser
+   * default: auto-detects to link to CDN `jsdelivr` when running in browser
    */
   wasmPath: string,
-  /** Print debug statements to console */
+  /** Print debug statements to console
+   * default: `true`
+   */
   debug: boolean,
-  /** Perform model loading and inference concurrently or sequentially */
+  /** Perform model loading and inference concurrently or sequentially
+   * default: `true`
+   */
   async: boolean,
   /** What to use for `human.warmup()`
    * - warmup pre-initializes all models for faster inference but can take significant time on startup
+   * - used by `webgl`, `humangl` and `webgpu` backends
+   * default: `full`
    */
   warmup: 'none' | 'face' | 'full' | 'body',
   // warmup: string;
   /** Base model path (typically starting with file://, http:// or https://) for all models
    * - individual modelPath values are relative to this path
+   * default: `../models/` for browsers and `file://models/` for nodejs
    */
   modelBasePath: string,
   /** Cache sensitivity
    * - values 0..1 where 0.01 means reset cache if input changed more than 1%
    * - set to 0 to disable caching
+   * default: 0.7
    */
   cacheSensitivity: number;
@@ -247,185 +250,120 @@
   segmentation: Partial<SegmentationConfig>,
 }
-/** - [See all default Config values...](https://github.com/vladmandic/human/blob/main/src/config.ts#L250) */
+/** - [See all default Config values...](https://github.com/vladmandic/human/blob/main/src/config.ts#L253) */
 const config: Config = {
-  backend: '', // select tfjs backend to use, leave empty to use default backend
-  // for browser environments: 'webgl', 'wasm', 'cpu', or 'humangl' (which is a custom version of webgl)
-  // for nodejs environments: 'tensorflow', 'wasm', 'cpu'
-  // default set to `humangl` for browsers and `tensorflow` for nodejs
-  modelBasePath: '', // base path for all models
-  // default set to `../models/` for browsers and `file://models/` for nodejs
-  wasmPath: '', // path for wasm binaries, only used for backend: wasm
-  // default set to download from jsdeliv during Human class instantiation
-  debug: true, // print additional status messages to console
-  async: true, // execute enabled models in parallel
-  warmup: 'full', // what to use for human.warmup(), can be 'none', 'face', 'full'
-  // warmup pre-initializes all models for faster inference but can take
-  // significant time on startup
-  // only used for `webgl` and `humangl` backends
-  cacheSensitivity: 0.70, // cache sensitivity
-  // values 0..1 where 0.01 means reset cache if input changed more than 1%
-  // set to 0 to disable caching
-  skipAllowed: false, // internal & dynamic
-  filter: { // run input through image filters before inference
-    // image filters run with near-zero latency as they are executed on the GPU
-    enabled: true, // enable image pre-processing filters
-    width: 0, // resize input width
-    height: 0, // resize input height
-    // if both width and height are set to 0, there is no resizing
-    // if just one is set, second one is scaled automatically
-    // if both are set, values are used as-is
-    flip: false, // flip input as mirror image
-    return: true, // return processed canvas imagedata in result
-    brightness: 0, // range: -1 (darken) to 1 (lighten)
-    contrast: 0, // range: -1 (reduce contrast) to 1 (increase contrast)
-    sharpness: 0, // range: 0 (no sharpening) to 1 (maximum sharpening)
-    blur: 0, // range: 0 (no blur) to N (blur radius in pixels)
-    saturation: 0, // range: -1 (reduce saturation) to 1 (increase saturation)
-    hue: 0, // range: 0 (no change) to 360 (hue rotation in degrees)
-    negative: false, // image negative
-    sepia: false, // image sepia colors
-    vintage: false, // image vintage colors
-    kodachrome: false, // image kodachrome colors
-    technicolor: false, // image technicolor colors
-    polaroid: false, // image polaroid camera effect
-    pixelate: 0, // range: 0 (no pixelate) to N (number of pixels to pixelate)
+  backend: '',
+  modelBasePath: '',
+  wasmPath: '',
+  debug: true,
+  async: true,
+  warmup: 'full',
+  cacheSensitivity: 0.70,
+  skipAllowed: false,
+  filter: {
+    enabled: true,
+    width: 0,
+    height: 0,
+    flip: false,
+    return: true,
+    brightness: 0,
+    contrast: 0,
+    sharpness: 0,
+    blur: 0,
+    saturation: 0,
+    hue: 0,
+    negative: false,
+    sepia: false,
+    vintage: false,
+    kodachrome: false,
+    technicolor: false,
+    polaroid: false,
+    pixelate: 0,
   },
   gesture: {
-    enabled: true, // enable gesture recognition based on model results
+    enabled: true,
   },
   face: {
-    enabled: true, // controls if specified modul is enabled
-    // face.enabled is required for all face models:
-    // detector, mesh, iris, age, gender, emotion
-    // (note: module is not loaded until it is required)
+    enabled: true,
     detector: {
-      modelPath: 'blazeface.json', // detector model, can be absolute path or relative to modelBasePath
-      rotation: true, // use best-guess rotated face image or just box with rotation as-is
-      // false means higher performance, but incorrect mesh mapping if face angle is above 20 degrees
-      // this parameter is not valid in nodejs
-      maxDetected: 1, // maximum number of faces detected in the input
-      // should be set to the minimum number for performance
-      skipFrames: 99, // how many max frames to go without re-running the face bounding box detector
-      // only used when cacheSensitivity is not zero
-      skipTime: 2500, // how many ms to go without re-running the face bounding box detector
-      // only used when cacheSensitivity is not zero
-      minConfidence: 0.2, // threshold for discarding a prediction
-      iouThreshold: 0.1, // ammount of overlap between two detected objects before one object is removed
-      return: false, // return extracted face as tensor
-      // in which case user is reponsible for disposing the tensor
+      modelPath: 'blazeface.json',
+      rotation: true,
+      maxDetected: 1,
+      skipFrames: 99,
+      skipTime: 2500,
+      minConfidence: 0.2,
+      iouThreshold: 0.1,
+      return: false,
     },
     mesh: {
       enabled: true,
-      modelPath: 'facemesh.json', // facemesh model, can be absolute path or relative to modelBasePath
+      modelPath: 'facemesh.json',
     },
     iris: {
       enabled: true,
-      modelPath: 'iris.json', // face iris model
-      // can be either absolute path or relative to modelBasePath
+      modelPath: 'iris.json',
     },
     emotion: {
       enabled: true,
-      minConfidence: 0.1, // threshold for discarding a prediction
-      skipFrames: 99, // how max many frames to go without re-running the detector
-      // only used when cacheSensitivity is not zero
-      skipTime: 1500, // how many ms to go without re-running the face bounding box detector
-      // only used when cacheSensitivity is not zero
-      modelPath: 'emotion.json', // face emotion model, can be absolute path or relative to modelBasePath
+      minConfidence: 0.1,
+      skipFrames: 99,
+      skipTime: 1500,
+      modelPath: 'emotion.json',
     },
     description: {
-      enabled: true, // to improve accuracy of face description extraction it is
-      // recommended to enable detector.rotation and mesh.enabled
-      modelPath: 'faceres.json', // face description model
-      // can be either absolute path or relative to modelBasePath
-      skipFrames: 99, // how many max frames to go without re-running the detector
-      // only used when cacheSensitivity is not zero
-      skipTime: 3000, // how many ms to go without re-running the face bounding box detector
-      // only used when cacheSensitivity is not zero
-      minConfidence: 0.1, // threshold for discarding a prediction
+      enabled: true,
+      modelPath: 'faceres.json',
+      skipFrames: 99,
+      skipTime: 3000,
+      minConfidence: 0.1,
     },
     antispoof: {
       enabled: false,
-      skipFrames: 99, // how max many frames to go without re-running the detector
-      // only used when cacheSensitivity is not zero
-      skipTime: 4000, // how many ms to go without re-running the face bounding box detector
-      // only used when cacheSensitivity is not zero
-      modelPath: 'antispoof.json', // face description model
-      // can be either absolute path or relative to modelBasePath
+      skipFrames: 99,
+      skipTime: 4000,
+      modelPath: 'antispoof.json',
     },
   },
   body: {
     enabled: true,
-    modelPath: 'movenet-lightning.json', // body model, can be absolute path or relative to modelBasePath
-    // can be 'posenet', 'blazepose', 'efficientpose', 'movenet-lightning', 'movenet-thunder'
+    modelPath: 'movenet-lightning.json',
     detector: {
-      modelPath: '', // optional body detector
+      modelPath: '',
     },
-    maxDetected: -1, // maximum number of people detected in the input
-    // should be set to the minimum number for performance
-    // only valid for posenet and movenet-multipose as other models detects single pose
-    // set to -1 to autodetect based on number of detected faces
-    minConfidence: 0.3, // threshold for discarding a prediction
-    skipFrames: 1, // how many max frames to go without re-running the detector
-    // only used when cacheSensitivity is not zero
-    skipTime: 200, // how many ms to go without re-running the face bounding box detector
-    // only used when cacheSensitivity is not zero
+    maxDetected: -1,
+    minConfidence: 0.3,
+    skipFrames: 1,
+    skipTime: 200,
   },
   hand: {
     enabled: true,
-    rotation: true, // use best-guess rotated hand image or just box with rotation as-is
-    // false means higher performance, but incorrect finger mapping if hand is inverted
-    // only valid for `handdetect` variation
-    skipFrames: 99, // how many max frames to go without re-running the hand bounding box detector
-    // only used when cacheSensitivity is not zero
-    skipTime: 2000, // how many ms to go without re-running the face bounding box detector
-    // only used when cacheSensitivity is not zero
-    minConfidence: 0.50, // threshold for discarding a prediction
-    iouThreshold: 0.2, // ammount of overlap between two detected objects before one object is removed
-    maxDetected: -1, // maximum number of hands detected in the input
-    // should be set to the minimum number for performance
-    // set to -1 to autodetect based on number of detected faces
-    landmarks: true, // detect hand landmarks or just hand boundary box
+    rotation: true,
+    skipFrames: 99,
+    skipTime: 2000,
+    minConfidence: 0.50,
+    iouThreshold: 0.2,
+    maxDetected: -1,
+    landmarks: true,
     detector: {
-      modelPath: 'handtrack.json', // hand detector model, can be absolute path or relative to modelBasePath
-      // can be 'handdetect' or 'handtrack'
+      modelPath: 'handtrack.json',
     },
     skeleton: {
-      modelPath: 'handskeleton.json', // hand skeleton model, can be absolute path or relative to modelBasePath
+      modelPath: 'handskeleton.json',
     },
   },
   object: {
     enabled: false,
-    modelPath: 'mb3-centernet.json', // experimental: object detection model, can be absolute path or relative to modelBasePath
-    // can be 'mb3-centernet' or 'nanodet'
-    minConfidence: 0.2, // threshold for discarding a prediction
-    iouThreshold: 0.4, // ammount of overlap between two detected objects before one object is removed
-    maxDetected: 10, // maximum number of objects detected in the input
-    skipFrames: 99, // how many max frames to go without re-running the detector
-    // only used when cacheSensitivity is not zero
-    skipTime: 1000, // how many ms to go without re-running object detector
-    // only used when cacheSensitivity is not zero
+    modelPath: 'mb3-centernet.json',
+    minConfidence: 0.2,
+    iouThreshold: 0.4,
+    maxDetected: 10,
+    skipFrames: 99,
+    skipTime: 1000,
   },
   segmentation: {
-    enabled: false, // controlls and configures all body segmentation module
-    // removes background from input containing person
-    // if segmentation is enabled it will run as preprocessing task before any other model
-    // alternatively leave it disabled and use it on-demand using human.segmentation method which can
-    // remove background or replace it with user-provided background
-    modelPath: 'selfie.json', // experimental: object detection model, can be absolute path or relative to modelBasePath
-    // can be 'selfie' or 'meet'
-    blur: 8, // blur segmentation output by n pixels for more realistic image
+    enabled: false,
+    modelPath: 'selfie.json',
+    blur: 8,
   },
 };
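For readers of the defaults above: `Human` merges a user-supplied partial configuration over these values, so only overrides need to be specified. A hedged usage sketch (assumed API per the library's documented constructor; the model URL is a hypothetical placeholder, not part of this commit):

```ts
import Human from '@vladmandic/human';

// override only what differs from the defaults shown above
const human = new Human({
  backend: 'wasm',     // instead of autodetected 'humangl' / 'tensorflow'
  cacheSensitivity: 0, // disable caching; skipFrames/skipTime then have no effect
  modelBasePath: 'https://vladmandic.github.io/human/models/', // hypothetical model location
});

async function run(input: HTMLVideoElement) {
  const result = await human.detect(input); // runs all enabled modules per merged config
  console.log(result.face, result.body, result.hand, result.gesture);
}
```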

wiki

@@ -1 +1 @@
-Subproject commit 5e874c076123f1c2b2821b1c37f6005e775465aa
+Subproject commit 5e89af1004860ea9f302e516699b5e0b4e0a825f