mirror of https://github.com/vladmandic/human
updated docs
parent a9c1394caf
commit e73a55ab96
@ -14,9 +14,12 @@ npm run build

This will rebuild library itself (all variations) as well as demo

<br>

Project is written in pure `JavaScript` [ECMAScript version 2020](https://www.ecma-international.org/ecma-262/11.0/index.html)
Build target is `JavaScript` **ECMAScript version 2018**

<br>

Only project dependency is [@tensorflow/tfjs](https://github.com/tensorflow/tfjs)
Development dependencies are [eslint](https://github.com/eslint) used for code linting and [esbuild](https://github.com/evanw/esbuild) used for IIFE and ESM script bundling

Configuration.md

@ -16,27 +16,35 @@ config = {
```js
  backend: 'webgl',          // select tfjs backend to use
  console: true,             // enable debugging output to console
  async: true,               // execute enabled models in parallel
                             // this disables per-model performance data but
                             // slightly increases performance
                             // cannot be used if profiling is enabled
  profile: false,            // enable tfjs profiling
                             // this has significant performance impact
                             // only enable for debugging purposes
                             // currently only implemented for age,gender,emotion models
  deallocate: false,         // aggressively deallocate gpu memory after each usage
                             // only valid for webgl backend and only during first call
                             // cannot be changed unless library is reloaded
                             // this has significant performance impact
                             // only enable on low-memory devices
  scoped: false,             // enable scoped runs
                             // some models *may* have memory leaks,
                             // this wraps everything in a local scope at a cost of performance
                             // typically not needed
  videoOptimized: true,      // perform additional optimizations when input is video,
                             // must be disabled for images
                             // basically this skips object box boundary detection for every n frames
                             // while maintaining in-box detection since objects cannot move that fast

  filter: {
    enabled: true,           // enable image pre-processing filters
    width: 0,                // resize input width
    height: 0,               // resize input height
                             // if both width and height are set to 0, there is no resizing
                             // if just one is set, second one is scaled automatically
                             // if both are set, values are used as-is
    return: true,            // return processed canvas imagedata in result
    brightness: 0,           // range: -1 (darken) to 1 (lighten)
    contrast: 0,             // range: -1 (reduce contrast) to 1 (increase contrast)
    sharpness: 0,            // range: 0 (no sharpening) to 1 (maximum sharpening)
```
@ -51,90 +59,115 @@ config = {
```js
    polaroid: false,         // image polaroid camera effect
    pixelate: 0,             // range: 0 (no pixelate) to N (number of pixels to pixelate)
  },

  gesture: {
    enabled: true,           // enable simple gesture recognition
  },

  face: {
    enabled: true,           // controls if specified module is enabled
                             // face.enabled is required for all face models:
                             // detector, mesh, iris, age, gender, emotion
                             // (note: module is not loaded until it is required)
    detector: {
      modelPath: '../models/blazeface-back.json', // can be 'front' or 'back'
                             // 'front' is optimized for large faces
                             // such as front-facing camera and
                             // 'back' is optimized for distant faces
      inputSize: 256,        // fixed value: 128 for 'front' and 256 for 'back'
      maxFaces: 10,          // maximum number of faces detected in the input
                             // should be set to the minimum number for performance
      skipFrames: 15,        // how many frames to go without re-running the face bounding box detector
                             // only used for video inputs
                             // e.g., if model is running at 25 FPS, we can re-use existing bounding
                             // box for updated face analysis as the head probably hasn't moved much
                             // in short time (15 * 1/25 = 0.6 sec)
      minConfidence: 0.1,    // threshold for discarding a prediction
      iouThreshold: 0.1,     // threshold for deciding whether boxes overlap too much in
                             // non-maximum suppression (0.1 means drop if overlap 10%)
      scoreThreshold: 0.2,   // threshold for deciding when to remove boxes based on score
                             // in non-maximum suppression,
                             // this is applied on detection objects only and before minConfidence
    },
    mesh: {
      enabled: true,
      modelPath: '../models/facemesh.json',
      inputSize: 192,        // fixed value
    },
    iris: {
      enabled: true,
      modelPath: '../models/iris.json',
      inputSize: 64,         // fixed value
    },
    age: {
      enabled: true,
      modelPath: '../models/age-ssrnet-imdb.json', // can be 'age-ssrnet-imdb' or 'age-ssrnet-wiki'
                             // which determines training set for model
      inputSize: 64,         // fixed value
      skipFrames: 15,        // how many frames to go without re-running the detector
                             // only used for video inputs
    },
    gender: {
      enabled: true,
      minConfidence: 0.1,    // threshold for discarding a prediction
      modelPath: '../models/gender-ssrnet-imdb.json', // can be 'gender', 'gender-ssrnet-imdb' or 'gender-ssrnet-wiki'
      inputSize: 64,         // fixed value
      skipFrames: 15,        // how many frames to go without re-running the detector
                             // only used for video inputs
    },
    emotion: {
      enabled: true,
      inputSize: 64,         // fixed value
      minConfidence: 0.2,    // threshold for discarding a prediction
      skipFrames: 15,        // how many frames to go without re-running the detector
      modelPath: '../models/emotion-large.json', // can be 'mini' or 'large'
    },
  },

  body: {
    enabled: true,
    modelPath: '../models/posenet.json',
    inputResolution: 257,    // fixed value
    maxDetections: 10,       // maximum number of people detected in the input
                             // should be set to the minimum number for performance
    scoreThreshold: 0.8,     // threshold for deciding when to remove boxes based on score
                             // in non-maximum suppression
    nmsRadius: 20,           // radius for deciding points are too close in non-maximum suppression
  },

  hand: {
    enabled: true,
    inputSize: 256,          // fixed value
    skipFrames: 15,          // how many frames to go without re-running the hand bounding box detector
                             // only used for video inputs
                             // e.g., if model is running at 25 FPS, we can re-use existing bounding
                             // box for updated hand skeleton analysis as the hand probably
                             // hasn't moved much in short time (15 * 1/25 = 0.6 sec)
    minConfidence: 0.5,      // threshold for discarding a prediction
    iouThreshold: 0.1,       // threshold for deciding whether boxes overlap too much
                             // in non-maximum suppression
    scoreThreshold: 0.8,     // threshold for deciding when to remove boxes based on
                             // score in non-maximum suppression
    maxHands: 1,             // maximum number of hands detected in the input
                             // should be set to the minimum number for performance
    landmarks: true,         // detect hand landmarks or just hand boundary box
    detector: {
      modelPath: '../models/handdetect.json',
    },
    skeleton: {
      modelPath: '../models/handskeleton.json',
    },
  },
};
```

<br>

Any user configuration and default configuration are merged using deep-merge, so you do not need to redefine entire configuration
Configuration object is large, but typically you only need to modify a few values:
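For example, a user configuration that overrides just a handful of values (a minimal sketch, assuming the per-call configuration parameter of `human.detect()` described in the Usage notes):

```js
const human = new Human();

// only values that differ from defaults need to be listed,
// everything else is inherited from the default configuration via deep-merge
const result = await human.detect(input, {
  backend: 'wasm',
  face: { emotion: { enabled: false } },
  hand: { enabled: false },
});
```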

Demos.md

@ -39,6 +39,8 @@ npm run dev

On first start, it will install all development dependencies required to rebuild `Human` library

By default, web server will run on port `8000` which is configurable in `dev-server.js:options.port`

```log
> @vladmandic/human@0.7.5 dev /home/vlado/dev/human
> npm install && node --trace-warnings --unhandled-rejections=strict --trace-uncaught --no-deprecation dev-server.js
```

@ -62,4 +64,3 @@ found 0 vulnerabilities

- `node.js`: Demo using NodeJS with CommonJS module
This is a very simple demo as although `Human` library is compatible with NodeJS execution
and is able to load images and models from local filesystem,

Home.md

@ -27,16 +27,16 @@
- [**Performance Notes**](https://github.com/vladmandic/human/wiki/Performance)
- [**Credits**](https://github.com/vladmandic/human/wiki/Credits)

<br>

Compatible with *Browser*, *WebWorker* and *NodeJS* execution on both Windows and Linux
- Browser/WebWorker: Compatible with *CPU*, *WebGL*, *WASM* and *WebGPU* backends
- NodeJS: Compatible with software backend *tfjs-node* and CUDA accelerated backend *tfjs-node-gpu*

(and maybe with React-Native as it doesn't use any DOM objects)

<br>

*This is a pre-release project, see [issues](https://github.com/vladmandic/human/issues) for list of known limitations and planned enhancements*

*Suggestions are welcome!*

@ -25,6 +25,7 @@ Defaults:

```json
  "browser": "dist/human.esm.js",
}
```

<br>

### 1. [IIFE](https://developer.mozilla.org/en-US/docs/Glossary/IIFE) script

@ -45,6 +46,8 @@ Which you can use to create instance of `human` library:

This way you can also use `Human` library within embedded `<script>` tag in your `html` page for all-in-one approach
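A minimal all-in-one page might look like this (a sketch; the bundle path, the image id, and the `Human` global registered by the IIFE script are illustrative assumptions):

```html
<!DOCTYPE html>
<html>
  <body>
    <img id="image" src="sample.jpg">
    <!-- IIFE bundle loads in global scope, no import needed -->
    <script src="dist/human.js"></script>
    <script>
      const human = new Human();
      human.detect(document.getElementById('image'))
        .then((result) => console.log(result));
    </script>
  </body>
</html>
```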

<br>

### 2. [ESM](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/import) module

*Recommended for usage within `Browser`*

@ -90,6 +93,8 @@ Install with:

```js
const human = new Human();
```

<br>

### 3. [NPM](https://www.npmjs.com/) module

*Recommended for `NodeJS` projects that will execute in the backend*

@ -117,6 +122,8 @@ const config = {

```js
}
```

<br>

### Weights

Pretrained model weights are included in `./models`
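If models are hosted somewhere else, the individual `modelPath` values can point at the new location instead (a sketch; the URL is illustrative):

```js
const config = {
  face: { detector: { modelPath: 'https://example.com/models/blazeface-back.json' } },
};
```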
@ -4,6 +4,8 @@ Performance will vary depending on your hardware, but also on number of resoluti

For example, it can perform multiple face detections at 60+ FPS, but drops to ~15 FPS on medium-complexity images if all modules are enabled

<br>

### Performance per module on a **notebook** with nVidia GTX1050 GPU on a FullHD input:

- Enabled all: 15 FPS
@ -32,5 +34,7 @@ For example, it can perform multiple face detections at 60+ FPS, but drops to ~1
- Hand: 40 FPS (standalone)
- Body: 10 FPS (standalone)

<br>

For performance details, see output of `result.performance` object after running inference
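For example (a sketch; the timing values are illustrative):

```js
const result = await human.detect(input);
// result.performance holds per-module and total processing times in milliseconds
console.log(result.performance); // e.g. { total: 248, face: 112, body: 59, hand: 63 }
```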

Usage.md

@ -1,9 +1,11 @@

## Usage

`Human` library does not require special initialization
All configuration is done in a single JSON object and all model weights are dynamically loaded upon their first usage
(and only then: `Human` will not load weights that it doesn't need according to configuration)

<br>

There is only *ONE* method you need:

@ -34,6 +36,8 @@ Additionally, `Human` library exposes several objects and methods:

```js
// if you want to pre-load them instead of on-demand loading during 'human.detect()'
```
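For example, a warm-up call so the first `human.detect()` does not pay the model loading cost (a sketch; `human.load()` is assumed to be the pre-load method the comment above refers to):

```js
// downloads and initializes all models enabled in configuration
await human.load();
```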

<br>

Note that when using `Human` library in `NodeJS`, you must load and parse the image *before* you pass it for detection and dispose it afterwards
Input format is `Tensor4D[1, width, height, 3]` of type `float32`
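A minimal sketch of that flow using `tfjs-node` (the helper name and synchronous file read are illustrative):

```js
const fs = require('fs');
const tf = require('@tensorflow/tfjs-node');

async function detectFile(human, path) {
  const buffer = fs.readFileSync(path);
  // decode, cast to float32 and add batch dimension; tf.tidy disposes the intermediate tensors
  const image = tf.tidy(() => tf.node.decodeImage(buffer, 3).cast('float32').expandDims(0));
  const result = await human.detect(image);
  image.dispose(); // input tensor must be disposed by the caller
  return result;
}
```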