mirror of https://github.com/vladmandic/human
update config for faceboxes
parent fb11ed0f09
commit 0fa077bf63
@@ -1,7 +1,7 @@
 # @vladmandic/human

-Version: **0.11.1**
+Version: **0.11.4**

 Description: **Human: AI-powered 3D Face Detection, Face Embedding & Recognition, Body Pose Tracking, Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction & Gesture Recognition**

 Author: **Vladimir Mandic <mandic00@live.com>**
@@ -10,6 +10,13 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
 ## Changelog

+### **HEAD -> main, tag: v0.11.4, origin/main, origin/HEAD** 2021/02/06 mandic00@live.com
+
+### **tag: v0.11.3** 2021/02/02 mandic00@live.com
+
+### **tag: v0.11.2** 2021/01/30 mandic00@live.com
+- added warmup for nodejs
+
 ### **update for tfjs 3.0.0** 2021/01/29 mandic00@live.com

 ### **tag: v0.11.1** 2021/01/29 mandic00@live.com
@@ -37,7 +37,6 @@ config = {
 backend: 'webgl', // select tfjs backend to use
 wasmPath: '../assets/', // path for wasm binaries
 // only used for backend: wasm
 console: true, // enable debugging output to console
 async: true, // execute enabled models in parallel
 // this disables per-model performance data but
 // slightly increases performance
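
For orientation, a minimal sketch of how these top-level options are consumed; the `@vladmandic/human` import and `new Human(config)` constructor match the project's README, while the element id and paths here are illustrative:

```js
// sketch: run Human on the wasm backend instead of the default webgl
// (wasmPath is only consulted when backend is 'wasm'; paths are illustrative)
import Human from '@vladmandic/human';

const human = new Human({
  backend: 'wasm',        // select tfjs backend
  wasmPath: '../assets/', // where the tfjs wasm binaries are served from
  async: true,            // run enabled models in parallel
});

const result = await human.detect(document.getElementById('video'));
console.log(result.face, result.body, result.hand);
```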
@@ -62,7 +61,6 @@ config = {
 warmup: 'face', // what to use for human.warmup(), can be 'none', 'face', 'full'
 // warmup pre-initializes all models for faster inference but can take
 // significant time on startup

 filter: {
 enabled: true, // enable image pre-processing filters
 width: 0, // resize input width
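
Since `warmup` trades startup time for first-inference latency, a short sketch of the pattern; `human.load()` and `human.warmup()` are the library calls referenced by the comment above, while the timing harness is illustrative:

```js
// sketch: pay the model-initialization cost up front instead of on first detect()
import Human from '@vladmandic/human';

const human = new Human({ warmup: 'face' }); // can be 'none', 'face', 'full'

const t0 = Date.now();
await human.load();   // fetch and instantiate all enabled models
await human.warmup(); // run one dummy inference as configured above
console.log(`startup: ${Date.now() - t0} ms; first real detect() is now fast`);
```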
@@ -96,16 +94,16 @@ config = {
 // detector, mesh, iris, age, gender, emotion
 // (note: module is not loaded until it is required)
 detector: {
-modelPath: '../models/blazeface-back.json', // can be 'front' or 'back'.
-// 'front' is optimized for large faces
-// such as front-facing camera and
-// 'back' is optimized for distant faces.
+modelPath: '../models/blazeface-back.json', // can be 'blazeface-front', 'blazeface-back' or 'faceboxes'
+// 'blazeface-front' is blazeface model optimized for large faces such as front-facing camera
+// 'blazeface-back' is blazeface model optimized for smaller and/or distant faces
+// 'faceboxes' is alternative model to 'blazeface'
 inputSize: 256, // fixed value: 128 for front and 256 for 'back'
 rotation: false, // use best-guess rotated face image or just box with rotation as-is
 // false means higher performance, but incorrect mesh mapping if face angle is above 20 degrees
 maxFaces: 10, // maximum number of faces detected in the input
 // should be set to the minimum number for performance
-skipFrames: 15, // how many frames to go without re-running the face bounding box detector
+skipFrames: 11, // how many frames to go without re-running the face bounding box detector
 // only used for video inputs
 // e.g., if model is running at 25 FPS, we can re-use existing bounding
 // box for updated face analysis as the head probably hasn't moved much
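
This hunk is the heart of the commit: the face detector is now selectable between the two blazeface variants and faceboxes. A hedged sketch of opting into the alternative model; the nesting under `face` follows the library's full config layout, and `../models/faceboxes.json` is an assumed filename extrapolated from the naming convention above:

```js
// sketch: swap the face detector from blazeface-back to faceboxes
import Human from '@vladmandic/human';

const human = new Human({
  face: {
    detector: {
      modelPath: '../models/faceboxes.json', // assumption: follows the '<model>.json' convention
      maxFaces: 1,    // keep at the minimum you actually need, per the comment above
      skipFrames: 11, // for video, reuse the detected box for 11 frames
    },
  },
});
```

Note that `inputSize` is documented as fixed per model (128 for front, 256 for back), so it is left untouched here.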
@@ -136,7 +134,7 @@ config = {
 modelPath: '../models/age-ssrnet-imdb.json', // can be 'age-ssrnet-imdb' or 'age-ssrnet-wiki'
 // which determines training set for model
 inputSize: 64, // fixed value
-skipFrames: 15, // how many frames to go without re-running the detector
+skipFrames: 31, // how many frames to go without re-running the detector
 // only used for video inputs
 },

@@ -145,7 +143,7 @@ config = {
 minConfidence: 0.1, // threshold for discarding a prediction
 modelPath: '../models/gender-ssrnet-imdb.json', // can be 'gender', 'gender-ssrnet-imdb' or 'gender-ssrnet-wiki'
 inputSize: 64, // fixed value
-skipFrames: 15, // how many frames to go without re-running the detector
+skipFrames: 41, // how many frames to go without re-running the detector
 // only used for video inputs
 },

@@ -153,7 +151,7 @@ config = {
 enabled: true,
 inputSize: 64, // fixed value
 minConfidence: 0.2, // threshold for discarding a prediction
-skipFrames: 15, // how many frames to go without re-running the detector
+skipFrames: 21, // how many frames to go without re-running the detector
 modelPath: '../models/emotion-large.json', // can be 'mini', 'large'
 },

@@ -170,9 +168,12 @@ config = {
 inputSize: 257, // fixed value
 maxDetections: 10, // maximum number of people detected in the input
 // should be set to the minimum number for performance
-scoreThreshold: 0.8, // threshold for deciding when to remove boxes based on score
+scoreThreshold: 0.5, // threshold for deciding when to remove boxes based on score
 // in non-maximum suppression
 nmsRadius: 20, // radius for deciding points are too close in non-maximum suppression
+outputStride: 16, // size of block in which to run point detection, smaller value means higher resolution
+// defined by model itself, can be 8, 16, or 32
+modelType: 'MobileNet', // Human includes MobileNet version, but you can switch to ResNet
 },

 hand: {
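
The body section gains two documented knobs, `outputStride` and `modelType`. A sketch of the resolution/speed trade-off they control; the values come from the comments above, and since this diff does not say whether ResNet weights ship with the package, that swap is left out:

```js
// sketch: tune posenet keypoint resolution vs speed
import Human from '@vladmandic/human';

const human = new Human({
  body: {
    enabled: true,
    maxDetections: 1,    // minimum needed, per the performance note
    scoreThreshold: 0.5, // the new default from this commit
    outputStride: 16,    // 8 = finer detection but slower, 32 = coarser but faster
  },
});
```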
@@ -180,15 +181,15 @@ config = {
 rotation: false, // use best-guess rotated hand image or just box with rotation as-is
 // false means higher performance, but incorrect finger mapping if hand is inverted
 inputSize: 256, // fixed value
-skipFrames: 15, // how many frames to go without re-running the hand bounding box detector
+skipFrames: 12, // how many frames to go without re-running the hand bounding box detector
 // only used for video inputs
 // e.g., if model is running at 25 FPS, we can re-use existing bounding
 // box for updated hand skeleton analysis as the hand probably
 // hasn't moved much in short time (10 * 1/25 = 0.4 sec)
-minConfidence: 0.5, // threshold for discarding a prediction
+minConfidence: 0.1, // threshold for discarding a prediction
 iouThreshold: 0.1, // threshold for deciding whether boxes overlap too much
 // in non-maximum suppression
-scoreThreshold: 0.8, // threshold for deciding when to remove boxes based on
+scoreThreshold: 0.5, // threshold for deciding when to remove boxes based on
 // score in non-maximum suppression
 maxHands: 1, // maximum number of hands detected in the input
 // should be set to the minimum number for performance
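
The skipFrames comments quantify the caching trade-off; the arithmetic, worked through with the 25 FPS figure the comment assumes:

```js
// how long a cached hand bounding box is reused before the detector re-runs
const fps = 25;        // frame rate assumed by the comment above
const skipFrames = 12; // the new value in this commit
console.log(`box reused for ~${(skipFrames / fps).toFixed(2)} sec`); // ~0.48 sec
```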
@@ -1,6 +1,7 @@
 ## Credits

 - Face Detection: [**MediaPipe BlazeFace**](https://drive.google.com/file/d/1f39lSzU5Oq-j_OXgS67KfN5wNsoeAZ4V/view)
+- Face Boxes: [**FaceBoxes**](https://github.com/TropComplique/FaceBoxes-tensorflow/)
 - Facial Spacial Geometry: [**MediaPipe FaceMesh**](https://drive.google.com/file/d/1VFC_wIpw4O7xBOiTgUldl79d9LA-LsnA/view)
 - Eye Iris Details: [**MediaPipe Iris**](https://drive.google.com/file/d/1bsWbokp9AklH2ANjCfmjqEzzxO1CNbMu/view)
 - Hand Detection & Skeleton: [**MediaPipe HandPose**](https://drive.google.com/file/d/1sv4sSb9BSNVZhLzxXJ0jBv9DqD-4jnAz/view)