mirror of https://github.com/vladmandic/human
major update for 1.8 release candidate
parent
3b81af15f2
commit
9062444877
|
@ -48,19 +48,6 @@ const config: Config = {
|
||||||
// this disables per-model performance data but
|
// this disables per-model performance data but
|
||||||
// slightly increases performance
|
// slightly increases performance
|
||||||
// cannot be used if profiling is enabled
|
// cannot be used if profiling is enabled
|
||||||
profile: false, // internal: enable tfjs profiling
|
|
||||||
// this has significant performance impact
|
|
||||||
// only enable for debugging purposes
|
|
||||||
// currently only implemented for age,gender,emotion models
|
|
||||||
deallocate: false, // internal: aggressively deallocate gpu memory after each usage
|
|
||||||
// only valid for webgl and humangl backend and only during first call
|
|
||||||
// cannot be changed unless library is reloaded
|
|
||||||
// this has significant performance impact
|
|
||||||
// only enable on low-memory devices
|
|
||||||
scoped: false, // internal: enable scoped runs
|
|
||||||
// some models *may* have memory leaks,
|
|
||||||
// this wraps everything in a local scope at a cost of performance
|
|
||||||
// typically not needed
|
|
||||||
videoOptimized: true, // perform additional optimizations when input is video,
|
videoOptimized: true, // perform additional optimizations when input is video,
|
||||||
// must be disabled for images
|
// must be disabled for images
|
||||||
// automatically disabled for Image, ImageData, ImageBitmap and Tensor inputs
|
// automatically disabled for Image, ImageData, ImageBitmap and Tensor inputs
|
||||||
|
@ -105,12 +92,11 @@ const config: Config = {
|
||||||
// detector, mesh, iris, age, gender, emotion
|
// detector, mesh, iris, age, gender, emotion
|
||||||
// (note: module is not loaded until it is required)
|
// (note: module is not loaded until it is required)
|
||||||
detector: {
|
detector: {
|
||||||
modelPath: 'blazeface-back.json', // detector model
|
modelPath: 'blazeface-back.json', // detector model, can be absolute path or relative to modelBasePath
|
||||||
// can be either absolute path or relative to modelBasePath
|
|
||||||
rotation: false, // use best-guess rotated face image or just box with rotation as-is
|
rotation: false, // use best-guess rotated face image or just box with rotation as-is
|
||||||
// false means higher performance, but incorrect mesh mapping if face angle is above 20 degrees
|
// false means higher performance, but incorrect mesh mapping if face angle is above 20 degrees
|
||||||
// this parameter is not valid in nodejs
|
// this parameter is not valid in nodejs
|
||||||
maxFaces: 10, // maximum number of faces detected in the input
|
maxDetected: 10, // maximum number of faces detected in the input
|
||||||
// should be set to the minimum number for performance
|
// should be set to the minimum number for performance
|
||||||
skipFrames: 21, // how many frames to go without re-running the face bounding box detector
|
skipFrames: 21, // how many frames to go without re-running the face bounding box detector
|
||||||
// only used for video inputs
|
// only used for video inputs
|
||||||
|
@ -120,18 +106,13 @@ const config: Config = {
|
||||||
skipInitial: false, // if previous detection resulted in no faces detected,
|
skipInitial: false, // if previous detection resulted in no faces detected,
|
||||||
// should skipFrames be reset immediately to force new detection cycle
|
// should skipFrames be reset immediately to force new detection cycle
|
||||||
minConfidence: 0.2, // threshold for discarding a prediction
|
minConfidence: 0.2, // threshold for discarding a prediction
|
||||||
iouThreshold: 0.1, // threshold for deciding whether boxes overlap too much in
|
iouThreshold: 0.1, // amount of overlap between two detected objects before one object is removed
|
||||||
// non-maximum suppression (0.1 means drop if overlap 10%)
|
|
||||||
scoreThreshold: 0.2, // threshold for deciding when to remove boxes based on score
|
|
||||||
// in non-maximum suppression,
|
|
||||||
// this is applied on detection objects only and before minConfidence
|
|
||||||
return: false, // return extracted face as tensor
|
return: false, // return extracted face as tensor
|
||||||
},
|
},
|
||||||
|
|
||||||
mesh: {
|
mesh: {
|
||||||
enabled: true,
|
enabled: true,
|
||||||
modelPath: 'facemesh.json', // facemesh model
|
modelPath: 'facemesh.json', // facemesh model, can be absolute path or relative to modelBasePath
|
||||||
// can be either absolute path or relative to modelBasePath
|
|
||||||
},
|
},
|
||||||
|
|
||||||
iris: {
|
iris: {
|
||||||
|
@ -154,25 +135,18 @@ const config: Config = {
|
||||||
enabled: true,
|
enabled: true,
|
||||||
minConfidence: 0.1, // threshold for discarding a prediction
|
minConfidence: 0.1, // threshold for discarding a prediction
|
||||||
skipFrames: 32, // how many frames to go without re-running the detector
|
skipFrames: 32, // how many frames to go without re-running the detector
|
||||||
modelPath: 'emotion.json', // face emotion model
|
modelPath: 'emotion.json', // face emotion model, can be absolute path or relative to modelBasePath
|
||||||
// can be either absolute path or relative to modelBasePath
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
||||||
body: {
|
body: {
|
||||||
enabled: true,
|
enabled: true,
|
||||||
modelPath: 'posenet.json', // body model
|
modelPath: 'posenet.json', // body model, can be absolute path or relative to modelBasePath
|
||||||
// can be either absolute path or relative to modelBasePath
|
// can be 'posenet' or 'blazepose'
|
||||||
// can be 'posenet', 'blazepose' or 'efficientpose'
|
maxDetected: 1, // maximum number of people detected in the input
|
||||||
// 'blazepose' and 'efficientpose' are experimental
|
|
||||||
maxDetections: 1, // maximum number of people detected in the input
|
|
||||||
// should be set to the minimum number for performance
|
// should be set to the minimum number for performance
|
||||||
// only valid for posenet as blazepose only detects single pose
|
// only valid for posenet as blazepose only detects single pose
|
||||||
scoreThreshold: 0.2, // threshold for deciding when to remove boxes based on score
|
minConfidence: 0.2, // threshold for discarding a prediction
|
||||||
// in non-maximum suppression
|
|
||||||
// only valid for posenet as blazepose only detects single pose
|
|
||||||
nmsRadius: 20, // radius for deciding points are too close in non-maximum suppression
|
|
||||||
// only valid for posenet as blazepose only detects single pose
|
|
||||||
},
|
},
|
||||||
|
|
||||||
hand: {
|
hand: {
|
||||||
|
@ -187,35 +161,28 @@ const config: Config = {
|
||||||
skipInitial: false, // if previous detection resulted in no hands detected,
|
skipInitial: false, // if previous detection resulted in no hands detected,
|
||||||
// should skipFrames be reset immediately to force new detection cycle
|
// should skipFrames be reset immediately to force new detection cycle
|
||||||
minConfidence: 0.1, // threshold for discarding a prediction
|
minConfidence: 0.1, // threshold for discarding a prediction
|
||||||
iouThreshold: 0.1, // threshold for deciding whether boxes overlap too much
|
iouThreshold: 0.1, // amount of overlap between two detected objects before one object is removed
|
||||||
// in non-maximum suppression
|
maxDetected: 1, // maximum number of hands detected in the input
|
||||||
scoreThreshold: 0.5, // threshold for deciding when to remove boxes based on
|
|
||||||
// score in non-maximum suppression
|
|
||||||
maxHands: 1, // maximum number of hands detected in the input
|
|
||||||
// should be set to the minimum number for performance
|
// should be set to the minimum number for performance
|
||||||
landmarks: true, // detect hand landmarks or just hand boundary box
|
landmarks: true, // detect hand landmarks or just hand boundary box
|
||||||
detector: {
|
detector: {
|
||||||
modelPath: 'handdetect.json', // hand detector model
|
modelPath: 'handdetect.json', // hand detector model, can be absolute path or relative to modelBasePath
|
||||||
// can be either absolute path or relative to modelBasePath
|
|
||||||
},
|
},
|
||||||
skeleton: {
|
skeleton: {
|
||||||
modelPath: 'handskeleton.json', // hand skeleton model
|
modelPath: 'handskeleton.json', // hand skeleton model, can be absolute path or relative to modelBasePath
|
||||||
// can be either absolute path or relative to modelBasePath
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
||||||
object: {
|
object: {
|
||||||
enabled: false,
|
enabled: false,
|
||||||
modelPath: 'nanodet.json', // object detection model
|
modelPath: 'nanodet.json', // experimental: object detection model, can be absolute path or relative to modelBasePath
|
||||||
// can be either absolute path or relative to modelBasePath
|
minConfidence: 0.2, // threshold for discarding a prediction
|
||||||
// 'nanodet' is experimental
|
iouThreshold: 0.4, // amount of overlap between two detected objects before one object is removed
|
||||||
minConfidence: 0.20, // threshold for discarding a prediction
|
maxDetected: 10, // maximum number of objects detected in the input
|
||||||
iouThreshold: 0.40, // threshold for deciding whether boxes overlap too much
|
|
||||||
// in non-maximum suppression
|
|
||||||
maxResults: 10, // maximum number of objects detected in the input
|
|
||||||
skipFrames: 41, // how many frames to go without re-running the detector
|
skipFrames: 41, // how many frames to go without re-running the detector
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
export { config as defaults };
|
||||||
```
|
```
|
||||||
|
|
||||||
<br>
|
<br>
|
||||||
|
|
Loading…
Reference in New Issue