mirror of https://github.com/vladmandic/human

major update for 1.8 release candidate

parent 3b81af15f2
commit 9062444877
```diff
@@ -48,19 +48,6 @@ const config: Config = {
                          // this disables per-model performance data but
                          // slightly increases performance
                          // cannot be used if profiling is enabled
-  profile: false,        // internal: enable tfjs profiling
-                         // this has significant performance impact
-                         // only enable for debugging purposes
-                         // currently only implemented for age,gender,emotion models
-  deallocate: false,     // internal: aggressively deallocate gpu memory after each usage
-                         // only valid for webgl and humangl backend and only during first call
-                         // cannot be changed unless library is reloaded
-                         // this has significant performance impact
-                         // only enable on low-memory devices
-  scoped: false,         // internal: enable scoped runs
-                         // some models *may* have memory leaks,
-                         // this wraps everything in a local scope at a cost of performance
-                         // typically not needed
   videoOptimized: true,  // perform additional optimizations when input is video,
                          // must be disabled for images
                          // automatically disabled for Image, ImageData, ImageBitmap and Tensor inputs
```
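This hunk drops the internal `profile`, `deallocate`, and `scoped` flags from the public configuration. A minimal sketch of what that means for an existing user override, using plain object literals rather than the library's exported `Config` type (key names come from the diff above, values are illustrative):

```ts
// Before this commit: internal tfjs flags could be toggled from the user config.
const overridesBefore = {
  profile: false,       // removed: tfjs profiling toggle
  deallocate: false,    // removed: aggressive GPU memory deallocation
  scoped: false,        // removed: scoped runs
  videoOptimized: true,
};

// After this commit: only the surviving public option remains relevant here.
const overridesAfter = {
  videoOptimized: true, // automatically disabled for Image, ImageData, ImageBitmap and Tensor inputs
};
```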
```diff
@@ -105,12 +92,11 @@ const config: Config = {
                          // detector, mesh, iris, age, gender, emotion
                          // (note: module is not loaded until it is required)
     detector: {
-      modelPath: 'blazeface-back.json', // detector model
-                                        // can be either absolute path or relative to modelBasePath
+      modelPath: 'blazeface-back.json', // detector model, can be absolute path or relative to modelBasePath
       rotation: false,   // use best-guess rotated face image or just box with rotation as-is
                          // false means higher performance, but incorrect mesh mapping if face angle is above 20 degrees
                          // this parameter is not valid in nodejs
-      maxFaces: 10,      // maximum number of faces detected in the input
+      maxDetected: 10,   // maximum number of faces detected in the input
                          // should be set to the minimum number for performance
       skipFrames: 21,    // how many frames to go without re-running the face bounding box detector
                          // only used for video inputs
```
```diff
@@ -120,18 +106,13 @@ const config: Config = {
       skipInitial: false, // if previous detection resulted in no faces detected,
                           // should skipFrames be reset immediately to force new detection cycle
       minConfidence: 0.2, // threshold for discarding a prediction
-      iouThreshold: 0.1,  // threshold for deciding whether boxes overlap too much in
-                          // non-maximum suppression (0.1 means drop if overlap 10%)
-      scoreThreshold: 0.2, // threshold for deciding when to remove boxes based on score
-                           // in non-maximum suppression,
-                           // this is applied on detection objects only and before minConfidence
+      iouThreshold: 0.1,  // amount of overlap between two detected objects before one object is removed
       return: false,      // return extracted face as tensor
     },

     mesh: {
       enabled: true,
-      modelPath: 'facemesh.json', // facemesh model
-                                  // can be either absolute path or relative to modelBasePath
+      modelPath: 'facemesh.json', // facemesh model, can be absolute path or relative to modelBasePath
     },

     iris: {
```
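The face hunks above rename `maxFaces` to `maxDetected`, fold the two-line `modelPath` comments into one, and replace the separate `scoreThreshold` NMS description with a single `iouThreshold` overlap setting. A sketch of a matching user-side override; the nested `face.detector` / `face.mesh` layout follows the diff and should be treated as illustrative, not the package's authoritative typings:

```ts
// Partial face configuration using the renamed options from this commit.
const faceOverrides = {
  face: {
    detector: {
      modelPath: 'blazeface-back.json', // resolved relative to modelBasePath unless absolute
      maxDetected: 2,     // was maxFaces; keep as low as possible for performance
      minConfidence: 0.2, // discard predictions below this score
      iouThreshold: 0.1,  // overlap allowed between two detections before one is removed
      skipFrames: 21,     // only used for video inputs
    },
    mesh: {
      enabled: true,
      modelPath: 'facemesh.json',
    },
  },
};
```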
```diff
@@ -154,25 +135,18 @@ const config: Config = {
       enabled: true,
       minConfidence: 0.1, // threshold for discarding a prediction
       skipFrames: 32,     // how many frames to go without re-running the detector
-      modelPath: 'emotion.json', // face emotion model
-                                 // can be either absolute path or relative to modelBasePath
+      modelPath: 'emotion.json', // face emotion model, can be absolute path or relative to modelBasePath
     },
   },

   body: {
     enabled: true,
-    modelPath: 'posenet.json', // body model
-                               // can be either absolute path or relative to modelBasePath
-                               // can be 'posenet', 'blazepose' or 'efficientpose'
-                               // 'blazepose' and 'efficientpose' are experimental
-    maxDetections: 1,    // maximum number of people detected in the input
+    modelPath: 'posenet.json', // body model, can be absolute path or relative to modelBasePath
+                               // can be 'posenet' or 'blazepose'
+    maxDetected: 1,      // maximum number of people detected in the input
                          // should be set to the minimum number for performance
                          // only valid for posenet as blazepose only detects single pose
-    scoreThreshold: 0.2, // threshold for deciding when to remove boxes based on score
-                         // in non-maximum suppression
-                         // only valid for posenet as blazepose only detects single pose
-    nmsRadius: 20,       // radius for deciding points are too close in non-maximum suppression
-                         // only valid for posenet as blazepose only detects single pose
+    minConfidence: 0.2,  // threshold for discarding a prediction
   },

   hand: {
```
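For the body section the change is parallel: `maxDetections` becomes `maxDetected`, the experimental `efficientpose` mention is dropped, and the `scoreThreshold`/`nmsRadius` NMS tuning gives way to a single `minConfidence`. A sketch of a body override under the same assumptions as above:

```ts
// Partial body configuration after this commit; values are illustrative.
const bodyOverrides = {
  body: {
    enabled: true,
    modelPath: 'posenet.json', // 'posenet' or 'blazepose'
    maxDetected: 1,            // was maxDetections; blazepose only returns a single pose
    minConfidence: 0.2,        // replaces the old scoreThreshold/nmsRadius tuning
  },
};
```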
````diff
@@ -187,35 +161,28 @@
     skipInitial: false,  // if previous detection resulted in no hands detected,
                          // should skipFrames be reset immediately to force new detection cycle
     minConfidence: 0.1,  // threshold for discarding a prediction
-    iouThreshold: 0.1,   // threshold for deciding whether boxes overlap too much
-                         // in non-maximum suppression
-    scoreThreshold: 0.5, // threshold for deciding when to remove boxes based on
-                         // score in non-maximum suppression
-    maxHands: 1,         // maximum number of hands detected in the input
+    iouThreshold: 0.1,   // amount of overlap between two detected objects before one object is removed
+    maxDetected: 1,      // maximum number of hands detected in the input
                          // should be set to the minimum number for performance
     landmarks: true,     // detect hand landmarks or just hand boundary box
     detector: {
-      modelPath: 'handdetect.json', // hand detector model
-                                    // can be either absolute path or relative to modelBasePath
+      modelPath: 'handdetect.json', // hand detector model, can be absolute path or relative to modelBasePath
     },
     skeleton: {
-      modelPath: 'handskeleton.json', // hand skeleton model
-                                      // can be either absolute path or relative to modelBasePath
+      modelPath: 'handskeleton.json', // hand skeleton model, can be absolute path or relative to modelBasePath
     },
   },

   object: {
     enabled: false,
-    modelPath: 'nanodet.json', // object detection model
-                               // can be either absolute path or relative to modelBasePath
-                               // 'nanodet' is experimental
-    minConfidence: 0.20, // threshold for discarding a prediction
-    iouThreshold: 0.40,  // threshold for deciding whether boxes overlap too much
-                         // in non-maximum suppression
-    maxResults: 10,      // maximum number of objects detected in the input
+    modelPath: 'nanodet.json', // experimental: object detection model, can be absolute path or relative to modelBasePath
+    minConfidence: 0.2,  // threshold for discarding a prediction
+    iouThreshold: 0.4,   // amount of overlap between two detected objects before one object is removed
+    maxDetected: 10,     // maximum number of objects detected in the input
     skipFrames: 41,      // how many frames to go without re-running the detector
   },
 };
 export { config as defaults };
 ```

 <br>
````
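Hand and object detection follow the same pattern: `maxHands` and `maxResults` both become `maxDetected`. An end-to-end sketch, assuming the package exposes a `Human` class whose constructor accepts a partial configuration as in the project's demos; the import path and constructor usage are assumptions, not part of this diff:

```ts
import Human from '@vladmandic/human'; // assumed default export, as in the published demos

// Overrides limited to the options renamed in this commit.
const human = new Human({
  hand: {
    maxDetected: 1,    // was maxHands
    iouThreshold: 0.1, // overlap allowed before one detection is dropped
  },
  object: {
    enabled: true,
    modelPath: 'nanodet.json', // experimental object detection model
    minConfidence: 0.2,
    maxDetected: 10,           // was maxResults
  },
});

// const result = await human.detect(inputImageOrVideo); // typical usage in the demos
```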