mirror of https://github.com/vladmandic/human

add modelBasePath option

parent f5850bc4ed / commit d09d36cf9d

@@ -20,12 +20,11 @@ Main configuration objects are:
 
 With **config.face** having several subsections:
 
+- **config.face.detector**: controls general face detection that all other face modules depend on
 - **config.face.mesh**: controls facial mesh and landmark detection
+- **config.face.description**: controls age & gender prediction and face descriptor
 - **config.face.iris**: controls iris detection
-- **config.face.age**: controls age prediction
-- **config.face.gender**: controls gender prediction
 - **config.face.emotion**: controls emotion prediction
-- **config.face.embedding**: controls generation of face embedding data used for face similarity checks
 
 <br>
 
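As a quick illustration of how these subsections are used from application code, the sketch below constructs `Human` with a partial configuration that only overrides a few face modules. It assumes the `@vladmandic/human` package with its default `Human` export as shown in the project README; the option values are examples only, not recommendations.

```ts
import Human from '@vladmandic/human';

// partial configuration: only the listed options are overridden,
// everything else keeps the library defaults
const human = new Human({
  face: {
    enabled: true,
    mesh: { enabled: true },         // facial mesh and landmark detection
    iris: { enabled: true },         // iris detection
    description: { enabled: true },  // age & gender prediction and face descriptor
    emotion: { enabled: false },     // skip emotion prediction in this example
  },
});

async function run(input: HTMLVideoElement) {
  const result = await human.detect(input);
  console.log(result.face); // results for the enabled face modules
}
```

Options that are not listed keep the default values shown in the configuration below.
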
@@ -41,6 +40,7 @@ const config: Config = {
   // can be 'webgl', 'wasm', 'cpu', or 'humangl' which is a custom version of webgl
   // leave as empty string to continue using default backend
   // when backend is set outside of Human library
+  modelBasePath: '../models/', // base path for all models
   wasmPath: '../assets/', // path for wasm binaries
   // only used for backend: wasm
   debug: true, // print additional status messages to console
 
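The new `modelBasePath` option is what the relative `modelPath` values in the sections below are resolved against. The helper in this sketch is hypothetical, written only to illustrate the documented rule ("can be either absolute path or relative to modelBasePath"); it is not a function exported by the library.

```ts
// hypothetical helper illustrating the documented resolution rule:
// relative model paths are appended to modelBasePath,
// absolute paths and full URLs are used as-is
function resolveModelPath(modelBasePath: string, modelPath: string): string {
  const isAbsolute = /^([a-z]+:)?\//i.test(modelPath); // '/...', 'http://...', 'file:///...'
  if (isAbsolute) return modelPath;
  return modelBasePath.endsWith('/') ? modelBasePath + modelPath : `${modelBasePath}/${modelPath}`;
}

console.log(resolveModelPath('../models/', 'blazeface-back.json'));
// -> '../models/blazeface-back.json'
console.log(resolveModelPath('../models/', 'https://example.com/models/blazeface-back.json'));
// -> absolute URL, used as-is
```
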
@@ -101,7 +101,8 @@ const config: Config = {
     // detector, mesh, iris, age, gender, emotion
     // (note: module is not loaded until it is required)
     detector: {
-      modelPath: '../models/blazeface-back.json',
+      modelPath: 'blazeface-back.json', // detector model
+      // can be either absolute path or relative to modelBasePath
       rotation: false, // use best-guess rotated face image or just box with rotation as-is
       // false means higher performance, but incorrect mesh mapping if face angle is above 20 degrees
       // this parameter is not valid in nodejs
 
@@ -125,18 +126,21 @@ const config: Config = {
 
     mesh: {
       enabled: true,
-      modelPath: '../models/facemesh.json',
+      modelPath: 'facemesh.json', // facemesh model
+      // can be either absolute path or relative to modelBasePath
     },
 
     iris: {
       enabled: true,
-      modelPath: '../models/iris.json',
+      modelPath: 'iris.json', // face iris model
+      // can be either absolute path or relative to modelBasePath
     },
 
     description: {
       enabled: true, // to improve accuracy of face description extraction it is
       // recommended to enable detector.rotation and mesh.enabled
-      modelPath: '../models/faceres.json',
+      modelPath: 'faceres.json', // face description model
+      // can be either absolute path or relative to modelBasePath
       skipFrames: 31, // how many frames to go without re-running the detector
       // only used for video inputs
     },
 
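Putting the per-module paths together, here is a sketch of overriding individual models, where one path stays relative to `modelBasePath` and another is given as an absolute URL; the example.com address is a placeholder, not an official model location, and the `skipFrames` value is illustrative.

```ts
import Human from '@vladmandic/human';

const human = new Human({
  modelBasePath: '../models/',
  face: {
    detector: { modelPath: 'blazeface-back.json' },                  // resolved against modelBasePath
    mesh: { modelPath: 'https://example.com/models/facemesh.json' }, // absolute URL, used as-is
    description: { skipFrames: 15 },                                 // re-run more often on video input
  },
});
```
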
@@ -145,33 +149,16 @@ const config: Config = {
       enabled: true,
       minConfidence: 0.1, // threshold for discarding a prediction
       skipFrames: 32, // how many frames to go without re-running the detector
-      modelPath: '../models/emotion.json',
+      modelPath: 'emotion.json', // face emotion model
+      // can be either absolute path or relative to modelBasePath
-    },
-
-    age: {
-      enabled: false, // obsolete, replaced by description module
-      modelPath: '../models/age.json',
-      skipFrames: 33, // how many frames to go without re-running the detector
-      // only used for video inputs
-    },
-
-    gender: {
-      enabled: false, // obsolete, replaced by description module
-      minConfidence: 0.1, // threshold for discarding a prediction
-      modelPath: '../models/gender.json',
-      skipFrames: 34, // how many frames to go without re-running the detector
-      // only used for video inputs
-    },
-
-    embedding: {
-      enabled: false, // obsolete, replaced by description module
-      modelPath: '../models/mobileface.json',
     },
   },
 
   body: {
     enabled: true,
-    modelPath: '../models/posenet.json', // can be 'posenet', 'blazepose' or 'efficientpose'
+    modelPath: 'posenet.json', // body model
+    // can be either absolute path or relative to modelBasePath
+    // can be 'posenet', 'blazepose' or 'efficientpose'
     // 'blazepose' and 'efficientpose' are experimental
     maxDetections: 10, // maximum number of people detected in the input
     // should be set to the minimum number for performance
 
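Since the body section accepts alternative models, the following sketch switches from the default `posenet` to the experimental `blazepose` model. The file name `blazepose.json` is an assumption that the model ships under `modelBasePath`; check the models folder of the release you are using.

```ts
import Human from '@vladmandic/human';

const human = new Human({
  modelBasePath: '../models/',
  body: {
    enabled: true,
    modelPath: 'blazepose.json', // experimental alternative to posenet.json (assumed file name)
    maxDetections: 1,            // keep to the minimum needed, as advised above
  },
});
```
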
@@ -203,16 +190,19 @@ const config: Config = {
     // should be set to the minimum number for performance
     landmarks: true, // detect hand landmarks or just hand boundary box
     detector: {
-      modelPath: '../models/handdetect.json',
+      modelPath: 'handdetect.json', // hand detector model
+      // can be either absolute path or relative to modelBasePath
     },
     skeleton: {
-      modelPath: '../models/handskeleton.json',
+      modelPath: 'handskeleton.json', // hand skeleton model
+      // can be either absolute path or relative to modelBasePath
     },
   },
 
   object: {
     enabled: false,
-    modelPath: '../models/nanodet.json',
+    modelPath: 'nanodet.json', // object detection model
+    // can be either absolute path or relative to modelBasePath
     // 'nanodet' is experimental
     minConfidence: 0.20, // threshold for discarding a prediction
     iouThreshold: 0.40, // threshold for deciding whether boxes overlap too much
 
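Finally, a sketch of enabling the experimental object detection module shown above. It assumes the detection result exposes an `object` array as in the library's Result typings, and the threshold values are illustrative.

```ts
import Human from '@vladmandic/human';

const human = new Human({
  object: {
    enabled: true,              // disabled by default
    modelPath: 'nanodet.json',  // experimental, resolved against modelBasePath
    minConfidence: 0.35,        // discard low-confidence predictions
    iouThreshold: 0.40,         // overlap threshold for suppressing duplicate boxes
  },
});

async function detectObjects(image: HTMLImageElement) {
  const result = await human.detect(image);
  console.log(result.object); // array of detected objects
}
```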