/* eslint-disable indent */
/* eslint-disable no-multi-spaces */

// Default configuration for all detection modules.
// Each top-level section (face, body, hand) can be toggled independently;
// model files are loaded lazily, only when the corresponding module is enabled and used.
const config = {
  backend: 'webgl', // select tfjs backend to use
  console: true, // enable debugging output to console
  scoped: false, // enable scoped runs
  // some models *may* have memory leaks, this wraps everything in a local scope at a cost of performance
  // typically not needed
  face: {
    enabled: true, // controls if specified module is enabled
    // face.enabled is required for all face models: detector, mesh, iris, age, gender, emotion
    // (note: module is not loaded until it is required)
    detector: {
      modelPath: '../models/blazeface/back/model.json', // can be 'front' or 'back'.
      // 'front' is optimized for large faces such as front-facing camera and 'back' is optimized for distant faces.
      inputSize: 256, // fixed value: 128 for front and 256 for 'back'
      maxFaces: 10, // maximum number of faces detected in the input, should be set to the minimum number for performance
      skipFrames: 10, // how many frames to go without re-running the face bounding box detector, only used for video inputs
      // if model is running at 25 FPS, we can re-use existing bounding box for updated face mesh analysis
      // as face probably hasn't moved much in short time (10 * 1/25 = 0.25 sec)
      minConfidence: 0.5, // threshold for discarding a prediction
      iouThreshold: 0.3, // threshold for deciding whether boxes overlap too much in non-maximum suppression
      scoreThreshold: 0.7, // threshold for deciding when to remove boxes based on score in non-maximum suppression
    },
    mesh: {
      enabled: true,
      modelPath: '../models/facemesh/model.json',
      inputSize: 192, // fixed value
    },
    iris: {
      enabled: true,
      modelPath: '../models/iris/model.json',
      enlargeFactor: 2.3, // empiric tuning
      inputSize: 64, // fixed value
    },
    age: {
      enabled: true,
      modelPath: '../models/ssrnet-age/imdb/model.json', // can be 'imdb' or 'wiki'
      // which determines training set for model
      inputSize: 64, // fixed value
      skipFrames: 10, // how many frames to go without re-running the detector, only used for video inputs
    },
    gender: {
      enabled: true,
      minConfidence: 0.8, // threshold for discarding a prediction
      modelPath: '../models/ssrnet-gender/imdb/model.json',
    },
    emotion: {
      enabled: true,
      inputSize: 64, // fixed value
      minConfidence: 0.5, // threshold for discarding a prediction
      skipFrames: 10, // how many frames to go without re-running the detector
      useGrayscale: true, // convert image to grayscale before prediction or use highest channel
      modelPath: '../models/emotion/model.json',
    },
  },
  body: {
    enabled: true,
    modelPath: '../models/posenet/model.json',
    inputResolution: 257, // fixed value
    outputStride: 16, // fixed value
    maxDetections: 10, // maximum number of people detected in the input, should be set to the minimum number for performance
    scoreThreshold: 0.7, // threshold for deciding when to remove boxes based on score in non-maximum suppression
    nmsRadius: 20, // radius for deciding points are too close in non-maximum suppression
  },
  hand: {
    enabled: true,
    inputSize: 256, // fixed value
    skipFrames: 10, // how many frames to go without re-running the hand bounding box detector, only used for video inputs
    // if model is running at 25 FPS, we can re-use existing bounding box for updated hand skeleton analysis
    // as the hand probably hasn't moved much in short time (10 * 1/25 = 0.25 sec)
    minConfidence: 0.5, // threshold for discarding a prediction
    iouThreshold: 0.3, // threshold for deciding whether boxes overlap too much in non-maximum suppression
    scoreThreshold: 0.7, // threshold for deciding when to remove boxes based on score in non-maximum suppression
    enlargeFactor: 1.65, // empiric tuning as skeleton prediction prefers hand box with some whitespace
    maxHands: 10, // maximum number of hands detected in the input, should be set to the minimum number for performance
    detector: {
      anchors: '../models/handdetect/anchors.json',
      modelPath: '../models/handdetect/model.json',
    },
    skeleton: {
      modelPath: '../models/handskeleton/model.json',
    },
  },
};

export default config;
|