/* eslint-disable indent */
/* eslint-disable no-multi-spaces */

// Default configuration for the library.
// Values are column-aligned for readability, hence the eslint indent/no-multi-spaces overrides above.
const config = {
  backend: 'webgl',        // select tfjs backend to use
  console: true,           // enable debugging output to console
  async: true,             // execute enabled models in parallel
  // this disables per-model performance data but slightly increases performance
  // cannot be used if profiling is enabled
  profile: false,          // enable tfjs profiling
  // this has significant performance impact, only enable for debugging purposes
  // currently only implemented for age,gender,emotion models
  deallocate: false,       // aggressively deallocate gpu memory after each usage
  // only valid for webgl backend and only during first call, cannot be changed unless library is reloaded
  // this has significant performance impact, only enable on low-memory devices
  scoped: false,           // enable scoped runs
  // some models *may* have memory leaks, this wraps everything in a local scope at a cost of performance
  // typically not needed
  videoOptimized: true,    // perform additional optimizations when input is video, must be disabled for images
  // basically this skips object box boundary detection for every n frames
  // while maintaining in-box detection since objects cannot move that fast
  filter: {
    enabled: true,         // enable image pre-processing filters
    width: 0,              // resize input width
    height: 0,             // resize input height
    // if both width and height are set to 0, there is no resizing
    // if just one is set, second one is scaled automatically
    // if both are set, values are used as-is
    return: true,          // return processed canvas imagedata in result
    brightness: 0,         // range: -1 (darken) to 1 (lighten)
    contrast: 0,           // range: -1 (reduce contrast) to 1 (increase contrast)
    sharpness: 0,          // range: 0 (no sharpening) to 1 (maximum sharpening)
    blur: 0,               // range: 0 (no blur) to N (blur radius in pixels)
    saturation: 0,         // range: -1 (reduce saturation) to 1 (increase saturation)
    hue: 0,                // range: 0 (no change) to 360 (hue rotation in degrees)
    negative: false,       // image negative
    sepia: false,          // image sepia colors
    vintage: false,        // image vintage colors
    kodachrome: false,     // image kodachrome colors
    technicolor: false,    // image technicolor colors
    polaroid: false,       // image polaroid camera effect
    pixelate: 0,           // range: 0 (no pixelate) to N (number of pixels to pixelate)
  },
  gesture: {
    enabled: true,         // enable simple gesture recognition
  },
  face: {
    enabled: true,         // controls if specified module is enabled
    // face.enabled is required for all face models: detector, mesh, iris, age, gender, emotion
    // (note: module is not loaded until it is required)
    detector: {
      modelPath: '../models/blazeface-back.json', // can be 'front' or 'back'.
      // 'front' is optimized for large faces such as front-facing camera and 'back' is optimized for distant faces.
      inputSize: 256,      // fixed value: 128 for front and 256 for 'back'
      maxFaces: 10,        // maximum number of faces detected in the input, should be set to the minimum number for performance
      skipFrames: 15,      // how many frames to go without re-running the face bounding box detector, only used for video inputs
      // if model is running at 25 FPS, we can re-use existing bounding box for updated face mesh analysis
      // as face probably hasn't moved much in short time (15 * 1/25 = 0.6 sec)
      minConfidence: 0.1,  // threshold for discarding a prediction
      iouThreshold: 0.1,   // threshold for deciding whether boxes overlap too much in non-maximum suppression (0.1 means drop if overlap 10%)
      scoreThreshold: 0.1, // threshold for deciding when to remove boxes based on score in non-maximum suppression, this is applied on detection objects only and before minConfidence
    },
    mesh: {
      enabled: true,
      modelPath: '../models/facemesh.json',
      inputSize: 192,      // fixed value
    },
    iris: {
      enabled: true,
      modelPath: '../models/iris.json',
      enlargeFactor: 2.3,  // empiric tuning
      inputSize: 64,       // fixed value
    },
    age: {
      enabled: true,
      modelPath: '../models/age-ssrnet-imdb.json', // can be 'age-ssrnet-imdb' or 'age-ssrnet-wiki'
      // which determines training set for model
      inputSize: 64,       // fixed value
      skipFrames: 15,      // how many frames to go without re-running the detector, only used for video inputs
    },
    gender: {
      enabled: true,
      minConfidence: 0.1,  // threshold for discarding a prediction
      modelPath: '../models/gender-ssrnet-imdb.json', // can be 'gender', 'gender-ssrnet-imdb' or 'gender-ssrnet-wiki'
      inputSize: 64,       // fixed value
      skipFrames: 15,      // how many frames to go without re-running the detector, only used for video inputs
    },
    emotion: {
      enabled: true,
      inputSize: 64,       // fixed value
      minConfidence: 0.2,  // threshold for discarding a prediction
      skipFrames: 15,      // how many frames to go without re-running the detector
      modelPath: '../models/emotion-large.json', // can be 'mini', 'large'
    },
  },
  body: {
    enabled: true,
    modelPath: '../models/posenet.json',
    inputResolution: 257,  // fixed value
    outputStride: 16,      // fixed value
    maxDetections: 10,     // maximum number of people detected in the input, should be set to the minimum number for performance
    scoreThreshold: 0.8,   // threshold for deciding when to remove boxes based on score in non-maximum suppression
    nmsRadius: 20,         // radius for deciding points are too close in non-maximum suppression
  },
  hand: {
    enabled: true,
    inputSize: 256,        // fixed value
    skipFrames: 15,        // how many frames to go without re-running the hand bounding box detector, only used for video inputs
    // if model is running at 25 FPS, we can re-use existing bounding box for updated hand skeleton analysis
    // as the hand probably hasn't moved much in short time (15 * 1/25 = 0.6 sec)
    minConfidence: 0.1,    // threshold for discarding a prediction
    iouThreshold: 0.2,     // threshold for deciding whether boxes overlap too much in non-maximum suppression
    scoreThreshold: 0.1,   // threshold for deciding when to remove boxes based on score in non-maximum suppression
    enlargeFactor: 1.65,   // empiric tuning as skeleton prediction prefers hand box with some whitespace
    maxHands: 10,          // maximum number of hands detected in the input, should be set to the minimum number for performance
    detector: {
      modelPath: '../models/handdetect.json',
    },
    skeleton: {
      modelPath: '../models/handskeleton.json',
    },
  },
};

export default config;