/* eslint-disable indent */
/* eslint-disable no-multi-spaces */

/**
 * Default configuration for the Human library.
 * Every property can be overridden by the user at initialization or per-call;
 * this object defines the library-wide defaults.
 */
const config = {
  backend: 'webgl',          // select tfjs backend to use
                             // can be 'webgl', 'wasm', 'cpu', or 'humangl' which is a custom version of webgl
                             // leave as empty string to continue using default backend
                             // when backend is set outside of Human library
  wasmPath: '../assets/',    // path for wasm binaries
                             // only used for backend: wasm
  debug: true,               // print additional status messages to console
  async: true,               // execute enabled models in parallel
                             // this disables per-model performance data but
                             // slightly increases performance
                             // cannot be used if profiling is enabled
  profile: false,            // enable tfjs profiling
                             // this has significant performance impact
                             // only enable for debugging purposes
                             // currently only implemented for age,gender,emotion models
  deallocate: false,         // aggressively deallocate gpu memory after each usage
                             // only valid for webgl backend and only during first call
                             // cannot be changed unless library is reloaded
                             // this has significant performance impact
                             // only enable on low-memory devices
  scoped: false,             // enable scoped runs
                             // some models *may* have memory leaks,
                             // this wraps everything in a local scope at a cost of performance
                             // typically not needed
  videoOptimized: true,      // perform additional optimizations when input is video,
                             // must be disabled for images
                             // basically this skips object box boundary detection for every n frames
                             // while maintaining in-box detection since objects cannot move that fast
  warmup: 'face',            // what to use for human.warmup(), can be 'none', 'face', 'full'
                             // warmup pre-initializes all models for faster inference but can take
                             // significant time on startup
  filter: {
    enabled: true,           // enable image pre-processing filters
    width: 0,                // resize input width
    height: 0,               // resize input height
                             // if both width and height are set to 0, there is no resizing
                             // if just one is set, second one is scaled automatically
                             // if both are set, values are used as-is
    return: true,            // return processed canvas imagedata in result
    brightness: 0,           // range: -1 (darken) to 1 (lighten)
    contrast: 0,             // range: -1 (reduce contrast) to 1 (increase contrast)
    sharpness: 0,            // range: 0 (no sharpening) to 1 (maximum sharpening)
    blur: 0,                 // range: 0 (no blur) to N (blur radius in pixels)
    saturation: 0,           // range: -1 (reduce saturation) to 1 (increase saturation)
    hue: 0,                  // range: 0 (no change) to 360 (hue rotation in degrees)
    negative: false,         // image negative
    sepia: false,            // image sepia colors
    vintage: false,          // image vintage colors
    kodachrome: false,       // image kodachrome colors
    technicolor: false,      // image technicolor colors
    polaroid: false,         // image polaroid camera effect
    pixelate: 0,             // range: 0 (no pixelate) to N (number of pixels to pixelate)
  },

  gesture: {
    enabled: true,           // enable simple gesture recognition
  },

  face: {
    enabled: true,           // controls if specified module is enabled
                             // face.enabled is required for all face models:
                             // detector, mesh, iris, age, gender, emotion
                             // (note: module is not loaded until it is required)
    detector: {
      modelPath: '../models/blazeface-back.json', // can be 'blazeface-front', 'blazeface-back' or 'faceboxes'
                             // 'blazeface-front' is blazeface model optimized for large faces such as front-facing camera
                             // 'blazeface-back' is blazeface model optimized for smaller and/or distant faces
                             // 'faceboxes' is alternative model to 'blazeface'
      inputSize: 256,        // fixed value: 128 for front and 256 for 'back'
      rotation: true,        // use best-guess rotated face image or just box with rotation as-is
                             // false means higher performance, but incorrect mesh mapping if face angle is above 20 degrees
      maxFaces: 10,          // maximum number of faces detected in the input
                             // should be set to the minimum number for performance
      skipFrames: 21,        // how many frames to go without re-running the face bounding box detector
                             // only used for video inputs
                             // e.g., if model is running at 25 FPS, we can re-use existing bounding
                             // box for updated face analysis as the head probably hasn't moved much
                             // in short time (10 * 1/25 = 0.25 sec)
      skipInitial: false,    // if previous detection resulted in no faces detected,
                             // should skipFrames be reset immediately
      minConfidence: 0.1,    // threshold for discarding a prediction
      iouThreshold: 0.1,     // threshold for deciding whether boxes overlap too much in
                             // non-maximum suppression (0.1 means drop if overlap 10%)
      scoreThreshold: 0.1,   // threshold for deciding when to remove boxes based on score
                             // in non-maximum suppression,
                             // this is applied on detection objects only and before minConfidence
    },

    mesh: {
      enabled: true,
      modelPath: '../models/facemesh.json',
      inputSize: 192,        // fixed value
    },

    iris: {
      enabled: true,
      modelPath: '../models/iris.json',
      inputSize: 64,         // fixed value
    },

    age: {
      enabled: true,
      modelPath: '../models/age-ssrnet-imdb.json',
      inputSize: 64,         // fixed value
      skipFrames: 31,        // how many frames to go without re-running the detector
                             // only used for video inputs
    },

    gender: {
      enabled: true,
      minConfidence: 0.1,    // threshold for discarding a prediction
      modelPath: '../models/gender.json', // can be 'gender' or 'gender-ssrnet-imdb'
      inputSize: 64,         // fixed value
      skipFrames: 32,        // how many frames to go without re-running the detector
                             // only used for video inputs
    },

    emotion: {
      enabled: true,
      inputSize: 64,         // fixed value
      minConfidence: 0.1,    // threshold for discarding a prediction
      skipFrames: 33,        // how many frames to go without re-running the detector
      modelPath: '../models/emotion.json',
    },

    embedding: {
      enabled: false,
      inputSize: 112,        // fixed value
      modelPath: '../models/mobilefacenet.json',
    },
  },

  body: {
    enabled: true,
    modelPath: '../models/posenet.json', // can be 'posenet', 'blazepose' or 'blazepose-upper'
    inputSize: 257,          // fixed value, 257 for posenet and 256 for blazepose
    maxDetections: 10,       // maximum number of people detected in the input
                             // should be set to the minimum number for performance
                             // only valid for posenet as blazepose only detects single pose
    scoreThreshold: 0.3,     // threshold for deciding when to remove boxes based on score
                             // in non-maximum suppression
                             // only valid for posenet as blazepose only detects single pose
    nmsRadius: 20,           // radius for deciding points are too close in non-maximum suppression
                             // only valid for posenet as blazepose only detects single pose
    modelType: 'posenet-mobilenet', // can be 'posenet-mobilenet', 'posenet-resnet', 'blazepose'
  },

  hand: {
    enabled: true,
    rotation: false,         // use best-guess rotated hand image or just box with rotation as-is
                             // false means higher performance, but incorrect finger mapping if hand is inverted
    inputSize: 256,          // fixed value
    skipFrames: 12,          // how many frames to go without re-running the hand bounding box detector
                             // only used for video inputs
                             // e.g., if model is running at 25 FPS, we can re-use existing bounding
                             // box for updated hand skeleton analysis as the hand probably
                             // hasn't moved much in short time (10 * 1/25 = 0.25 sec)
    skipInitial: false,      // if previous detection resulted in no faces detected,
                             // should skipFrames be reset immediately
    minConfidence: 0.1,      // threshold for discarding a prediction
    iouThreshold: 0.1,       // threshold for deciding whether boxes overlap too much
                             // in non-maximum suppression
    scoreThreshold: 0.5,     // threshold for deciding when to remove boxes based on
                             // score in non-maximum suppression
    maxHands: 1,             // maximum number of hands detected in the input
                             // should be set to the minimum number for performance
    landmarks: true,         // detect hand landmarks or just hand boundary box
    detector: {
      modelPath: '../models/handdetect.json',
    },
    skeleton: {
      modelPath: '../models/handskeleton.json',
    },
  },
};

export default config;