/* eslint-disable indent */
/* eslint-disable no-multi-spaces */
2021-10-22 22:09:52 +02:00
|
|
|
export interface GenericConfig {
|
|
|
|
enabled: boolean,
|
2021-09-12 06:05:06 +02:00
|
|
|
modelPath: string,
|
2021-10-22 22:09:52 +02:00
|
|
|
skipFrames: number,
|
|
|
|
skipTime: number,
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Dectector part of face configuration */
|
|
|
|
export interface FaceDetectorConfig extends GenericConfig {
|
2021-09-12 06:05:06 +02:00
|
|
|
rotation: boolean,
|
|
|
|
maxDetected: number,
|
|
|
|
minConfidence: number,
|
|
|
|
iouThreshold: number,
|
|
|
|
return: boolean,
|
|
|
|
}
|
|
|
|
|
2021-09-22 22:00:43 +02:00
|
|
|
/** Mesh part of face configuration */
|
2021-10-22 22:09:52 +02:00
|
|
|
export type FaceMeshConfig = GenericConfig
|
2021-09-12 06:05:06 +02:00
|
|
|
|
2021-09-22 22:00:43 +02:00
|
|
|
/** Iris part of face configuration */
|
2021-10-22 22:09:52 +02:00
|
|
|
export type FaceIrisConfig = GenericConfig
|
2021-09-12 06:05:06 +02:00
|
|
|
|
2021-09-22 22:00:43 +02:00
|
|
|
/** Description or face embedding part of face configuration
|
|
|
|
* - also used by age and gender detection
|
|
|
|
*/
|
2021-10-22 22:09:52 +02:00
|
|
|
export interface FaceDescriptionConfig extends GenericConfig {
|
2021-09-12 06:05:06 +02:00
|
|
|
minConfidence: number,
|
|
|
|
}
|
|
|
|
|
2021-09-22 22:00:43 +02:00
|
|
|
/** Emotion part of face configuration */
|
2021-10-22 22:09:52 +02:00
|
|
|
export interface FaceEmotionConfig extends GenericConfig {
|
2021-09-12 06:05:06 +02:00
|
|
|
minConfidence: number,
|
|
|
|
}
|
|
|
|
|
2021-10-13 16:56:56 +02:00
|
|
|
/** Emotion part of face configuration */
|
2021-10-22 22:09:52 +02:00
|
|
|
export type FaceAntiSpoofConfig = GenericConfig
|
2021-10-13 16:56:56 +02:00
|
|
|
|
2021-09-12 05:54:35 +02:00
|
|
|
/** Controlls and configures all face-specific options:
|
|
|
|
* - face detection, face mesh detection, age, gender, emotion detection and face description
|
2021-09-22 22:00:43 +02:00
|
|
|
*
|
2021-09-12 05:54:35 +02:00
|
|
|
* Parameters:
|
|
|
|
* - enabled: true/false
|
|
|
|
* - modelPath: path for each of face models
|
|
|
|
* - minConfidence: threshold for discarding a prediction
|
|
|
|
* - iouThreshold: ammount of overlap between two detected objects before one object is removed
|
|
|
|
* - maxDetected: maximum number of faces detected in the input, should be set to the minimum number for performance
|
|
|
|
* - rotation: use calculated rotated face image or just box with rotation as-is, false means higher performance, but incorrect mesh mapping on higher face angles
|
|
|
|
* - return: return extracted face as tensor for futher user processing, in which case user is reponsible for manually disposing the tensor
|
|
|
|
*/
|
|
|
|
export interface FaceConfig {
|
|
|
|
enabled: boolean,
|
2021-09-12 06:05:06 +02:00
|
|
|
detector: Partial<FaceDetectorConfig>,
|
|
|
|
mesh: Partial<FaceMeshConfig>,
|
|
|
|
iris: Partial<FaceIrisConfig>,
|
|
|
|
description: Partial<FaceDescriptionConfig>,
|
|
|
|
emotion: Partial<FaceEmotionConfig>,
|
2021-10-13 16:56:56 +02:00
|
|
|
antispoof: Partial<FaceAntiSpoofConfig>,
|
2021-09-12 05:54:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** Controlls and configures all body detection specific options
|
2021-09-22 22:00:43 +02:00
|
|
|
*
|
|
|
|
* Parameters:
|
2021-09-12 05:54:35 +02:00
|
|
|
* - enabled: true/false
|
|
|
|
* - modelPath: body pose model, can be absolute path or relative to modelBasePath
|
|
|
|
* - minConfidence: threshold for discarding a prediction
|
|
|
|
* - maxDetected: maximum number of people detected in the input, should be set to the minimum number for performance
|
2021-09-27 19:58:13 +02:00
|
|
|
* - detector: optional body detector
|
2021-09-22 22:00:43 +02:00
|
|
|
*
|
2021-09-26 01:14:03 +02:00
|
|
|
* `maxDetected` is valid for `posenet` and `movenet-multipose` as other models are single-pose only
|
|
|
|
* `maxDetected` can be set to -1 to auto-detect based on number of detected faces
|
|
|
|
*
|
2021-09-22 22:00:43 +02:00
|
|
|
* Changing `modelPath` will change module responsible for hand detection and tracking
|
2021-09-26 01:14:03 +02:00
|
|
|
* Allowed values are `posenet.json`, `blazepose.json`, `efficientpose.json`, `movenet-lightning.json`, `movenet-thunder.json`, `movenet-multipose.json`
|
2021-09-12 05:54:35 +02:00
|
|
|
*/
|
2021-10-22 22:09:52 +02:00
|
|
|
export interface BodyConfig extends GenericConfig {
|
2021-09-12 05:54:35 +02:00
|
|
|
maxDetected: number,
|
|
|
|
minConfidence: number,
|
2021-09-27 19:58:13 +02:00
|
|
|
detector?: {
|
|
|
|
modelPath: string
|
|
|
|
},
|
2021-09-12 05:54:35 +02:00
|
|
|
}
|
|
|
|
|
2021-10-22 22:09:52 +02:00
|
|
|
/** Controls and configures all hand detection specific options
|
2021-09-22 22:00:43 +02:00
|
|
|
*
|
|
|
|
* Parameters:
|
2021-09-12 05:54:35 +02:00
|
|
|
* - enabled: true/false
|
|
|
|
* - landmarks: detect hand landmarks or just hand boundary box
|
|
|
|
* - modelPath: paths for hand detector and hand skeleton models, can be absolute path or relative to modelBasePath
|
|
|
|
* - minConfidence: threshold for discarding a prediction
|
|
|
|
* - iouThreshold: ammount of overlap between two detected objects before one object is removed
|
|
|
|
* - maxDetected: maximum number of hands detected in the input, should be set to the minimum number for performance
|
|
|
|
* - rotation: use best-guess rotated hand image or just box with rotation as-is, false means higher performance, but incorrect finger mapping if hand is inverted
|
2021-09-22 22:00:43 +02:00
|
|
|
*
|
2021-09-26 01:14:03 +02:00
|
|
|
* `maxDetected` can be set to -1 to auto-detect based on number of detected faces
|
|
|
|
*
|
2021-09-22 22:00:43 +02:00
|
|
|
* Changing `detector.modelPath` will change module responsible for hand detection and tracking
|
|
|
|
* Allowed values are `handdetect.json` and `handtrack.json`
|
2021-09-12 05:54:35 +02:00
|
|
|
*/
|
2021-10-22 22:09:52 +02:00
|
|
|
export interface HandConfig extends GenericConfig {
|
2021-09-12 05:54:35 +02:00
|
|
|
rotation: boolean,
|
|
|
|
minConfidence: number,
|
|
|
|
iouThreshold: number,
|
|
|
|
maxDetected: number,
|
|
|
|
landmarks: boolean,
|
|
|
|
detector: {
|
2021-09-12 05:59:41 +02:00
|
|
|
modelPath?: string,
|
2021-09-12 05:54:35 +02:00
|
|
|
},
|
|
|
|
skeleton: {
|
2021-09-12 05:59:41 +02:00
|
|
|
modelPath?: string,
|
2021-09-12 05:54:35 +02:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Controlls and configures all object detection specific options
|
|
|
|
* - enabled: true/false
|
|
|
|
* - modelPath: object detection model, can be absolute path or relative to modelBasePath
|
|
|
|
* - minConfidence: minimum score that detection must have to return as valid object
|
|
|
|
* - iouThreshold: ammount of overlap between two detected objects before one object is removed
|
|
|
|
* - maxDetected: maximum number of detections to return
|
2021-09-22 22:00:43 +02:00
|
|
|
*
|
|
|
|
* Changing `modelPath` will change module responsible for hand detection and tracking
|
|
|
|
* Allowed values are `mb3-centernet.json` and `nanodet.json`
|
2021-09-12 05:54:35 +02:00
|
|
|
*/
|
2021-10-22 22:09:52 +02:00
|
|
|
export interface ObjectConfig extends GenericConfig {
|
2021-09-12 05:54:35 +02:00
|
|
|
minConfidence: number,
|
|
|
|
iouThreshold: number,
|
|
|
|
maxDetected: number,
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Controlls and configures all body segmentation module
|
|
|
|
* removes background from input containing person
|
|
|
|
* if segmentation is enabled it will run as preprocessing task before any other model
|
|
|
|
* alternatively leave it disabled and use it on-demand using human.segmentation method which can
|
|
|
|
* remove background or replace it with user-provided background
|
|
|
|
*
|
|
|
|
* - enabled: true/false
|
|
|
|
* - modelPath: object detection model, can be absolute path or relative to modelBasePath
|
2021-09-22 21:16:14 +02:00
|
|
|
* - blur: blur segmentation output by <number> pixels for more realistic image
|
2021-09-22 22:00:43 +02:00
|
|
|
*
|
|
|
|
* Changing `modelPath` will change module responsible for hand detection and tracking
|
|
|
|
* Allowed values are `selfie.json` and `meet.json`
|
|
|
|
|
2021-09-12 05:54:35 +02:00
|
|
|
*/
|
|
|
|
export interface SegmentationConfig {
|
|
|
|
enabled: boolean,
|
|
|
|
modelPath: string,
|
2021-09-22 21:16:14 +02:00
|
|
|
blur: number,
|
2021-09-12 05:54:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** Run input through image filters before inference
|
2021-09-22 22:00:43 +02:00
|
|
|
* - available only in Browser environments
|
|
|
|
* - image filters run with near-zero latency as they are executed on the GPU using WebGL
|
2021-09-12 05:54:35 +02:00
|
|
|
*/
|
|
|
|
export interface FilterConfig {
|
|
|
|
enabled: boolean,
|
|
|
|
/** Resize input width
|
|
|
|
* - if both width and height are set to 0, there is no resizing
|
|
|
|
* - if just one is set, second one is scaled automatically
|
|
|
|
* - if both are set, values are used as-is
|
|
|
|
*/
|
|
|
|
width: number,
|
|
|
|
/** Resize input height
|
|
|
|
* - if both width and height are set to 0, there is no resizing
|
|
|
|
* - if just one is set, second one is scaled automatically
|
|
|
|
* - if both are set, values are used as-is
|
|
|
|
*/
|
|
|
|
height: number,
|
|
|
|
/** Return processed canvas imagedata in result */
|
|
|
|
return: boolean,
|
|
|
|
/** Flip input as mirror image */
|
|
|
|
flip: boolean,
|
|
|
|
/** Range: -1 (darken) to 1 (lighten) */
|
|
|
|
brightness: number,
|
|
|
|
/** Range: -1 (reduce contrast) to 1 (increase contrast) */
|
|
|
|
contrast: number,
|
|
|
|
/** Range: 0 (no sharpening) to 1 (maximum sharpening) */
|
|
|
|
sharpness: number,
|
|
|
|
/** Range: 0 (no blur) to N (blur radius in pixels) */
|
|
|
|
blur: number
|
|
|
|
/** Range: -1 (reduce saturation) to 1 (increase saturation) */
|
|
|
|
saturation: number,
|
|
|
|
/** Range: 0 (no change) to 360 (hue rotation in degrees) */
|
|
|
|
hue: number,
|
|
|
|
/** Image negative */
|
|
|
|
negative: boolean,
|
|
|
|
/** Image sepia colors */
|
|
|
|
sepia: boolean,
|
|
|
|
/** Image vintage colors */
|
|
|
|
vintage: boolean,
|
|
|
|
/** Image kodachrome colors */
|
|
|
|
kodachrome: boolean,
|
|
|
|
/** Image technicolor colors */
|
|
|
|
technicolor: boolean,
|
|
|
|
/** Image polaroid camera effect */
|
|
|
|
polaroid: boolean,
|
|
|
|
/** Range: 0 (no pixelate) to N (number of pixels to pixelate) */
|
|
|
|
pixelate: number,
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Controlls gesture detection */
|
|
|
|
export interface GestureConfig {
|
|
|
|
enabled: boolean,
|
|
|
|
}
|
|
|
|
|
2021-09-12 06:30:11 +02:00
|
|
|
/**
|
|
|
|
* Configuration interface definition for **Human** library
|
|
|
|
*
|
|
|
|
* Contains all configurable parameters
|
|
|
|
* @typedef Config
|
2021-09-24 15:55:27 +02:00
|
|
|
*
|
|
|
|
* Defaults: [config](https://github.com/vladmandic/human/blob/main/src/config.ts#L292)
|
2021-09-12 06:30:11 +02:00
|
|
|
*/
|
2021-03-17 23:23:19 +01:00
|
|
|
export interface Config {
|
2021-09-24 15:55:27 +02:00
|
|
|
/** Backend used for TFJS operations
|
|
|
|
* Valid build-in backends are:
|
|
|
|
* - Browser: `cpu`, `wasm`, `webgl`, `humangl`
|
|
|
|
* - NodeJS: `cpu`, `wasm`, `tensorflow`
|
|
|
|
*
|
|
|
|
* Experimental:
|
|
|
|
* - Browser: `webgpu` - requires custom build of `tfjs-backend-webgpu`
|
|
|
|
*
|
|
|
|
* Defaults: `humangl` for browser and `tensorflow` for nodejs
|
|
|
|
*/
|
2021-09-13 19:28:35 +02:00
|
|
|
backend: '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu',
|
|
|
|
// backend: string;
|
2021-04-25 19:16:04 +02:00
|
|
|
|
2021-09-24 15:55:27 +02:00
|
|
|
/** Path to *.wasm files if backend is set to `wasm`
|
|
|
|
* - if not set, auto-detects to link to CDN `jsdelivr` when running in browser
|
|
|
|
*/
|
2021-03-18 01:16:40 +01:00
|
|
|
wasmPath: string,
|
2021-04-25 19:16:04 +02:00
|
|
|
|
2021-04-13 17:05:52 +02:00
|
|
|
/** Print debug statements to console */
|
2021-03-18 01:16:40 +01:00
|
|
|
debug: boolean,
|
2021-04-25 19:16:04 +02:00
|
|
|
|
2021-04-13 17:05:52 +02:00
|
|
|
/** Perform model loading and inference concurrently or sequentially */
|
2021-03-18 01:16:40 +01:00
|
|
|
async: boolean,
|
2021-04-25 19:16:04 +02:00
|
|
|
|
2021-04-13 17:05:52 +02:00
|
|
|
/** What to use for `human.warmup()`
|
|
|
|
* - warmup pre-initializes all models for faster inference but can take significant time on startup
|
|
|
|
*/
|
2021-09-13 19:28:35 +02:00
|
|
|
warmup: 'none' | 'face' | 'full' | 'body',
|
|
|
|
// warmup: string;
|
2021-04-25 19:16:04 +02:00
|
|
|
|
2021-04-13 17:05:52 +02:00
|
|
|
/** Base model path (typically starting with file://, http:// or https://) for all models
|
2021-04-25 19:16:04 +02:00
|
|
|
* - individual modelPath values are relative to this path
|
2021-04-13 17:05:52 +02:00
|
|
|
*/
|
2021-04-09 14:07:58 +02:00
|
|
|
modelBasePath: string,
|
2021-04-25 19:16:04 +02:00
|
|
|
|
2021-05-18 17:26:16 +02:00
|
|
|
/** Cache sensitivity
|
|
|
|
* - values 0..1 where 0.01 means reset cache if input changed more than 1%
|
|
|
|
* - set to 0 to disable caching
|
|
|
|
*/
|
|
|
|
cacheSensitivity: number;
|
|
|
|
|
2021-09-20 23:17:13 +02:00
|
|
|
/** Internal Variable */
|
2021-10-23 15:38:52 +02:00
|
|
|
skipAllowed: boolean;
|
2021-06-03 15:41:53 +02:00
|
|
|
|
2021-04-13 17:05:52 +02:00
|
|
|
/** Run input through image filters before inference
|
|
|
|
* - image filters run with near-zero latency as they are executed on the GPU
|
2021-09-24 15:55:27 +02:00
|
|
|
*
|
|
|
|
* {@link FilterConfig}
|
2021-04-13 17:05:52 +02:00
|
|
|
*/
|
2021-09-12 05:54:35 +02:00
|
|
|
filter: Partial<FilterConfig>,
|
2021-04-19 22:02:47 +02:00
|
|
|
|
2021-09-24 15:55:27 +02:00
|
|
|
/** {@link GestureConfig} */
|
2021-09-12 05:54:35 +02:00
|
|
|
gesture: Partial<GestureConfig>;
|
2021-04-25 19:16:04 +02:00
|
|
|
|
2021-09-24 15:55:27 +02:00
|
|
|
/** {@link FaceConfig} */
|
2021-09-12 05:54:35 +02:00
|
|
|
face: Partial<FaceConfig>,
|
2021-04-25 19:16:04 +02:00
|
|
|
|
2021-09-24 15:55:27 +02:00
|
|
|
/** {@link BodyConfig} */
|
2021-09-12 05:54:35 +02:00
|
|
|
body: Partial<BodyConfig>,
|
2021-04-25 19:16:04 +02:00
|
|
|
|
2021-09-24 15:55:27 +02:00
|
|
|
/** {@link HandConfig} */
|
2021-09-12 05:54:35 +02:00
|
|
|
hand: Partial<HandConfig>,
|
2021-04-25 19:16:04 +02:00
|
|
|
|
2021-09-24 15:55:27 +02:00
|
|
|
/** {@link ObjectConfig} */
|
2021-09-12 05:54:35 +02:00
|
|
|
object: Partial<ObjectConfig>,
|
2021-06-04 19:51:01 +02:00
|
|
|
|
2021-09-24 15:55:27 +02:00
|
|
|
/** {@link SegmentationConfig} */
|
2021-09-12 05:54:35 +02:00
|
|
|
segmentation: Partial<SegmentationConfig>,
|
2021-03-17 23:23:19 +01:00
|
|
|
}
|
|
|
|
|
2021-09-12 06:30:11 +02:00
|
|
|
/**
|
2021-09-12 14:49:56 +02:00
|
|
|
* [See all default Config values...](https://github.com/vladmandic/human/blob/main/src/config.ts#L244)
|
2021-09-12 06:30:11 +02:00
|
|
|
*
|
|
|
|
*/
|
2021-03-17 23:23:19 +01:00
|
|
|
const config: Config = {
|
2021-09-12 18:42:17 +02:00
|
|
|
backend: '', // select tfjs backend to use, leave empty to use default backend
|
2021-09-22 22:00:43 +02:00
|
|
|
// for browser environments: 'webgl', 'wasm', 'cpu', or 'humangl' (which is a custom version of webgl)
|
|
|
|
// for nodejs environments: 'tensorflow', 'wasm', 'cpu'
|
2021-09-12 18:42:17 +02:00
|
|
|
// default set to `humangl` for browsers and `tensorflow` for nodejs
|
|
|
|
modelBasePath: '', // base path for all models
|
|
|
|
// default set to `../models/` for browsers and `file://models/` for nodejs
|
2021-08-31 19:00:06 +02:00
|
|
|
wasmPath: '', // path for wasm binaries, only used for backend: wasm
|
|
|
|
// default set to download from jsdeliv during Human class instantiation
|
2021-03-02 17:27:42 +01:00
|
|
|
debug: true, // print additional status messages to console
|
2020-11-06 17:39:39 +01:00
|
|
|
async: true, // execute enabled models in parallel
|
2021-04-26 13:19:30 +02:00
|
|
|
warmup: 'full', // what to use for human.warmup(), can be 'none', 'face', 'full'
|
2020-12-11 16:11:49 +01:00
|
|
|
// warmup pre-initializes all models for faster inference but can take
|
|
|
|
// significant time on startup
|
2021-04-13 17:05:52 +02:00
|
|
|
// only used for `webgl` and `humangl` backends
|
2021-10-23 15:38:52 +02:00
|
|
|
cacheSensitivity: 0.70, // cache sensitivity
|
2021-05-18 17:26:16 +02:00
|
|
|
// values 0..1 where 0.01 means reset cache if input changed more than 1%
|
|
|
|
// set to 0 to disable caching
|
2021-10-23 15:38:52 +02:00
|
|
|
skipAllowed: false, // internal & dynamic
|
2021-04-13 17:05:52 +02:00
|
|
|
filter: { // run input through image filters before inference
|
|
|
|
// image filters run with near-zero latency as they are executed on the GPU
|
2020-10-18 18:12:09 +02:00
|
|
|
enabled: true, // enable image pre-processing filters
|
2020-10-27 15:06:01 +01:00
|
|
|
width: 0, // resize input width
|
|
|
|
height: 0, // resize input height
|
|
|
|
// if both width and height are set to 0, there is no resizing
|
|
|
|
// if just one is set, second one is scaled automatically
|
|
|
|
// if both are set, values are used as-is
|
2021-04-19 22:02:47 +02:00
|
|
|
flip: false, // flip input as mirror image
|
2020-10-18 18:12:09 +02:00
|
|
|
return: true, // return processed canvas imagedata in result
|
|
|
|
brightness: 0, // range: -1 (darken) to 1 (lighten)
|
|
|
|
contrast: 0, // range: -1 (reduce contrast) to 1 (increase contrast)
|
|
|
|
sharpness: 0, // range: 0 (no sharpening) to 1 (maximum sharpening)
|
|
|
|
blur: 0, // range: 0 (no blur) to N (blur radius in pixels)
|
|
|
|
saturation: 0, // range: -1 (reduce saturation) to 1 (increase saturation)
|
|
|
|
hue: 0, // range: 0 (no change) to 360 (hue rotation in degrees)
|
|
|
|
negative: false, // image negative
|
|
|
|
sepia: false, // image sepia colors
|
|
|
|
vintage: false, // image vintage colors
|
|
|
|
kodachrome: false, // image kodachrome colors
|
|
|
|
technicolor: false, // image technicolor colors
|
|
|
|
polaroid: false, // image polaroid camera effect
|
|
|
|
pixelate: 0, // range: 0 (no pixelate) to N (number of pixels to pixelate)
|
|
|
|
},
|
2020-11-08 18:26:45 +01:00
|
|
|
|
2020-11-04 16:18:22 +01:00
|
|
|
gesture: {
|
2021-04-25 19:16:04 +02:00
|
|
|
enabled: true, // enable gesture recognition based on model results
|
2020-11-04 16:18:22 +01:00
|
|
|
},
|
2020-11-08 18:26:45 +01:00
|
|
|
|
2020-10-12 01:22:43 +02:00
|
|
|
face: {
|
2020-10-16 21:04:51 +02:00
|
|
|
enabled: true, // controls if specified modul is enabled
|
2020-11-08 18:26:45 +01:00
|
|
|
// face.enabled is required for all face models:
|
|
|
|
// detector, mesh, iris, age, gender, emotion
|
2020-10-16 21:04:51 +02:00
|
|
|
// (note: module is not loaded until it is required)
|
2020-10-12 01:22:43 +02:00
|
|
|
detector: {
|
2021-04-25 20:15:38 +02:00
|
|
|
modelPath: 'blazeface.json', // detector model, can be absolute path or relative to modelBasePath
|
2021-06-02 18:43:43 +02:00
|
|
|
rotation: true, // use best-guess rotated face image or just box with rotation as-is
|
2020-12-10 20:47:53 +01:00
|
|
|
// false means higher performance, but incorrect mesh mapping if face angle is above 20 degrees
|
2021-03-10 00:32:35 +01:00
|
|
|
// this parameter is not valid in nodejs
|
2021-09-28 18:02:17 +02:00
|
|
|
maxDetected: 1, // maximum number of faces detected in the input
|
2020-11-08 18:26:45 +01:00
|
|
|
// should be set to the minimum number for performance
|
2021-10-23 15:38:52 +02:00
|
|
|
skipFrames: 99, // how many max frames to go without re-running the face bounding box detector
|
2021-05-18 17:26:16 +02:00
|
|
|
// only used when cacheSensitivity is not zero
|
2021-10-23 15:38:52 +02:00
|
|
|
skipTime: 2500, // how many ms to go without re-running the face bounding box detector
|
2021-10-22 22:09:52 +02:00
|
|
|
// only used when cacheSensitivity is not zero
|
2021-03-10 15:44:45 +01:00
|
|
|
minConfidence: 0.2, // threshold for discarding a prediction
|
2021-04-25 19:16:04 +02:00
|
|
|
iouThreshold: 0.1, // ammount of overlap between two detected objects before one object is removed
|
2021-03-12 04:04:44 +01:00
|
|
|
return: false, // return extracted face as tensor
|
2021-06-18 19:39:20 +02:00
|
|
|
// in which case user is reponsible for disposing the tensor
|
2020-10-12 01:22:43 +02:00
|
|
|
},
|
2020-11-08 18:26:45 +01:00
|
|
|
|
2020-10-12 01:22:43 +02:00
|
|
|
mesh: {
|
|
|
|
enabled: true,
|
2021-04-25 19:16:04 +02:00
|
|
|
modelPath: 'facemesh.json', // facemesh model, can be absolute path or relative to modelBasePath
|
2020-10-12 01:22:43 +02:00
|
|
|
},
|
2020-11-08 18:26:45 +01:00
|
|
|
|
2020-10-12 01:22:43 +02:00
|
|
|
iris: {
|
|
|
|
enabled: true,
|
2021-04-09 14:07:58 +02:00
|
|
|
modelPath: 'iris.json', // face iris model
|
|
|
|
// can be either absolute path or relative to modelBasePath
|
2020-10-12 01:22:43 +02:00
|
|
|
},
|
2020-11-08 18:26:45 +01:00
|
|
|
|
2021-09-28 19:48:29 +02:00
|
|
|
emotion: {
|
|
|
|
enabled: true,
|
|
|
|
minConfidence: 0.1, // threshold for discarding a prediction
|
2021-10-23 15:38:52 +02:00
|
|
|
skipFrames: 99, // how max many frames to go without re-running the detector
|
2021-09-28 19:48:29 +02:00
|
|
|
// only used when cacheSensitivity is not zero
|
2021-10-23 15:38:52 +02:00
|
|
|
skipTime: 1500, // how many ms to go without re-running the face bounding box detector
|
2021-10-22 22:09:52 +02:00
|
|
|
// only used when cacheSensitivity is not zero
|
2021-09-28 19:48:29 +02:00
|
|
|
modelPath: 'emotion.json', // face emotion model, can be absolute path or relative to modelBasePath
|
|
|
|
},
|
|
|
|
|
2021-03-21 19:18:51 +01:00
|
|
|
description: {
|
2021-03-24 16:08:49 +01:00
|
|
|
enabled: true, // to improve accuracy of face description extraction it is
|
2021-03-21 19:18:51 +01:00
|
|
|
// recommended to enable detector.rotation and mesh.enabled
|
2021-04-09 14:07:58 +02:00
|
|
|
modelPath: 'faceres.json', // face description model
|
|
|
|
// can be either absolute path or relative to modelBasePath
|
2021-10-23 15:38:52 +02:00
|
|
|
skipFrames: 99, // how many max frames to go without re-running the detector
|
2021-05-18 17:26:16 +02:00
|
|
|
// only used when cacheSensitivity is not zero
|
2021-10-23 15:38:52 +02:00
|
|
|
skipTime: 3000, // how many ms to go without re-running the face bounding box detector
|
2021-10-22 22:09:52 +02:00
|
|
|
// only used when cacheSensitivity is not zero
|
2021-04-25 00:43:59 +02:00
|
|
|
minConfidence: 0.1, // threshold for discarding a prediction
|
2021-03-21 19:18:51 +01:00
|
|
|
},
|
2021-10-13 16:56:56 +02:00
|
|
|
|
|
|
|
antispoof: {
|
|
|
|
enabled: false,
|
2021-10-23 15:38:52 +02:00
|
|
|
skipFrames: 99, // how max many frames to go without re-running the detector
|
2021-10-13 16:56:56 +02:00
|
|
|
// only used when cacheSensitivity is not zero
|
2021-10-23 15:38:52 +02:00
|
|
|
skipTime: 4000, // how many ms to go without re-running the face bounding box detector
|
2021-10-22 22:09:52 +02:00
|
|
|
// only used when cacheSensitivity is not zero
|
2021-10-13 16:56:56 +02:00
|
|
|
modelPath: 'antispoof.json', // face description model
|
|
|
|
// can be either absolute path or relative to modelBasePath
|
|
|
|
},
|
2020-10-12 01:22:43 +02:00
|
|
|
},
|
2020-11-08 18:26:45 +01:00
|
|
|
|
2020-10-12 01:22:43 +02:00
|
|
|
body: {
|
|
|
|
enabled: true,
|
2021-05-30 00:29:57 +02:00
|
|
|
modelPath: 'movenet-lightning.json', // body model, can be absolute path or relative to modelBasePath
|
|
|
|
// can be 'posenet', 'blazepose', 'efficientpose', 'movenet-lightning', 'movenet-thunder'
|
2021-09-27 19:58:13 +02:00
|
|
|
detector: {
|
|
|
|
modelPath: '', // optional body detector
|
|
|
|
},
|
2021-09-26 01:14:03 +02:00
|
|
|
maxDetected: -1, // maximum number of people detected in the input
|
2020-11-08 18:26:45 +01:00
|
|
|
// should be set to the minimum number for performance
|
2021-09-26 01:14:03 +02:00
|
|
|
// only valid for posenet and movenet-multipose as other models detects single pose
|
|
|
|
// set to -1 to autodetect based on number of detected faces
|
2021-10-14 18:26:59 +02:00
|
|
|
minConfidence: 0.3, // threshold for discarding a prediction
|
2021-10-04 23:03:36 +02:00
|
|
|
skipFrames: 1, // how many max frames to go without re-running the detector
|
2021-06-03 15:41:53 +02:00
|
|
|
// only used when cacheSensitivity is not zero
|
2021-10-23 15:38:52 +02:00
|
|
|
skipTime: 200, // how many ms to go without re-running the face bounding box detector
|
2021-10-22 22:09:52 +02:00
|
|
|
// only used when cacheSensitivity is not zero
|
2021-06-03 15:41:53 +02:00
|
|
|
},
|
2020-11-08 18:26:45 +01:00
|
|
|
|
2020-10-12 01:22:43 +02:00
|
|
|
hand: {
|
|
|
|
enabled: true,
|
2021-05-31 16:40:07 +02:00
|
|
|
rotation: true, // use best-guess rotated hand image or just box with rotation as-is
|
2020-12-10 21:46:45 +01:00
|
|
|
// false means higher performance, but incorrect finger mapping if hand is inverted
|
2021-09-24 15:55:27 +02:00
|
|
|
// only valid for `handdetect` variation
|
2021-10-23 15:38:52 +02:00
|
|
|
skipFrames: 99, // how many max frames to go without re-running the hand bounding box detector
|
2021-05-18 17:26:16 +02:00
|
|
|
// only used when cacheSensitivity is not zero
|
2021-10-22 22:09:52 +02:00
|
|
|
skipTime: 2000, // how many ms to go without re-running the face bounding box detector
|
|
|
|
// only used when cacheSensitivity is not zero
|
2021-10-11 04:29:20 +02:00
|
|
|
minConfidence: 0.50, // threshold for discarding a prediction
|
2021-09-02 14:50:16 +02:00
|
|
|
iouThreshold: 0.2, // ammount of overlap between two detected objects before one object is removed
|
2021-09-26 01:14:03 +02:00
|
|
|
maxDetected: -1, // maximum number of hands detected in the input
|
2020-11-08 18:26:45 +01:00
|
|
|
// should be set to the minimum number for performance
|
2021-09-26 01:14:03 +02:00
|
|
|
// set to -1 to autodetect based on number of detected faces
|
2020-11-08 15:56:02 +01:00
|
|
|
landmarks: true, // detect hand landmarks or just hand boundary box
|
2020-10-12 01:22:43 +02:00
|
|
|
detector: {
|
2021-09-28 18:02:17 +02:00
|
|
|
modelPath: 'handtrack.json', // hand detector model, can be absolute path or relative to modelBasePath
|
2021-09-22 22:00:43 +02:00
|
|
|
// can be 'handdetect' or 'handtrack'
|
2020-10-12 01:22:43 +02:00
|
|
|
},
|
|
|
|
skeleton: {
|
2021-04-25 19:16:04 +02:00
|
|
|
modelPath: 'handskeleton.json', // hand skeleton model, can be absolute path or relative to modelBasePath
|
2020-10-12 01:22:43 +02:00
|
|
|
},
|
|
|
|
},
|
2021-03-17 16:32:37 +01:00
|
|
|
|
|
|
|
object: {
|
|
|
|
enabled: false,
|
2021-05-19 14:27:28 +02:00
|
|
|
modelPath: 'mb3-centernet.json', // experimental: object detection model, can be absolute path or relative to modelBasePath
|
|
|
|
// can be 'mb3-centernet' or 'nanodet'
|
2021-04-25 19:16:04 +02:00
|
|
|
minConfidence: 0.2, // threshold for discarding a prediction
|
|
|
|
iouThreshold: 0.4, // ammount of overlap between two detected objects before one object is removed
|
|
|
|
maxDetected: 10, // maximum number of objects detected in the input
|
2021-10-23 15:38:52 +02:00
|
|
|
skipFrames: 99, // how many max frames to go without re-running the detector
|
2021-05-18 17:26:16 +02:00
|
|
|
// only used when cacheSensitivity is not zero
|
2021-10-23 15:38:52 +02:00
|
|
|
skipTime: 1000, // how many ms to go without re-running object detector
|
2021-10-22 22:09:52 +02:00
|
|
|
// only used when cacheSensitivity is not zero
|
2021-03-17 16:32:37 +01:00
|
|
|
},
|
2021-06-04 19:51:01 +02:00
|
|
|
|
|
|
|
segmentation: {
|
2021-06-05 23:51:46 +02:00
|
|
|
enabled: false, // controlls and configures all body segmentation module
|
|
|
|
// removes background from input containing person
|
|
|
|
// if segmentation is enabled it will run as preprocessing task before any other model
|
|
|
|
// alternatively leave it disabled and use it on-demand using human.segmentation method which can
|
|
|
|
// remove background or replace it with user-provided background
|
2021-06-04 19:51:01 +02:00
|
|
|
modelPath: 'selfie.json', // experimental: object detection model, can be absolute path or relative to modelBasePath
|
|
|
|
// can be 'selfie' or 'meet'
|
2021-09-22 21:16:14 +02:00
|
|
|
blur: 8, // blur segmentation output by n pixels for more realistic image
|
2021-06-04 19:51:01 +02:00
|
|
|
},
|
2020-10-12 01:22:43 +02:00
|
|
|
};
|
2021-09-24 15:55:27 +02:00
|
|
|
|
2021-03-17 23:23:19 +01:00
|
|
|
/** Default configuration values, re-exported under the name `defaults` */
export { config as defaults };
|