// human/types/config.d.ts

/**
* Configuration interface definition for **Human** library
*
* Contains all configurable parameters
*/
export interface Config {
/** Backend used for TFJS operations */
backend: null | '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow';
/** Path to *.wasm files if backend is set to `wasm` */
wasmPath: string;
/** Print debug statements to console */
debug: boolean;
/** Perform model loading and inference concurrently or sequentially */
async: boolean;
/** Collect and print profiling data during inference operations */
profile: boolean;
/** Internal: Use aggressive GPU memory deallocator when backend is set to `webgl` or `humangl` */
deallocate: boolean;
/** Internal: Run all inference operations inside an explicit local scope to avoid memory leaks */
scoped: boolean;
/** Perform additional optimizations when input is video
 * - must be disabled for still images
 * - automatically disabled for Image, ImageData, ImageBitmap and Tensor inputs
 * - skips full bounding-box detection for `skipFrames` frames as configured per model
 * - in-box detection still runs on every frame, since detected objects do not move that quickly between frames */
videoOptimized: boolean;
/** What to use for `human.warmup()`
* - warmup pre-initializes all models for faster inference but can take significant time on startup
* - only used for `webgl` and `humangl` backends
*/
warmup: 'none' | 'face' | 'full' | 'body';
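/*
 * A minimal sketch of how these top-level options might be combined in a
 * user-supplied configuration; the values below are illustrative assumptions,
 * not the library defaults:
 *
 *   const config: Partial<Config> = {
 *     backend: 'webgl',        // TFJS backend to use
 *     wasmPath: '../assets/',  // hypothetical path, only consulted when backend is 'wasm'
 *     debug: false,
 *     async: true,             // load models and run inference concurrently
 *     warmup: 'face',          // pre-initialize models at startup
 *   };
 */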
/** Base model path (typically starting with file://, http:// or https://) for all models
* - individual modelPath values are joined to this path
*/
modelBasePath: string;
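/*
 * Path resolution example (a hypothetical layout, for illustration only):
 * with modelBasePath set to 'https://example.com/models/' and an individual
 * modelPath of 'detector.json', the model is loaded from
 * 'https://example.com/models/detector.json'.
 */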
/** Run input through image filters before inference
* - image filters run with near-zero latency as they are executed on the GPU
*/
filter: {
enabled: boolean;
/** Resize input width
* - if both width and height are set to 0, there is no resizing
* - if just one is set, second one is scaled automatically
* - if both are set, values are used as-is
*/
width: number;
/** Resize input height
* - if both width and height are set to 0, there is no resizing
* - if just one is set, second one is scaled automatically
* - if both are set, values are used as-is
*/
height: number;
/** Return processed canvas imagedata in result */
return: boolean;
/** Flip input as mirror image */
flip: boolean;
/** Range: -1 (darken) to 1 (lighten) */
brightness: number;
/** Range: -1 (reduce contrast) to 1 (increase contrast) */
contrast: number;
/** Range: 0 (no sharpening) to 1 (maximum sharpening) */
sharpness: number;
/** Range: 0 (no blur) to N (blur radius in pixels) */
blur: number;
/** Range: -1 (reduce saturation) to 1 (increase saturation) */
saturation: number;
/** Range: 0 (no change) to 360 (hue rotation in degrees) */
hue: number;
/** Image negative */
negative: boolean;
/** Image sepia colors */
sepia: boolean;
/** Image vintage colors */
vintage: boolean;
/** Image kodachrome colors */
kodachrome: boolean;
/** Image technicolor colors */
technicolor: boolean;
/** Image polaroid camera effect */
polaroid: boolean;
/** Range: 0 (no pixelate) to N (number of pixels to pixelate) */
pixelate: number;
};
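/*
 * A filter sketch illustrating the resize rules above; values are chosen for
 * illustration only. Width 640 with height 0 resizes the input to 640px wide
 * and scales the height automatically, while all other effects stay at their
 * defaults:
 *
 *   const filter: Partial<Config['filter']> = {
 *     enabled: true,
 *     width: 640,
 *     height: 0,
 *     flip: true,       // mirror the input
 *     brightness: 0.1,  // lighten slightly; range is -1 to 1
 *   };
 */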
/** Controls gesture detection */
gesture: {
enabled: boolean;
};
/** Controls and configures all face-specific options:
 * - face detection, face mesh detection, age, gender, emotion detection and face description
 * Parameters:
 * - enabled: true/false
 * - modelPath: path to the individual face model
 * - rotation: use the calculated rotated face image instead of the box with rotation as-is; false means higher performance but incorrect mesh mapping at larger face angles
 * - maxFaces: maximum number of faces detected in the input; should be set to the minimum expected number for best performance
 * - skipFrames: how many frames to skip re-running the face detector, running only the modified face mesh analysis in between; only valid if videoOptimized is set to true
 * - skipInitial: whether skipFrames should be reset immediately to force a new detection cycle when the previous detection found no faces
 * - minConfidence: threshold for discarding a prediction
 * - iouThreshold: threshold for deciding whether boxes overlap too much in non-maximum suppression
 * - scoreThreshold: threshold for deciding when to remove boxes based on score in non-maximum suppression
 * - return: return the extracted face as a tensor for further processing by the user
 * An illustrative configuration sketch follows this declaration.
 */
face: {
enabled: boolean;
detector: {
modelPath: string;
rotation: boolean;
maxFaces: number;
skipFrames: number;
skipInitial: boolean;
minConfidence: number;
iouThreshold: number;
scoreThreshold: number;
return: boolean;
};
mesh: {
enabled: boolean;
modelPath: string;
};
iris: {
enabled: boolean;
modelPath: string;
};
description: {
enabled: boolean;
modelPath: string;
skipFrames: number;
};
age: {
enabled: boolean;
modelPath: string;
skipFrames: number;
};
gender: {
enabled: boolean;
minConfidence: number;
modelPath: string;
skipFrames: number;
};
emotion: {
enabled: boolean;
minConfidence: number;
skipFrames: number;
modelPath: string;
};
embedding: {
enabled: boolean;
modelPath: string;
};
};
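/*
 * A hypothetical face configuration fragment tuned for a single-person webcam
 * stream; values are assumptions for illustration, not recommendations:
 *
 *   const face: Partial<Config['face']> = {
 *     enabled: true,
 *     detector: {
 *       modelPath: 'detector.json', // hypothetical model file name
 *       rotation: false, maxFaces: 1, skipFrames: 15, skipInitial: true,
 *       minConfidence: 0.5, iouThreshold: 0.3, scoreThreshold: 0.75, return: false,
 *     },
 *   };
 *
 * Remember that skipFrames only takes effect when videoOptimized is enabled.
 */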
/** Controls and configures all body detection specific options
 * - enabled: true/false
 * - modelPath: path to the body detection model
 * - maxDetections: maximum number of people detected in the input; should be set to the minimum expected number for best performance
 * - scoreThreshold: threshold for deciding when to remove people based on score in non-maximum suppression
 * - nmsRadius: radius for deciding whether body parts overlap too much in non-maximum suppression
 * An illustrative configuration sketch follows this declaration.
 */
body: {
enabled: boolean;
modelPath: string;
maxDetections: number;
scoreThreshold: number;
nmsRadius: number;
};
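/*
 * A body configuration sketch for a scene with at most a few people; values
 * are illustrative assumptions:
 *
 *   const body: Partial<Config['body']> = {
 *     enabled: true,
 *     maxDetections: 3,     // keep as low as the use case allows for performance
 *     scoreThreshold: 0.5,  // drop low-scoring candidates during non-maximum suppression
 *     nmsRadius: 20,        // overlap radius used by non-maximum suppression
 *   };
 */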
/** Controls and configures all hand detection specific options
 * - enabled: true/false
 * - modelPath: paths for both the hand detector model and the hand skeleton model
 * - rotation: use the best-guess rotated hand image instead of the box with rotation as-is; false means higher performance but incorrect finger mapping if the hand is inverted
 * - skipFrames: how many frames to skip re-running the hand bounding-box detector, running only the modified hand skeleton detector in between; only valid if videoOptimized is set to true
 * - skipInitial: whether skipFrames should be reset immediately to force a new detection cycle when the previous detection found no hands
 * - minConfidence: threshold for discarding a prediction
 * - iouThreshold: threshold for deciding whether boxes overlap too much in non-maximum suppression
 * - scoreThreshold: threshold for deciding when to remove boxes based on score in non-maximum suppression
 * - maxHands: maximum number of hands detected in the input; should be set to the minimum expected number for best performance
 * - landmarks: detect hand landmarks or just the hand bounding box
 * An illustrative configuration sketch follows this declaration.
 */
hand: {
enabled: boolean;
rotation: boolean;
skipFrames: number;
skipInitial: boolean;
minConfidence: number;
iouThreshold: number;
scoreThreshold: number;
maxHands: number;
landmarks: boolean;
detector: {
modelPath: string;
};
skeleton: {
modelPath: string;
};
};
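/*
 * A hand configuration sketch that trades some accuracy for speed by skipping
 * the bounding-box detector on most frames; values are illustrative only and
 * skipFrames again requires videoOptimized:
 *
 *   const hand: Partial<Config['hand']> = {
 *     enabled: true,
 *     maxHands: 2,
 *     skipFrames: 20,   // re-run the hand bounding-box detector every 20 frames
 *     landmarks: true,  // detect the full skeleton, not just the bounding box
 *   };
 */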
/** Controls and configures all object detection specific options
 * - minConfidence: minimum score a detection must have to be returned as a valid object
 * - iouThreshold: amount of overlap between two detected objects before one of them is removed
 * - maxResults: maximum number of detections to return
 * - skipFrames: run object detection only every n input frames; only valid if videoOptimized is set to true
 * An illustrative configuration sketch follows this declaration.
 */
object: {
enabled: boolean;
modelPath: string;
minConfidence: number;
iouThreshold: number;
maxResults: number;
skipFrames: number;
};
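/*
 * An object-detection sketch with illustrative values: a higher iouThreshold
 * keeps more overlapping boxes, while maxResults caps how many detections are
 * returned:
 *
 *   const object: Partial<Config['object']> = {
 *     enabled: true,
 *     minConfidence: 0.3,
 *     iouThreshold: 0.4,
 *     maxResults: 10,
 *     skipFrames: 30,
 *   };
 */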
}
declare const config: Config;
export { config as defaults };
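/*
 * Putting it together: fragments like the ones above are typically merged into
 * a single object, with the exported `defaults` serving as the base for any
 * values left unset, and the result is passed to the library when it is
 * instantiated. A sketch, assuming a relative import path and the usual
 * constructor-based entry point (check the library documentation for the exact
 * usage):
 *
 *   import { Config, defaults } from './config';
 *   const myConfig: Config = { ...defaults, debug: true, warmup: 'none' };
 */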