diff --git a/Configuration.md b/Configuration.md index 987eac1..3a19ccc 100644 --- a/Configuration.md +++ b/Configuration.md @@ -48,16 +48,16 @@ const config: Config = { // this disables per-model performance data but // slightly increases performance // cannot be used if profiling is enabled - profile: false, // enable tfjs profiling + profile: false, // internal: enable tfjs profiling // this has significant performance impact // only enable for debugging purposes // currently only implemented for age,gender,emotion models - deallocate: false, // aggresively deallocate gpu memory after each usage - // only valid for webgl backend and only during first call + deallocate: false, // internal: aggressively deallocate gpu memory after each usage + // only valid for webgl and humangl backend and only during first call // cannot be changed unless library is reloaded // this has significant performance impact // only enable on low-memory devices - scoped: false, // enable scoped runs + scoped: false, // internal: enable scoped runs // some models *may* have memory leaks, // this wrapps everything in a local scope at a cost of performance // typically not needed @@ -69,7 +69,9 @@ const config: Config = { warmup: 'face', // what to use for human.warmup(), can be 'none', 'face', 'full' // warmup pre-initializes all models for faster inference but can take // significant time on startup - filter: { + // only used for `webgl` and `humangl` backends + filter: { // run input through image filters before inference + // image filters run with near-zero latency as they are executed on the GPU enabled: true, // enable image pre-processing filters width: 0, // resize input width height: 0, // resize input height @@ -115,7 +117,7 @@ const config: Config = { // box for updated face analysis as the head probably hasn't moved much // in short time (10 * 1/25 = 0.25 sec) skipInitial: false, // if previous detection resulted in no faces detected, - // should skipFrames be reset immediately 
+ // should skipFrames be reset immediately to force new detection cycle minConfidence: 0.2, // threshold for discarding a prediction iouThreshold: 0.1, // threshold for deciding whether boxes overlap too much in // non-maximum suppression (0.1 means drop if overlap 10%) @@ -153,6 +155,29 @@ const config: Config = { modelPath: 'emotion.json', // face emotion model // can be either absolute path or relative to modelBasePath }, + + age: { + enabled: false, // obsolete, replaced by description module + modelPath: 'age.json', // age model + // can be either absolute path or relative to modelBasePath + skipFrames: 33, // how many frames to go without re-running the detector + // only used for video inputs + }, + + gender: { + enabled: false, // obsolete, replaced by description module + minConfidence: 0.1, // threshold for discarding a prediction + modelPath: 'gender.json', // gender model + // can be either absolute path or relative to modelBasePath + skipFrames: 34, // how many frames to go without re-running the detector + // only used for video inputs + }, + + embedding: { + enabled: false, // obsolete, replaced by description module + modelPath: 'mobileface.json', // face descriptor model + // can be either absolute path or relative to modelBasePath + }, }, body: { @@ -180,8 +205,8 @@ const config: Config = { // e.g., if model is running st 25 FPS, we can re-use existing bounding // box for updated hand skeleton analysis as the hand probably // hasn't moved much in short time (10 * 1/25 = 0.25 sec) - skipInitial: false, // if previous detection resulted in no faces detected, - // should skipFrames be reset immediately + skipInitial: false, // if previous detection resulted in no hands detected, + // should skipFrames be reset immediately to force new detection cycle minConfidence: 0.1, // threshold for discarding a prediction iouThreshold: 0.1, // threshold for deciding whether boxes overlap too much // in non-maximum suppression diff --git a/Demos.md b/Demos.md 
index c75b9f4..60f52ad 100644 --- a/Demos.md +++ b/Demos.md @@ -63,7 +63,7 @@ const ui = { Additionally, some parameters are held inside `Human` instance: ```ts -human.draw.drawOptions = { +human.draw.options = { color: 'rgba(173, 216, 230, 0.3)', // 'lightblue' with light alpha channel labelColor: 'rgba(173, 216, 230, 1)', // 'lightblue' with dark alpha channel shadowColor: 'black', diff --git a/Home.md b/Home.md index 44a1c19..04a99da 100644 --- a/Home.md +++ b/Home.md @@ -32,8 +32,8 @@ Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) fo - [**Code Repository**](https://github.com/vladmandic/human) - [**NPM Package**](https://www.npmjs.com/package/@vladmandic/human) - [**Issues Tracker**](https://github.com/vladmandic/human/issues) -- [**API Specification: Human**](https://vladmandic.github.io/human/typedoc/classes/human.html) -- [**API Specification: Root**](https://vladmandic.github.io/human/typedoc/) +- [**TypeDoc API Specification: Human**](https://vladmandic.github.io/human/typedoc/classes/human.html) +- [**TypeDoc API Specification: Root**](https://vladmandic.github.io/human/typedoc/) - [**Change Log**](https://github.com/vladmandic/human/blob/main/CHANGELOG.md) ## Wiki pages diff --git a/Usage.md b/Usage.md index e5c330b..a296460 100644 --- a/Usage.md +++ b/Usage.md @@ -76,7 +76,7 @@ Additional helper functions inside `human.draw`: human.draw.gesture(canvas, result.gesture) // draw detected gesture results to canvas ``` -Style of drawing is configurable via `human.draw.drawOptions` object: +Style of drawing is configurable via `human.draw.options` object: ```js color: 'rgba(173, 216, 230, 0.3)', // 'lightblue' with light alpha channel