diff --git a/CHANGELOG.md b/CHANGELOG.md
index f07df2ac..737b367e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,11 +9,13 @@
## Changelog
+### **HEAD -> main** 2021/09/24 mandic00@live.com
+
+- new release
+
### **2.2.3** 2021/09/24 mandic00@live.com
-
-### **origin/main** 2021/09/23 mandic00@live.com
-
+- optimize model loading
- support segmentation for nodejs
- redo segmentation and handtracking
- prototype handtracking
diff --git a/README.md b/README.md
index 36e3e28c..f96998ae 100644
--- a/README.md
+++ b/README.md
@@ -42,6 +42,7 @@ Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) ap
- [*Live:* **Face Extraction and 3D Rendering**](https://vladmandic.github.io/human/demo/face3d/index.html)
- [*Live:* **Multithreaded Detection Showcasing Maximum Performance**](https://vladmandic.github.io/human/demo/multithread/index.html)
- [*Live:* **VR Model with Head, Face, Eye, Body and Hand tracking**](https://vladmandic.github.io/human-vrm/src/human-vrm.html)
+- [Examples gallery](https://vladmandic.github.io/human/samples/samples.html)
## Project pages
@@ -75,6 +76,7 @@ Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) ap
- [**Platform Support**](https://github.com/vladmandic/human/wiki/Platforms)
- [**Diagnostic and Performance trace information**](https://github.com/vladmandic/human/wiki/Diag)
- [**List of Models & Credits**](https://github.com/vladmandic/human/wiki/Models)
+- [**Models Download Repository**](https://github.com/vladmandic/human-models)
- [**Security & Privacy Policy**](https://github.com/vladmandic/human/blob/main/SECURITY.md)
- [**License & Usage Restrictions**](https://github.com/vladmandic/human/blob/main/LICENSE)
@@ -86,6 +88,15 @@ Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) ap
+## Examples
+
+Visit the [Examples gallery](https://vladmandic.github.io/human/samples/samples.html) for more examples
+
+
+
+
+
+
## Options
All options as presented in the demo application...
@@ -95,52 +106,15 @@ All options as presented in the demo application...
-## Examples
-
-
-
-**Face Close-up:**
-
-
-
-
-**Face under a high angle:**
-
-
-
-
-**Full Person Details:**
-
-
-
-
-**Pose Detection:**
-
-
-
-
-**Body Segmentation and Background Replacement:**
-
-
-
-
-**Large Group:**
-
-
-
-
-**VR Model Tracking:**
-
-
-
-
**Results Browser:**
[ *Demo -> Display -> Show Results* ]

-**Face Similarity Matching:**
+## Advanced Examples
+
+1. **Face Similarity Matching:**
Extracts all faces from the provided input images,
sorts them by similarity to the selected face,
and optionally matches each detected face against a database of known people to guess their names
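
As a rough sketch of the underlying API (the image variables here are illustrative, not part of the demo), the matching boils down to comparing face descriptors with `human.similarity`:

```js
const res1 = await human.detect(image1); // image1/image2 can be any supported input
const res2 = await human.detect(image2);
// each detected face carries an embedding (descriptor); similarity returns a 0..1 score
// based on normalized Minkowski distance between the two descriptors
const score = human.similarity(res1.face[0].embedding, res2.face[0].embedding);
```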
@@ -150,13 +124,18 @@ and optionally matches detected face with database of known people to guess thei
-**Face3D OpenGL Rendering:**
+2. **Face3D OpenGL Rendering:**
> [demo/face3d](demo/face3d/index.html)

+3. **VR Model Tracking:**
+
+
+
+
**468-Point Face Mesh Details:**
(view in full resolution to see keypoints)
diff --git a/assets/samples.jpg b/assets/samples.jpg
new file mode 100755
index 00000000..78cdd902
Binary files /dev/null and b/assets/samples.jpg differ
diff --git a/demo/nodejs/node-canvas.js b/demo/nodejs/node-canvas.js
index 3fa83e04..8637cfc2 100644
--- a/demo/nodejs/node-canvas.js
+++ b/demo/nodejs/node-canvas.js
@@ -12,7 +12,7 @@ const Human = require('../../dist/human.node.js'); // this is 'const Human = req
const config = { // just enable all and leave default settings
debug: false,
face: { enabled: true }, // includes mesh, iris, emotion, descriptor
- hand: { enabled: true },
+ hand: { enabled: true, maxDetected: 2, minConfidence: 0.5, detector: { modelPath: 'handtrack.json' } }, // use alternative hand model
body: { enabled: true },
object: { enabled: true },
gestures: { enabled: true },
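
For reference, the two hand-detector variants exercised in `test/test-main.js` differ only in the `detector.modelPath` setting (options copied from that test; treat them as a sketch rather than recommended settings):

```js
// MediaPipe HandPose based detector
config.hand = { enabled: true, maxDetected: 2, minConfidence: 0.1, detector: { modelPath: 'handdetect.json' } };
// alternative HandTrack based detector
config.hand = { enabled: true, maxDetected: 2, minConfidence: 0.1, detector: { modelPath: 'handtrack.json' } };
```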
diff --git a/package.json b/package.json
index c4fb57cd..6cb4ecbc 100644
--- a/package.json
+++ b/package.json
@@ -66,14 +66,14 @@
"@tensorflow/tfjs-layers": "^3.9.0",
"@tensorflow/tfjs-node": "^3.9.0",
"@tensorflow/tfjs-node-gpu": "^3.9.0",
- "@types/node": "^16.9.6",
+ "@types/node": "^16.10.1",
"@typescript-eslint/eslint-plugin": "^4.31.2",
"@typescript-eslint/parser": "^4.31.2",
"@vladmandic/build": "^0.5.3",
"@vladmandic/pilogger": "^0.3.3",
"canvas": "^2.8.0",
"dayjs": "^1.10.7",
- "esbuild": "^0.13.0",
+ "esbuild": "^0.13.2",
"eslint": "^7.32.0",
"eslint-config-airbnb-base": "^14.2.1",
"eslint-plugin-import": "^2.24.2",
diff --git a/samples/README.md b/samples/README.md
index 168a8b79..4b96a893 100644
--- a/samples/README.md
+++ b/samples/README.md
@@ -2,3 +2,11 @@
Sample Images used by `Human` library demos and automated tests
Not required for normal functioning of the library
+
+Samples were generated with the default configuration, without any fine-tuning, using the command:
+
+```shell
+node test/test-node-canvas.js samples/in/ samples/out/
+```
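+
+As a rough illustration (folder names taken from the command above), inputs and generated outputs live side by side:
+
+```shell
+samples/
+  in/   # source images (jpg)
+  out/  # same images with detection results drawn as overlays
+```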
+
+Samples gallery viewer:
diff --git a/samples/ai-body.jpg b/samples/in/ai-body.jpg
similarity index 100%
rename from samples/ai-body.jpg
rename to samples/in/ai-body.jpg
diff --git a/samples/ai-face.jpg b/samples/in/ai-face.jpg
similarity index 100%
rename from samples/ai-face.jpg
rename to samples/in/ai-face.jpg
diff --git a/samples/ai-upper.jpg b/samples/in/ai-upper.jpg
similarity index 100%
rename from samples/ai-upper.jpg
rename to samples/in/ai-upper.jpg
diff --git a/samples/groups/group1.jpg b/samples/in/group-1.jpg
similarity index 100%
rename from samples/groups/group1.jpg
rename to samples/in/group-1.jpg
diff --git a/samples/groups/group2.jpg b/samples/in/group-2.jpg
similarity index 100%
rename from samples/groups/group2.jpg
rename to samples/in/group-2.jpg
diff --git a/samples/groups/group3.jpg b/samples/in/group-3.jpg
similarity index 100%
rename from samples/groups/group3.jpg
rename to samples/in/group-3.jpg
diff --git a/samples/groups/group14.jpg b/samples/in/group-4.jpg
similarity index 100%
rename from samples/groups/group14.jpg
rename to samples/in/group-4.jpg
diff --git a/samples/groups/group13.jpg b/samples/in/group-5.jpg
similarity index 100%
rename from samples/groups/group13.jpg
rename to samples/in/group-5.jpg
diff --git a/samples/groups/group6.jpg b/samples/in/group-6.jpg
similarity index 100%
rename from samples/groups/group6.jpg
rename to samples/in/group-6.jpg
diff --git a/samples/groups/group7.jpg b/samples/in/group-7.jpg
similarity index 100%
rename from samples/groups/group7.jpg
rename to samples/in/group-7.jpg
diff --git a/samples/in/person-celeste.jpg b/samples/in/person-celeste.jpg
new file mode 100644
index 00000000..b8800ff0
Binary files /dev/null and b/samples/in/person-celeste.jpg differ
diff --git a/samples/in/person-linda.jpg b/samples/in/person-linda.jpg
new file mode 100644
index 00000000..46b8439c
Binary files /dev/null and b/samples/in/person-linda.jpg differ
diff --git a/samples/in/person-tetiana.jpg b/samples/in/person-tetiana.jpg
new file mode 100644
index 00000000..5f285319
Binary files /dev/null and b/samples/in/person-tetiana.jpg differ
diff --git a/samples/in/person-vlado.jpg b/samples/in/person-vlado.jpg
new file mode 100644
index 00000000..ee7ea6c6
Binary files /dev/null and b/samples/in/person-vlado.jpg differ
diff --git a/samples/out/ai-body.jpg b/samples/out/ai-body.jpg
new file mode 100644
index 00000000..2ed46162
Binary files /dev/null and b/samples/out/ai-body.jpg differ
diff --git a/samples/out/ai-face.jpg b/samples/out/ai-face.jpg
new file mode 100644
index 00000000..989ce2a7
Binary files /dev/null and b/samples/out/ai-face.jpg differ
diff --git a/samples/out/ai-upper.jpg b/samples/out/ai-upper.jpg
new file mode 100644
index 00000000..cd6d2a3d
Binary files /dev/null and b/samples/out/ai-upper.jpg differ
diff --git a/samples/out/group-1.jpg b/samples/out/group-1.jpg
new file mode 100644
index 00000000..ceac1af3
Binary files /dev/null and b/samples/out/group-1.jpg differ
diff --git a/samples/out/group-2.jpg b/samples/out/group-2.jpg
new file mode 100644
index 00000000..8b1e9344
Binary files /dev/null and b/samples/out/group-2.jpg differ
diff --git a/samples/out/group-3.jpg b/samples/out/group-3.jpg
new file mode 100644
index 00000000..6f2cd091
Binary files /dev/null and b/samples/out/group-3.jpg differ
diff --git a/samples/out/group-4.jpg b/samples/out/group-4.jpg
new file mode 100644
index 00000000..1bd2d4e4
Binary files /dev/null and b/samples/out/group-4.jpg differ
diff --git a/samples/out/group-5.jpg b/samples/out/group-5.jpg
new file mode 100644
index 00000000..66a44079
Binary files /dev/null and b/samples/out/group-5.jpg differ
diff --git a/samples/out/group-6.jpg b/samples/out/group-6.jpg
new file mode 100644
index 00000000..60a82fa8
Binary files /dev/null and b/samples/out/group-6.jpg differ
diff --git a/samples/out/group-7.jpg b/samples/out/group-7.jpg
new file mode 100644
index 00000000..10ca4fea
Binary files /dev/null and b/samples/out/group-7.jpg differ
diff --git a/samples/out/person-celeste.jpg b/samples/out/person-celeste.jpg
new file mode 100644
index 00000000..5d796e99
Binary files /dev/null and b/samples/out/person-celeste.jpg differ
diff --git a/samples/out/person-linda.jpg b/samples/out/person-linda.jpg
new file mode 100644
index 00000000..a3cd05a8
Binary files /dev/null and b/samples/out/person-linda.jpg differ
diff --git a/samples/out/person-tetiana.jpg b/samples/out/person-tetiana.jpg
new file mode 100644
index 00000000..abdf4bbb
Binary files /dev/null and b/samples/out/person-tetiana.jpg differ
diff --git a/samples/out/person-vlado.jpg b/samples/out/person-vlado.jpg
new file mode 100644
index 00000000..3b948a38
Binary files /dev/null and b/samples/out/person-vlado.jpg differ
diff --git a/samples/people/christina.jpg b/samples/people/christina.jpg
deleted file mode 100644
index b19f506f..00000000
Binary files a/samples/people/christina.jpg and /dev/null differ
diff --git a/samples/people/lauren.jpg b/samples/people/lauren.jpg
deleted file mode 100644
index 8cf7a4d4..00000000
Binary files a/samples/people/lauren.jpg and /dev/null differ
diff --git a/samples/people/lexi.jpg b/samples/people/lexi.jpg
deleted file mode 100644
index b262b757..00000000
Binary files a/samples/people/lexi.jpg and /dev/null differ
diff --git a/samples/people/tasia.jpg b/samples/people/tasia.jpg
deleted file mode 100644
index 772a0990..00000000
Binary files a/samples/people/tasia.jpg and /dev/null differ
diff --git a/samples/people/vlado1.jpg b/samples/people/vlado1.jpg
deleted file mode 100644
index d759e447..00000000
Binary files a/samples/people/vlado1.jpg and /dev/null differ
diff --git a/samples/people/vlado5.jpg b/samples/people/vlado5.jpg
deleted file mode 100644
index d0b87746..00000000
Binary files a/samples/people/vlado5.jpg and /dev/null differ
diff --git a/samples/samples.html b/samples/samples.html
new file mode 100644
index 00000000..aa8c310c
--- /dev/null
+++ b/samples/samples.html
@@ -0,0 +1,57 @@
+<!DOCTYPE html>
+<html lang="en">
+  <head>
+    <meta charset="utf-8">
+    <title>Human Examples Gallery</title>
+  </head>
+  <body>
+    <h1>Human Examples Gallery</h1>
+  </body>
+</html>
diff --git a/src/blazeface/coords.ts b/src/blazeface/coords.ts
index 552c168b..6f420fd9 100644
--- a/src/blazeface/coords.ts
+++ b/src/blazeface/coords.ts
@@ -1,3 +1,8 @@
+/**
+ * BlazeFace, FaceMesh & Iris model implementation
+ * See `facemesh.ts` for entry point
+ */
+
export const MESH_ANNOTATIONS = {
silhouette: [
10, 338, 297, 332, 284, 251, 389, 356, 454, 323, 361, 288,
diff --git a/src/draw.ts b/src/draw.ts
index b1144e72..5a1e4506 100644
--- a/src/draw.ts
+++ b/src/draw.ts
@@ -3,7 +3,7 @@
*/
import { TRI468 as triangulation } from './blazeface/coords';
-import { mergeDeep, now } from './helpers';
+import { mergeDeep, now } from './util';
import type { Result, FaceResult, BodyResult, HandResult, ObjectResult, GestureResult, PersonResult } from './result';
/**
diff --git a/src/efficientpose/efficientpose.ts b/src/efficientpose/efficientpose.ts
index caa412b0..b647808c 100644
--- a/src/efficientpose/efficientpose.ts
+++ b/src/efficientpose/efficientpose.ts
@@ -1,8 +1,10 @@
/**
- * EfficientPose Module
+ * EfficientPose model implementation
+ *
+ * Based on: [**EfficientPose**](https://github.com/daniegr/EfficientPose)
*/
-import { log, join } from '../helpers';
+import { log, join } from '../util';
import * as tf from '../../dist/tfjs.esm.js';
import type { BodyResult } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
diff --git a/src/emotion/emotion.ts b/src/emotion/emotion.ts
index d3547779..f8012106 100644
--- a/src/emotion/emotion.ts
+++ b/src/emotion/emotion.ts
@@ -1,8 +1,10 @@
/**
- * Emotion Module
+ * Emotion model implementation
+ *
+ * [**Oarriaga**](https://github.com/oarriaga/face_classification)
*/
-import { log, join } from '../helpers';
+import { log, join } from '../util';
import type { Config } from '../config';
import type { GraphModel, Tensor } from '../tfjs/types';
import * as tf from '../../dist/tfjs.esm.js';
diff --git a/src/env.ts b/src/env.ts
index cbb0f5f4..9b71891c 100644
--- a/src/env.ts
+++ b/src/env.ts
@@ -1,6 +1,6 @@
import * as tf from '../dist/tfjs.esm.js';
import * as image from './image/image';
-import { mergeDeep } from './helpers';
+import { mergeDeep } from './util';
export type Env = {
browser: undefined | boolean,
diff --git a/src/face.ts b/src/face.ts
index f1835940..dc3b3adb 100644
--- a/src/face.ts
+++ b/src/face.ts
@@ -1,9 +1,9 @@
/**
- * Module that analyzes person age
- * Obsolete
+ * Face algorithm implementation
+ * Uses FaceMesh, Emotion and FaceRes models to create a unified pipeline
*/
-import { log, now } from './helpers';
+import { log, now } from './util';
import * as tf from '../dist/tfjs.esm.js';
import * as facemesh from './blazeface/facemesh';
import * as emotion from './emotion/emotion';
diff --git a/src/faceres/faceres.ts b/src/faceres/faceres.ts
index 770e994f..249e4962 100644
--- a/src/faceres/faceres.ts
+++ b/src/faceres/faceres.ts
@@ -1,10 +1,13 @@
/**
- * HSE-FaceRes Module
+ * FaceRes model implementation
+ *
* Returns Age, Gender, Descriptor
 * Implements face similarity function
+ *
+ * Based on: [**HSE-FaceRes**](https://github.com/HSE-asavchenko/HSE_FaceRec_tf)
*/
-import { log, join } from '../helpers';
+import { log, join } from '../util';
import * as tf from '../../dist/tfjs.esm.js';
import type { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';
diff --git a/src/fingerpose/estimator.ts b/src/fingerpose/estimator.ts
index 8e3275e4..8c2c2884 100644
--- a/src/fingerpose/estimator.ts
+++ b/src/fingerpose/estimator.ts
@@ -1,3 +1,8 @@
+/**
+ * FingerPose algorithm implementation
+ * See `fingerpose.ts` for entry point
+ */
+
import { Finger, FingerCurl, FingerDirection } from './description';
const options = {
diff --git a/src/fingerpose/gesture.ts b/src/fingerpose/gesture.ts
index b56a8fbf..6ce56751 100644
--- a/src/fingerpose/gesture.ts
+++ b/src/fingerpose/gesture.ts
@@ -1,3 +1,8 @@
+/**
+ * FingerPose algorithm implementation
+ * See `fingerpose.ts` for entry point
+ */
+
export default class Gesture {
name;
curls;
diff --git a/src/fingerpose/gestures.ts b/src/fingerpose/gestures.ts
index 58049dca..f413d571 100644
--- a/src/fingerpose/gestures.ts
+++ b/src/fingerpose/gestures.ts
@@ -1,3 +1,8 @@
+/**
+ * FingerPose algorithm implementation
+ * See `fingerpose.ts` for entry point
+ */
+
import { Finger, FingerCurl, FingerDirection } from './description';
import Gesture from './gesture';
diff --git a/src/gesture/gesture.ts b/src/gesture/gesture.ts
index 7964d3e0..1609ea38 100644
--- a/src/gesture/gesture.ts
+++ b/src/gesture/gesture.ts
@@ -1,5 +1,5 @@
/**
- * Gesture detection module
+ * Gesture detection algorithm
*/
import type { GestureResult } from '../result';
diff --git a/src/handpose/anchors.ts b/src/handpose/anchors.ts
index 6a47ffa0..11d3602f 100644
--- a/src/handpose/anchors.ts
+++ b/src/handpose/anchors.ts
@@ -1,3 +1,8 @@
+/**
+ * HandPose model implementation constants
+ * See `handpose.ts` for entry point
+ */
+
export const anchors = [
{ x: 0.015625, y: 0.015625 },
{ x: 0.015625, y: 0.015625 },
diff --git a/src/handpose/box.ts b/src/handpose/box.ts
index 9cf034e7..79e4ca18 100644
--- a/src/handpose/box.ts
+++ b/src/handpose/box.ts
@@ -1,3 +1,8 @@
+/**
+ * HandPose model implementation
+ * See `handpose.ts` for entry point
+ */
+
import * as tf from '../../dist/tfjs.esm.js';
export function getBoxSize(box) {
diff --git a/src/handpose/handdetector.ts b/src/handpose/handdetector.ts
index ddd83af9..f78c9d26 100644
--- a/src/handpose/handdetector.ts
+++ b/src/handpose/handdetector.ts
@@ -1,3 +1,8 @@
+/**
+ * HandPose model implementation
+ * See `handpose.ts` for entry point
+ */
+
import * as tf from '../../dist/tfjs.esm.js';
import * as box from './box';
import * as anchors from './anchors';
diff --git a/src/handpose/handpipeline.ts b/src/handpose/handpipeline.ts
index cf07fdeb..57be4a2c 100644
--- a/src/handpose/handpipeline.ts
+++ b/src/handpose/handpipeline.ts
@@ -1,3 +1,8 @@
+/**
+ * HandPose model implementation
+ * See `handpose.ts` for entry point
+ */
+
import * as tf from '../../dist/tfjs.esm.js';
import * as box from './box';
import * as util from './util';
diff --git a/src/handpose/handpose.ts b/src/handpose/handpose.ts
index cecd72db..20e5bed4 100644
--- a/src/handpose/handpose.ts
+++ b/src/handpose/handpose.ts
@@ -1,8 +1,10 @@
/**
- * HandPose module entry point
+ * HandPose model implementation
+ *
+ * Based on: [**MediaPipe HandPose**](https://drive.google.com/file/d/1sv4sSb9BSNVZhLzxXJ0jBv9DqD-4jnAz/view)
*/
-import { log, join } from '../helpers';
+import { log, join } from '../util';
import * as tf from '../../dist/tfjs.esm.js';
import * as handdetector from './handdetector';
import * as handpipeline from './handpipeline';
diff --git a/src/handtrack/handtrack.ts b/src/handtrack/handtrack.ts
index 555cab23..4fce40e0 100644
--- a/src/handtrack/handtrack.ts
+++ b/src/handtrack/handtrack.ts
@@ -1,8 +1,12 @@
/**
- * Hand Detection and Segmentation
+ * HandTrack model implementation
+ *
+ * Based on:
+ * - Hand Detection & Skeleton: [**MediaPipe HandPose**](https://drive.google.com/file/d/1sv4sSb9BSNVZhLzxXJ0jBv9DqD-4jnAz/view)
+ * - Hand Tracking: [**HandTracking**](https://github.com/victordibia/handtracking)
*/
-import { log, join } from '../helpers';
+import { log, join } from '../util';
import * as tf from '../../dist/tfjs.esm.js';
import type { HandResult } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
diff --git a/src/human.ts b/src/human.ts
index 5a663f0e..561404ed 100644
--- a/src/human.ts
+++ b/src/human.ts
@@ -2,7 +2,7 @@
* Human main module
*/
-import { log, now, mergeDeep, validate } from './helpers';
+import { log, now, mergeDeep, validate } from './util';
import { Config, defaults } from './config';
import type { Result, FaceResult, HandResult, BodyResult, ObjectResult, GestureResult, PersonResult } from './result';
import * as tf from '../dist/tfjs.esm.js';
@@ -168,7 +168,6 @@ export class Human {
this.config = JSON.parse(JSON.stringify(defaults));
Object.seal(this.config);
if (userConfig) this.config = mergeDeep(this.config, userConfig);
- validate(defaults, this.config);
this.tf = tf;
this.state = 'idle';
this.#numTensors = 0;
@@ -229,21 +228,25 @@ export class Human {
}
/** Reset configuration to default values */
- reset = () => {
+ reset() {
const currentBackend = this.config.backend; // save backend;
this.config = JSON.parse(JSON.stringify(defaults));
this.config.backend = currentBackend;
}
/** Validate current configuration schema */
-  validate = (userConfig?: Partial<Config>) => validate(defaults, userConfig || this.config);
+  validate(userConfig?: Partial<Config>) {
+ return validate(defaults, userConfig || this.config);
+ }
/** Process input and return canvas and tensor
*
* @param input: {@link Input}
* @returns { tensor, canvas }
*/
- image = (input: Input) => image.process(input, this.config);
+ image(input: Input) {
+ return image.process(input, this.config);
+ }
/** Similarity method calculates similarity between two provided face descriptors (face embeddings)
* - Calculation is based on normalized Minkowski distance between two descriptors
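
As a hedged illustration of the distance mentioned above (the normalization factor here is an assumption for the sketch, not the library's exact scaling):

```ts
// sketch: Minkowski distance of order p between two descriptors,
// mapped into a 0..1 similarity score
function similaritySketch(a: number[], b: number[], p = 2): number {
  const distance = a.reduce((sum, v, i) => sum + Math.abs(v - b[i]) ** p, 0) ** (1 / p);
  return Math.max(0, 1 - distance / a.length); // assumed normalization
}
```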
diff --git a/src/image/image.ts b/src/image/image.ts
index 40c2b123..73e22f92 100644
--- a/src/image/image.ts
+++ b/src/image/image.ts
@@ -1,5 +1,5 @@
/**
- * Image Processing module used by Human
+ * Image Processing algorithm implementation
*/
import * as tf from '../../dist/tfjs.esm.js';
@@ -7,7 +7,7 @@ import * as fxImage from './imagefx';
import type { Tensor } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';
-import { log } from '../helpers';
+import { log } from '../util';
type Input = Tensor | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas | typeof Image | typeof env.Canvas;
@@ -84,11 +84,11 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
let targetHeight = originalHeight;
if (targetWidth > maxSize) {
targetWidth = maxSize;
- targetHeight = targetWidth * originalHeight / originalWidth;
+ targetHeight = Math.trunc(targetWidth * originalHeight / originalWidth);
}
if (targetHeight > maxSize) {
targetHeight = maxSize;
- targetWidth = targetHeight * originalWidth / originalHeight;
+ targetWidth = Math.trunc(targetHeight * originalWidth / originalHeight);
}
// create our canvas and resize it if needed
diff --git a/src/image/imagefx.ts b/src/image/imagefx.ts
index d0dd9f31..faa05129 100644
--- a/src/image/imagefx.ts
+++ b/src/image/imagefx.ts
@@ -1,6 +1,10 @@
-/*
-WebGLImageFilter by Dominic Szablewski:
-*/
+/**
+ * Image Filters in WebGL algorithm implementation
+ *
+ * Based on: [WebGLImageFilter](https://github.com/phoboslab/WebGLImageFilter)
+ *
+ * This module is written in ES5 JS and does not conform to code and style standards
+ */
// @ts-nocheck
diff --git a/src/interpolate.ts b/src/interpolate.ts
index 7aa4beed..43e3586d 100644
--- a/src/interpolate.ts
+++ b/src/interpolate.ts
@@ -1,5 +1,5 @@
/**
- * Module that interpolates results for smoother animations
+ * Results interpolation for smoothing of video detection results between detected frames
*/
import type { Result, FaceResult, BodyResult, HandResult, ObjectResult, GestureResult, PersonResult } from './result';
diff --git a/src/models.ts b/src/models.ts
index 9e652e75..2e9595b2 100644
--- a/src/models.ts
+++ b/src/models.ts
@@ -1,4 +1,8 @@
-import { log } from './helpers';
+/**
+ * Loader and Validator for all models used by Human
+ */
+
+import { log } from './util';
import type { GraphModel } from './tfjs/types';
import * as facemesh from './blazeface/facemesh';
import * as faceres from './faceres/faceres';
diff --git a/src/movenet/movenet.ts b/src/movenet/movenet.ts
index 6fe1c026..9e1ce554 100644
--- a/src/movenet/movenet.ts
+++ b/src/movenet/movenet.ts
@@ -1,8 +1,10 @@
/**
- * EfficientPose Module
+ * MoveNet model implementation
+ *
+ * Based on: [**MoveNet**](https://blog.tensorflow.org/2021/05/next-generation-pose-detection-with-movenet-and-tensorflowjs.html)
*/
-import { log, join } from '../helpers';
+import { log, join } from '../util';
import * as tf from '../../dist/tfjs.esm.js';
import type { BodyResult } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
diff --git a/src/object/centernet.ts b/src/object/centernet.ts
index f12c0631..b3a9355c 100644
--- a/src/object/centernet.ts
+++ b/src/object/centernet.ts
@@ -1,8 +1,10 @@
/**
- * CenterNet object detection module
+ * CenterNet object detection model implementation
+ *
+ * Based on: [**NanoDet**](https://github.com/RangiLyu/nanodet)
*/
-import { log, join } from '../helpers';
+import { log, join } from '../util';
import * as tf from '../../dist/tfjs.esm.js';
import { labels } from './labels';
import type { ObjectResult } from '../result';
diff --git a/src/object/labels.ts b/src/object/labels.ts
index 055fdf9f..86de19ca 100644
--- a/src/object/labels.ts
+++ b/src/object/labels.ts
@@ -1,5 +1,5 @@
/**
- * CoCo Labels used by object detection modules
+ * CoCo Labels used by object detection implementations
*/
export const labels = [
{ class: 1, label: 'person' },
diff --git a/src/object/nanodet.ts b/src/object/nanodet.ts
index eb21f396..fd491f9f 100644
--- a/src/object/nanodet.ts
+++ b/src/object/nanodet.ts
@@ -1,8 +1,10 @@
/**
- * NanoDet object detection module
+ * NanoDet object detection model implementation
+ *
+ * Based on: [**MB3-CenterNet**](https://github.com/610265158/mobilenetv3_centernet)
*/
-import { log, join } from '../helpers';
+import { log, join } from '../util';
import * as tf from '../../dist/tfjs.esm.js';
import { labels } from './labels';
import type { ObjectResult } from '../result';
diff --git a/src/persons.ts b/src/persons.ts
index 6cfe79b2..edd9755c 100644
--- a/src/persons.ts
+++ b/src/persons.ts
@@ -1,5 +1,5 @@
/**
- * Module that analyzes existing results and recombines them into a unified person object
+ * Analyze detection results and sort & combine them into a per-person view
*/
import type { FaceResult, BodyResult, HandResult, GestureResult, PersonResult } from './result';
diff --git a/src/posenet/poses.ts b/src/posenet/poses.ts
index 9a082f28..fcef4f25 100644
--- a/src/posenet/poses.ts
+++ b/src/posenet/poses.ts
@@ -1,3 +1,8 @@
+/**
+ * PoseNet body detection model implementation
+ * See `posenet.ts` for entry point
+ */
+
import * as utils from './utils';
import * as kpt from './keypoints';
diff --git a/src/posenet/utils.ts b/src/posenet/utils.ts
index cc34153d..a495a17e 100644
--- a/src/posenet/utils.ts
+++ b/src/posenet/utils.ts
@@ -1,3 +1,8 @@
+/**
+ * PoseNet body detection model implementation constants
+ * See `posenet.ts` for entry point
+ */
+
import * as kpt from './keypoints';
import type { BodyResult } from '../result';
diff --git a/src/profile.ts b/src/profile.ts
index a7a48460..eefecb79 100644
--- a/src/profile.ts
+++ b/src/profile.ts
@@ -1,8 +1,9 @@
/**
* Profiling calculations
+ * Debug only
*/
-import { log } from './helpers';
+import { log } from './util';
export const data = {};
diff --git a/src/segmentation/segmentation.ts b/src/segmentation/segmentation.ts
index 3adf9487..7f78163f 100644
--- a/src/segmentation/segmentation.ts
+++ b/src/segmentation/segmentation.ts
@@ -1,8 +1,12 @@
/**
- * EfficientPose Module
+ * Image segmentation for body detection model
+ *
+ * Based on:
+ * - [**MediaPipe Meet**](https://drive.google.com/file/d/1lnP1bRi9CSqQQXUHa13159vLELYDgDu0/preview)
+ * - [**MediaPipe Selfie**](https://drive.google.com/file/d/1dCfozqknMa068vVsO2j_1FgZkW_e3VWv/preview)
*/
-import { log, join } from '../helpers';
+import { log, join } from '../util';
import * as tf from '../../dist/tfjs.esm.js';
import * as image from '../image/image';
import type { GraphModel, Tensor } from '../tfjs/types';
diff --git a/src/age/age.ts b/src/ssrnet/age.ts
similarity index 88%
rename from src/age/age.ts
rename to src/ssrnet/age.ts
index 36a5d5d3..48ffa4fb 100644
--- a/src/age/age.ts
+++ b/src/ssrnet/age.ts
@@ -1,9 +1,12 @@
/**
- * Module that analyzes person age
- * Obsolete
+ * Age model implementation
+ *
+ * Based on: [**SSR-Net**](https://github.com/shamangary/SSR-Net)
+ *
+ * Obsolete and replaced by `faceres`, which performs age/gender/descriptor analysis
*/
-import { log, join } from '../helpers';
+import { log, join } from '../util';
import * as tf from '../../dist/tfjs.esm.js';
import type { Config } from '../config';
import type { GraphModel, Tensor } from '../tfjs/types';
diff --git a/src/gender/gender.ts b/src/ssrnet/gender.ts
similarity index 94%
rename from src/gender/gender.ts
rename to src/ssrnet/gender.ts
index e432db10..a9b1d91b 100644
--- a/src/gender/gender.ts
+++ b/src/ssrnet/gender.ts
@@ -1,9 +1,12 @@
/**
- * Module that analyzes person gender
- * Obsolete
+ * Gender model implementation
+ *
+ * Based on: [**SSR-Net**](https://github.com/shamangary/SSR-Net)
+ *
+ * Obsolete and replaced by `faceres`, which performs age/gender/descriptor analysis
*/
-import { log, join } from '../helpers';
+import { log, join } from '../util';
import * as tf from '../../dist/tfjs.esm.js';
import type { Config } from '../config';
import type { GraphModel, Tensor } from '../tfjs/types';
diff --git a/src/tfjs/backend.ts b/src/tfjs/backend.ts
index eb1d8102..7251b97e 100644
--- a/src/tfjs/backend.ts
+++ b/src/tfjs/backend.ts
@@ -1,4 +1,6 @@
-import { log, now } from '../helpers';
+/** TFJS backend initialization and customization */
+
+import { log, now } from '../util';
import * as humangl from './humangl';
import * as env from '../env';
import * as tf from '../../dist/tfjs.esm.js';
diff --git a/src/tfjs/humangl.ts b/src/tfjs/humangl.ts
index f4d5fb53..e3b8295e 100644
--- a/src/tfjs/humangl.ts
+++ b/src/tfjs/humangl.ts
@@ -1,9 +1,6 @@
-/**
- * Custom TFJS backend for Human based on WebGL
- * Not used by default
- */
+/** TFJS custom backend registration */
-import { log } from '../helpers';
+import { log } from '../util';
import * as tf from '../../dist/tfjs.esm.js';
import * as image from '../image/image';
import * as models from '../models';
diff --git a/src/tfjs/types.ts b/src/tfjs/types.ts
index d4937c22..45caaa78 100644
--- a/src/tfjs/types.ts
+++ b/src/tfjs/types.ts
@@ -1,6 +1,4 @@
-/**
- * Export common TensorFlow types
- */
+/** TFJS common types exports */
/**
* TensorFlow Tensor type
diff --git a/src/helpers.ts b/src/util.ts
similarity index 100%
rename from src/helpers.ts
rename to src/util.ts
diff --git a/src/warmup.ts b/src/warmup.ts
index 60e094fa..c19e7c0c 100644
--- a/src/warmup.ts
+++ b/src/warmup.ts
@@ -1,4 +1,8 @@
-import { log, now, mergeDeep } from './helpers';
+/**
+ * Warmup algorithm that uses embedded images to exercise loaded models for faster future inference
+ */
+
+import { log, now, mergeDeep } from './util';
import * as sample from './sample';
import * as tf from '../dist/tfjs.esm.js';
import * as image from './image/image';
diff --git a/test/test-main.js b/test/test-main.js
index 1c8cd97c..f9d494ea 100644
--- a/test/test-main.js
+++ b/test/test-main.js
@@ -196,7 +196,7 @@ async function test(Human, inputConfig) {
human.reset();
config.async = true;
config.cacheSensitivity = 0;
- res = await testDetect(human, 'samples/ai-body.jpg', 'default');
+ res = await testDetect(human, 'samples/in/ai-body.jpg', 'default');
if (!res || res?.face?.length !== 1 || res?.face[0].gender !== 'female') log('error', 'failed: default result face mismatch', res?.face?.length, res?.body?.length, res?.hand?.length, res?.gesture?.length);
else log('state', 'passed: default result face match');
@@ -205,13 +205,13 @@ async function test(Human, inputConfig) {
human.reset();
config.async = false;
config.cacheSensitivity = 0;
- res = await testDetect(human, 'samples/ai-body.jpg', 'default');
+ res = await testDetect(human, 'samples/in/ai-body.jpg', 'default');
if (!res || res?.face?.length !== 1 || res?.face[0].gender !== 'female') log('error', 'failed: default sync', res?.face?.length, res?.body?.length, res?.hand?.length, res?.gesture?.length);
else log('state', 'passed: default sync');
// test image processing
const img1 = await human.image(null);
- const img2 = await human.image(await getImage(human, 'samples/ai-face.jpg'));
+ const img2 = await human.image(await getImage(human, 'samples/in/ai-face.jpg'));
if (!img1 || !img2 || img1.tensor !== null || img2.tensor?.shape?.length !== 4) log('error', 'failed: image input', img1?.tensor?.shape, img2?.tensor?.shape);
else log('state', 'passed: image input', img1?.tensor?.shape, img2?.tensor?.shape);
@@ -225,9 +225,9 @@ async function test(Human, inputConfig) {
human.reset();
config.async = false;
config.cacheSensitivity = 0;
- let res1 = await testDetect(human, 'samples/ai-face.jpg', 'default');
- let res2 = await testDetect(human, 'samples/ai-body.jpg', 'default');
- let res3 = await testDetect(human, 'samples/ai-upper.jpg', 'default');
+ let res1 = await testDetect(human, 'samples/in/ai-face.jpg', 'default');
+ let res2 = await testDetect(human, 'samples/in/ai-body.jpg', 'default');
+ let res3 = await testDetect(human, 'samples/in/ai-upper.jpg', 'default');
const desc1 = res1 && res1.face && res1.face[0] && res1.face[0].embedding ? [...res1.face[0].embedding] : null;
const desc2 = res2 && res2.face && res2.face[0] && res2.face[0].embedding ? [...res2.face[0].embedding] : null;
const desc3 = res3 && res3.face && res3.face[0] && res3.face[0].embedding ? [...res3.face[0].embedding] : null;
@@ -257,7 +257,7 @@ async function test(Human, inputConfig) {
log('info', 'test object');
human.reset();
config.object = { enabled: true };
- res = await testDetect(human, 'samples/ai-body.jpg', 'default');
+ res = await testDetect(human, 'samples/in/ai-body.jpg', 'default');
if (!res || res?.object?.length !== 1 || res?.object[0]?.label !== 'person') log('error', 'failed: object result mismatch', res?.object?.length);
else log('state', 'passed: object result match');
@@ -268,7 +268,7 @@ async function test(Human, inputConfig) {
config.face = { detector: { minConfidence: 0.0001, maxDetected: 1 } };
config.body = { minConfidence: 0.0001, maxDetected: 1 };
config.hand = { minConfidence: 0.0001, maxDetected: 3 };
- res = await testDetect(human, 'samples/ai-body.jpg', 'default');
+ res = await testDetect(human, 'samples/in/ai-body.jpg', 'default');
if (!res || res?.face?.length !== 1 || res?.body?.length !== 1 || res?.hand?.length !== 3 || res?.gesture?.length !== 9) log('error', 'failed: sensitive result mismatch', res?.face?.length, res?.body?.length, res?.hand?.length, res?.gesture?.length);
else log('state', 'passed: sensitive result match');
@@ -293,7 +293,7 @@ async function test(Human, inputConfig) {
human.reset();
config.face = { mesh: { enabled: false }, iris: { enabled: false }, description: { enabled: false }, emotion: { enabled: false } };
config.hand = { landmarks: false };
- res = await testDetect(human, 'samples/ai-body.jpg', 'default');
+ res = await testDetect(human, 'samples/in/ai-body.jpg', 'default');
if (!res || res?.face?.length !== 1 || res?.face[0]?.gender || res?.face[0]?.age || res?.face[0]?.embedding) log('error', 'failed: detectors result face mismatch', res?.face);
else log('state', 'passed: detector result face match');
if (!res || res?.hand?.length !== 1 || res?.hand[0]?.landmarks) log('error', 'failed: detectors result hand mismatch', res?.hand?.length);
@@ -302,22 +302,22 @@ async function test(Human, inputConfig) {
// test posenet and movenet
log('info', 'test body variants');
config.body = { modelPath: 'posenet.json' };
- res = await testDetect(human, 'samples/ai-body.jpg', 'posenet');
+ res = await testDetect(human, 'samples/in/ai-body.jpg', 'posenet');
if (!res || res?.body?.length !== 1) log('error', 'failed: body posenet');
else log('state', 'passed: body posenet');
config.body = { modelPath: 'movenet-lightning.json' };
- res = await testDetect(human, 'samples/ai-body.jpg', 'movenet');
+ res = await testDetect(human, 'samples/in/ai-body.jpg', 'movenet');
if (!res || res?.body?.length !== 1) log('error', 'failed: body movenet');
else log('state', 'passed: body movenet');
// test handdetect and handtrack
log('info', 'test hand variants');
config.hand = { enabled: true, maxDetected: 2, minConfidence: 0.1, detector: { modelPath: 'handdetect.json' } };
- res = await testDetect(human, 'samples/ai-body.jpg', 'handdetect');
+ res = await testDetect(human, 'samples/in/ai-body.jpg', 'handdetect');
if (!res || res?.hand?.length !== 2) log('error', 'failed: hand handdetect');
else log('state', 'passed: hand handdetect');
config.hand = { enabled: true, maxDetected: 2, minConfidence: 0.1, detector: { modelPath: 'handtrack.json' } };
- res = await testDetect(human, 'samples/ai-body.jpg', 'handtrack');
+ res = await testDetect(human, 'samples/in/ai-body.jpg', 'handtrack');
if (!res || res?.hand?.length !== 2) log('error', 'failed: hand handdetect');
else log('state', 'passed: hand handdetect');
@@ -326,28 +326,28 @@ async function test(Human, inputConfig) {
const second = new Human(config);
await testDetect(human, null, 'default');
log('info', 'test: first instance');
- await testDetect(first, 'samples/ai-upper.jpg', 'default');
+ await testDetect(first, 'samples/in/ai-upper.jpg', 'default');
log('info', 'test: second instance');
- await testDetect(second, 'samples/ai-upper.jpg', 'default');
+ await testDetect(second, 'samples/in/ai-upper.jpg', 'default');
// test async multiple instances
log('info', 'test: concurrent');
await Promise.all([
- testDetect(human, 'samples/ai-face.jpg', 'default', false),
- testDetect(first, 'samples/ai-face.jpg', 'default', false),
- testDetect(second, 'samples/ai-face.jpg', 'default', false),
- testDetect(human, 'samples/ai-body.jpg', 'default', false),
- testDetect(first, 'samples/ai-body.jpg', 'default', false),
- testDetect(second, 'samples/ai-body.jpg', 'default', false),
- testDetect(human, 'samples/ai-upper.jpg', 'default', false),
- testDetect(first, 'samples/ai-upper.jpg', 'default', false),
- testDetect(second, 'samples/ai-upper.jpg', 'default', false),
+ testDetect(human, 'samples/in/ai-face.jpg', 'default', false),
+ testDetect(first, 'samples/in/ai-face.jpg', 'default', false),
+ testDetect(second, 'samples/in/ai-face.jpg', 'default', false),
+ testDetect(human, 'samples/in/ai-body.jpg', 'default', false),
+ testDetect(first, 'samples/in/ai-body.jpg', 'default', false),
+ testDetect(second, 'samples/in/ai-body.jpg', 'default', false),
+ testDetect(human, 'samples/in/ai-upper.jpg', 'default', false),
+ testDetect(first, 'samples/in/ai-upper.jpg', 'default', false),
+ testDetect(second, 'samples/in/ai-upper.jpg', 'default', false),
]);
// test monkey-patch
globalThis.Canvas = canvasJS.Canvas; // monkey-patch to use external canvas library
globalThis.ImageData = canvasJS.ImageData; // monkey-patch to use external canvas library
- const inputImage = await canvasJS.loadImage('samples/ai-face.jpg'); // load image using canvas library
+ const inputImage = await canvasJS.loadImage('samples/in/ai-face.jpg'); // load image using canvas library
const inputCanvas = new canvasJS.Canvas(inputImage.width, inputImage.height); // create canvas
const ctx = inputCanvas.getContext('2d');
ctx.drawImage(inputImage, 0, 0); // draw input image onto canvas
diff --git a/test/test-node-canvas.js b/test/test-node-canvas.js
new file mode 100644
index 00000000..0e0537f5
--- /dev/null
+++ b/test/test-node-canvas.js
@@ -0,0 +1,90 @@
+const fs = require('fs');
+const path = require('path');
+const process = require('process');
+const log = require('@vladmandic/pilogger');
+const canvas = require('canvas');
+const tf = require('@tensorflow/tfjs-node'); // for nodejs, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
+const Human = require('../dist/human.node.js'); // alternatively use 'const Human = require('../dist/human.node-gpu.js').default;'
+
+const config = { // just enable all and leave default settings
+ debug: true,
+ async: false,
+ cacheSensitivity: 0,
+ face: { enabled: true },
+ hand: { enabled: true },
+ body: { enabled: true },
+ object: { enabled: true },
+ gesture: { enabled: true },
+ /*
+ face: { enabled: true, detector: { minConfidence: 0.1 } },
+ hand: { enabled: true, maxDetected: 2, minConfidence: 0.1, detector: { modelPath: 'handtrack.json' } }, // use alternative hand model
+ body: { enabled: true, minConfidence: 0.1 },
+ object: { enabled: true, minConfidence: 0.1 },
+ gesture: { enabled: true },
+ */
+};
+
+async function main() {
+ log.header();
+
+ globalThis.Canvas = canvas.Canvas; // patch global namespace with canvas library
+ globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library
+
+ const human = new Human.Human(config); // create instance of human
+ log.info('Human:', human.version);
+ const configErrors = await human.validate();
+ if (configErrors.length > 0) log.error('Configuration errors:', configErrors);
+ await human.load(); // pre-load models
+ log.info('Loaded models:', Object.keys(human.models).filter((a) => human.models[a]));
+
+ const inDir = process.argv[2];
+ const outDir = process.argv[3];
+ if (process.argv.length !== 4) {
+ log.error('Parameters: missing');
+ return;
+ }
+ if (!fs.existsSync(inDir) || !fs.statSync(inDir).isDirectory() || !fs.existsSync(outDir) || !fs.statSync(outDir).isDirectory()) {
+    log.error('Invalid directory specified:', 'input:', fs.existsSync(inDir) && fs.statSync(inDir).isDirectory(), 'output:', fs.existsSync(outDir) && fs.statSync(outDir).isDirectory());
+ return;
+ }
+
+ const dir = fs.readdirSync(inDir);
+ const images = dir.filter((f) => fs.statSync(path.join(inDir, f)).isFile() && (f.toLocaleLowerCase().endsWith('.jpg') || f.toLocaleLowerCase().endsWith('.jpeg')));
+ log.info(`Processing folder: ${inDir} entries:`, dir.length, 'images', images.length);
+ for (const image of images) {
+ const inFile = path.join(inDir, image);
+ /*
+ const inputImage = await canvas.loadImage(inFile); // load image using canvas library
+ log.state('Loaded image:', inFile, inputImage.width, inputImage.height);
+ const inputCanvas = new canvas.Canvas(inputImage.width, inputImage.height); // create canvas
+ const inputCtx = inputCanvas.getContext('2d');
+ inputCtx.drawImage(inputImage, 0, 0); // draw input image onto canvas
+ */
+ const buffer = fs.readFileSync(inFile);
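+    // decode the image buffer into a float32 tensor with a batch dimension, as expected by human.detect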
+ const tensor = human.tf.tidy(() => {
+ const decode = human.tf.node.decodeImage(buffer, 3);
+ const expand = human.tf.expandDims(decode, 0);
+ const cast = human.tf.cast(expand, 'float32');
+ return cast;
+ });
+ log.state('Loaded image:', inFile, tensor.shape);
+
+ const result = await human.detect(tensor);
+ tf.dispose(tensor);
+ log.data(`Detected: ${image}:`, 'Face:', result.face.length, 'Body:', result.body.length, 'Hand:', result.hand.length, 'Objects:', result.object.length, 'Gestures:', result.gesture.length);
+
+ const outputCanvas = new canvas.Canvas(tensor.shape[2], tensor.shape[1]); // create canvas
+ const outputCtx = outputCanvas.getContext('2d');
+ const inputImage = await canvas.loadImage(buffer); // load image using canvas library
+ outputCtx.drawImage(inputImage, 0, 0); // draw input image onto canvas
+    human.draw.all(outputCanvas, result); // use human built-in method to draw results as overlays on canvas
+ const outFile = path.join(outDir, image);
+ const outStream = fs.createWriteStream(outFile); // write canvas to new image file
+ outStream.on('finish', () => log.state('Output image:', outFile, outputCanvas.width, outputCanvas.height));
+ outStream.on('error', (err) => log.error('Output error:', outFile, err));
+ const stream = outputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
+ stream.pipe(outStream);
+ }
+}
+
+main();
diff --git a/wiki b/wiki
index a0497b6d..c4642bde 160000
--- a/wiki
+++ b/wiki
@@ -1 +1 @@
-Subproject commit a0497b6d14059099b2764b8f70390f4b6af8db9f
+Subproject commit c4642bde54506afd70a5fc32617414fa84b9fc0e