mirror of https://github.com/vladmandic/human
Compare commits
78 Commits
- a6fd9a41c1
- 7e7c6d2ea2
- 5208b9ec2d
- f515b9c20d
- 5a51889edb
- 745fd626a3
- c1dc719a67
- 2b0a2fecc2
- 38922fe92d
- c80540a934
- 49b25830b4
- df73c8247f
- dd186ab065
- a2acfc433e
- 644235433d
- 42dfe18736
- c5b7b43fca
- 715210db51
- 9e2c612c1f
- 862de3e6c8
- 1114014bfd
- 001a3d58ea
- d7e66afe1f
- a2fedaba40
- 62396317f5
- 15a6de03de
- c55279ca82
- 6902405342
- b0e6aa57de
- 83964b02b1
- 9d1239301c
- 709e5100d8
- 1ff7992563
- 6280f69299
- c1bea7d585
- 957644e216
- 0e247768ff
- 7b093c44d5
- f0b7285d67
- 3e30aa6e42
- ad54b34b07
- d1bcd25b3d
- 9a19d051a3
- d1a3b3944e
- 9dd8663e9e
- acf6bead21
- 73544e6c1b
- b72d592647
- e72a7808fb
- e30d072ebf
- adbab08203
- 073c6c519d
- 059ebe5e36
- da3cf359fd
- c8571ad8e2
- cca0102bbc
- 97b6cb152c
- 1bf65413fe
- 770f433e1a
- fa908be5bb
- 3aaea20eb4
- eb53988f90
- 6fb4d04df3
- 870433ece2
- e75bd0e26b
- bd994ffc77
- 22062e5b7c
- 3191666d8d
- f82cdcc7f1
- 41e5541b5a
- 35419b581e
- ddfc3c7e1b
- 37f8175218
- 42217152f9
- 5de785558b
- ebc9c72567
- cb3646652e
- 5156b18f4f
|
@ -1,9 +1,8 @@
|
|||
{
|
||||
"$schema": "https://developer.microsoft.com/json-schemas/api-extractor/v7/api-extractor.schema.json",
|
||||
"mainEntryPointFilePath": "types/lib/src/human.d.ts",
|
||||
"bundledPackages": ["@tensorflow/tfjs-core", "@tensorflow/tfjs-converter"],
|
||||
"compiler": {
|
||||
"skipLibCheck": false
|
||||
"skipLibCheck": true
|
||||
},
|
||||
"newlineKind": "lf",
|
||||
"dtsRollup": {
|
||||
|
|
|
@ -160,6 +160,15 @@
|
|||
"output": "demo/faceid/index.js",
|
||||
"sourcemap": true,
|
||||
"external": ["*/human.esm.js"]
|
||||
},
|
||||
{
|
||||
"name": "demo/tracker",
|
||||
"platform": "browser",
|
||||
"format": "esm",
|
||||
"input": "demo/tracker/index.ts",
|
||||
"output": "demo/tracker/index.js",
|
||||
"sourcemap": true,
|
||||
"external": ["*/human.esm.js"]
|
||||
}
|
||||
]
|
||||
},
|
||||
|
|
|
@ -1,5 +1,9 @@
|
|||
{
|
||||
"globals": {},
|
||||
"globals": {
|
||||
},
|
||||
"rules": {
|
||||
"@typescript-eslint/no-require-imports":"off"
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"files": ["**/*.ts"],
|
||||
|
@ -34,6 +38,8 @@
|
|||
"@typescript-eslint/no-unsafe-call":"off",
|
||||
"@typescript-eslint/no-unsafe-member-access":"off",
|
||||
"@typescript-eslint/no-unsafe-return":"off",
|
||||
"@typescript-eslint/no-require-imports":"off",
|
||||
"@typescript-eslint/no-empty-object-type":"off",
|
||||
"@typescript-eslint/non-nullable-type-assertion-style":"off",
|
||||
"@typescript-eslint/prefer-for-of":"off",
|
||||
"@typescript-eslint/prefer-nullish-coalescing":"off",
|
||||
|
@ -155,9 +161,7 @@
|
|||
"node": false,
|
||||
"es2021": false
|
||||
},
|
||||
"extends": [
|
||||
"plugin:json/recommended"
|
||||
]
|
||||
"extends": []
|
||||
},
|
||||
{
|
||||
"files": ["**/*.html"],
|
||||
|
@ -173,6 +177,7 @@
|
|||
"extends": ["plugin:@html-eslint/recommended"],
|
||||
"rules": {
|
||||
"@html-eslint/element-newline":"off",
|
||||
"@html-eslint/attrs-newline":"off",
|
||||
"@html-eslint/indent": ["error", 2]
|
||||
}
|
||||
},
|
||||
|
@ -210,6 +215,7 @@
|
|||
"demo/helpers/*.js",
|
||||
"demo/typescript/*.js",
|
||||
"demo/faceid/*.js",
|
||||
"demo/tracker/*.js",
|
||||
"typedoc"
|
||||
]
|
||||
}
|
||||
|
|
|
@ -1,4 +1,9 @@
|
|||
node_modules/
|
||||
types/lib
|
||||
pnpm-lock.yaml
|
||||
package-lock.json
|
||||
*.swp
|
||||
samples/**/*.mp4
|
||||
samples/**/*.webm
|
||||
temp
|
||||
tmp
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
{
|
||||
"MD012": false,
|
||||
"MD013": false,
|
||||
"MD029": false,
|
||||
"MD033": false,
|
||||
"MD036": false,
|
||||
"MD041": false
|
||||
|
|
3
.npmrc
|
@ -1,4 +1,5 @@
|
|||
force=true
|
||||
production=true
|
||||
omit=dev
|
||||
legacy-peer-deps=true
|
||||
strict-peer-dependencies=false
|
||||
node-options='--no-deprecation'
|
||||
|
|
100
CHANGELOG.md
|
@ -1,6 +1,6 @@
|
|||
# @vladmandic/human
|
||||
|
||||
Version: **3.0.1**
|
||||
Version: **3.3.5**
|
||||
Description: **Human: AI-powered 3D Face Detection & Rotation Tracking, Face Description & Recognition, Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction, Gesture Recognition**
|
||||
|
||||
Author: **Vladimir Mandic <mandic00@live.com>**
|
||||
|
@ -9,11 +9,103 @@
|
|||
|
||||
## Changelog
|
||||
|
||||
### **3.3.5** 2025/02/05 mandic00@live.com
|
||||
|
||||
|
||||
### **origin/main** 2024/10/24 mandic00@live.com
|
||||
|
||||
- add human.draw.tensor method
|
||||
|
||||
### **3.3.4** 2024/10/24 mandic00@live.com
|
||||
|
||||
|
||||
### **3.3.3** 2024/10/14 mandic00@live.com
|
||||
|
||||
- add loaded property to model stats and mark models not loaded correctly.
|
||||
- release build
|
||||
|
||||
### **3.3.2** 2024/09/11 mandic00@live.com
|
||||
|
||||
- full rebuild
|
||||
|
||||
### **3.3.1** 2024/09/11 mandic00@live.com
|
||||
|
||||
- add config.face.detector.square option
|
||||
- human 3.3 alpha test run
|
||||
- human 3.3 alpha with new build environment
|
||||
- release rebuild
|
||||
- fix flazeface tensor scale and update build platform
|
||||
|
||||
### **3.2.2** 2024/04/17 mandic00@live.com
|
||||
|
||||
|
||||
### **release: 3.2.1** 2024/02/15 mandic00@live.com
|
||||
|
||||
|
||||
### **3.2.1** 2024/02/15 mandic00@live.com
|
||||
|
||||
|
||||
### **3.2.0** 2023/12/06 mandic00@live.com
|
||||
|
||||
- set browser false when navigator object is empty
|
||||
- https://github.com/vladmandic/human/issues/402
|
||||
|
||||
### **release: 3.1.2** 2023/09/18 mandic00@live.com
|
||||
|
||||
- full rebuild
|
||||
|
||||
### **3.1.2** 2023/09/18 mandic00@live.com
|
||||
|
||||
- major toolkit upgrade
|
||||
- full rebuild
|
||||
- major toolkit upgrade
|
||||
|
||||
### **3.1.1** 2023/08/05 mandic00@live.com
|
||||
|
||||
- fixes plus tfjs upgrade for new release
|
||||
|
||||
### **3.0.7** 2023/06/12 mandic00@live.com
|
||||
|
||||
- full rebuild
|
||||
- fix memory leak in histogramequalization
|
||||
- initial work on tracker
|
||||
|
||||
### **3.0.6** 2023/03/21 mandic00@live.com
|
||||
|
||||
- add optional crop to multiple models
|
||||
- fix movenet-multipose
|
||||
- add electron detection
|
||||
- fix gender-ssrnet-imdb
|
||||
- add movenet-multipose workaround
|
||||
- rebuild and publish
|
||||
- add face.detector.minsize configurable setting
|
||||
- add affectnet
|
||||
|
||||
### **3.0.5** 2023/02/02 mandic00@live.com
|
||||
|
||||
- add gear-e models
|
||||
- detect react-native
|
||||
- redo blazeface annotations
|
||||
|
||||
### **3.0.4** 2023/01/29 mandic00@live.com
|
||||
|
||||
- make naviator calls safe
|
||||
- fix facedetector-only configs
|
||||
|
||||
### **3.0.3** 2023/01/07 mandic00@live.com
|
||||
|
||||
- full rebuild
|
||||
|
||||
### **3.0.2** 2023/01/06 mandic00@live.com
|
||||
|
||||
- default face.rotation disabled
|
||||
|
||||
### **release: 3.0.1** 2022/11/22 mandic00@live.com
|
||||
|
||||
|
||||
### **3.0.1** 2022/11/22 mandic00@live.com
|
||||
|
||||
|
||||
### **origin/main** 2022/11/22 mandic00@live.com
|
||||
|
||||
- support dynamic loads
|
||||
- polish demos
|
||||
- add facedetect demo and fix model async load
|
||||
- enforce markdown linting
|
||||
|
|
46
README.md
|
@ -4,7 +4,6 @@
|
|||

|
||||

|
||||

|
||||

|
||||
|
||||
# Human Library
|
||||
|
||||
|
@ -31,15 +30,20 @@
|
|||
|
||||
## Compatibility
|
||||
|
||||
- **Browser**:
|
||||
Compatible with both desktop and mobile platforms
|
||||
Compatible with *CPU*, *WebGL*, *WASM* backends
|
||||
Compatible with *WebWorker* execution
|
||||
Compatible with *WebView*
|
||||
- **NodeJS**:
|
||||
Compatible with *WASM* backend for execution on architectures where *tensorflow* binaries are not available
|
||||
Compatible with *tfjs-node* using software execution via *tensorflow* shared libraries
|
||||
Compatible with *tfjs-node* using GPU-accelerated execution via *tensorflow* shared libraries and nVidia CUDA
|
||||
**Browser**:
|
||||
- Compatible with both desktop and mobile platforms
|
||||
- Compatible with *WebGPU*, *WebGL*, *WASM*, *CPU* backends
|
||||
- Compatible with *WebWorker* execution
|
||||
- Compatible with *WebView*
|
||||
- Primary platform: *Chromium*-based browsers
|
||||
- Secondary platform: *Firefox*, *Safari*
|
||||
|
||||
**NodeJS**:
|
||||
- Compatible with *WASM* backend for execution on architectures where *tensorflow* binaries are not available
|
||||
- Compatible with *tfjs-node* using software execution via *tensorflow* shared libraries
|
||||
- Compatible with *tfjs-node* using GPU-accelerated execution via *tensorflow* shared libraries and nVidia CUDA
|
||||
- Supported versions are from **14.x** to **22.x**
|
||||
- NodeJS version **23.x** is not supported due to breaking changes and issues with `@tensorflow/tfjs`
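For orientation, a minimal sketch of picking a backend per environment using the backend names listed above; leaving `backend` unset lets Human auto-select, so the explicit choice here is illustrative only:

```js
// minimal sketch, assuming documented Human config backend names; not part of the README above
import { Human } from '@vladmandic/human';

const isBrowser = typeof navigator !== 'undefined';
const human = new Human({
  backend: isBrowser ? 'webgl' : 'tensorflow', // use 'wasm' where native tensorflow binaries are not available
});
await human.load();
console.log('backend:', human.tf.getBackend());
```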
|
||||
|
||||
<br>
|
||||
|
||||
|
@ -69,7 +73,7 @@
|
|||
|
||||
- **Full** [[*Live*]](https://vladmandic.github.io/human/demo/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo): Main browser demo app that showcases all Human capabilities
|
||||
- **Simple** [[*Live*]](https://vladmandic.github.io/human/demo/typescript/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/typescript): Simple WebCam processing demo in TypeScript
|
||||
- **Embedded** [[*Live*]](https://vladmandic.github.io/human/demo/video/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/video/index.html): Even simpler demo with tiny code embedded in HTML file
|
||||
- **Embedded** [[*Live*]](https://vladmandic.github.io/human/demo/video/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/video/index.html): Even simpler demo with tiny code embedded in HTML file
|
||||
- **Face Detect** [[*Live*]](https://vladmandic.github.io/human/demo/facedetect/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facedetect): Extracts faces from images and processes details
|
||||
- **Face Match** [[*Live*]](https://vladmandic.github.io/human/demo/facematch/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Extracts faces from images, calculates face descriptors and similarities, and matches them against a known database
|
||||
- **Face ID** [[*Live*]](https://vladmandic.github.io/human/demo/faceid/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/faceid): Runs multiple checks to validate webcam input before performing face match against faces stored in IndexedDB
|
||||
|
@ -85,14 +89,14 @@
|
|||
*NodeJS demos may require extra dependencies which are used to decode inputs*
|
||||
*See the header of each demo for its dependencies, as they are not automatically installed with `Human`*
|
||||
|
||||
- **Main** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Process images from files, folders or URLs using native methods
|
||||
- **Canvas** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Process image from file or URL and draw results to a new image file using `node-canvas`
|
||||
- **Video** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Processing of video input using `ffmpeg`
|
||||
- **WebCam** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Processing of webcam screenshots using `fswebcam`
|
||||
- **Events** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Showcases usage of `Human` eventing to get notifications on processing
|
||||
- **Similarity** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Compares two input images for similarity of detected faces
|
||||
- **Face Match** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Parallel processing of face **match** in multiple child worker threads
|
||||
- **Multiple Workers** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Runs multiple parallel `human` instances by dispatching them to a pool of pre-created worker processes
|
||||
- **Main** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node.js): Process images from files, folders or URLs using native methods
|
||||
- **Canvas** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-canvas.js): Process image from file or URL and draw results to a new image file using `node-canvas`
|
||||
- **Video** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-video.js): Processing of video input using `ffmpeg`
|
||||
- **WebCam** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-webcam.js): Processing of webcam screenshots using `fswebcam`
|
||||
- **Events** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-event.js): Showcases usage of `Human` eventing to get notifications on processing
|
||||
- **Similarity** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-similarity.js): Compares two input images for similarity of detected faces
|
||||
- **Face Match** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch/node-match.js): Parallel processing of face **match** in multiple child worker threads
|
||||
- **Multiple Workers** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread/node-multiprocess.js): Runs multiple parallel `human` instances by dispatching them to a pool of pre-created worker processes
|
||||
- **Dynamic Load** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Loads Human dynamically with multiple different desired backends
|
||||
|
||||
## Project pages
|
||||
|
@ -241,7 +245,7 @@ draw output on screen using internal draw helper functions
|
|||
```js
|
||||
// create instance of human with simple configuration using default values
|
||||
const config = { backend: 'webgl' };
|
||||
const human = new Human(config);
|
||||
const human = new Human.Human(config);
|
||||
// select input HTMLVideoElement and output HTMLCanvasElement from page
|
||||
const inputVideo = document.getElementById('video-id');
|
||||
const outputCanvas = document.getElementById('canvas-id');
|
||||
|
@ -444,7 +448,7 @@ For more info, see [**Configuration Details**](https://github.com/vladmandic/hum
|
|||
|
||||
<br><hr><br>
|
||||
|
||||
`Human` library is written in [TypeScript](https://www.typescriptlang.org/docs/handbook/intro.html) **4.9** using [TensorFlow/JS](https://www.tensorflow.org/js/) **4.1** and conforming to latest `JavaScript` [ECMAScript version 2022](https://262.ecma-international.org/) standard
|
||||
`Human` library is written in [TypeScript](https://www.typescriptlang.org/docs/handbook/intro.html) **5.1** using [TensorFlow/JS](https://www.tensorflow.org/js/) **4.10** and conforming to latest `JavaScript` [ECMAScript version 2022](https://262.ecma-international.org/) standard
|
||||
|
||||
Build target for distributables is `JavaScript` [ECMAScript version 2018](https://262.ecma-international.org/9.0/)
|
||||
|
||||
|
|
81
TODO.md
|
@ -2,21 +2,6 @@
|
|||
|
||||
## Work-in-Progress
|
||||
|
||||
|
||||
<hr><br>
|
||||
|
||||
## Exploring
|
||||
|
||||
- **Optical flow** for intelligent temporal interpolation
|
||||
<https://docs.opencv.org/3.3.1/db/d7f/tutorial_js_lucas_kanade.html>
|
||||
- **CLAHE** advanced histogram equalization for optimization of badly lit scenes
|
||||
- **TFLite** models
|
||||
<https://js.tensorflow.org/api_tflite/0.0.1-alpha.4/>
|
||||
- **Principal Components Analysis** for reduction of descriptor complexity
|
||||
<https://github.com/vladmandic/human-match/blob/main/src/pca.js>
|
||||
- **Temporal guidance** for face/body segmentation
|
||||
<https://github.com/PeterL1n/RobustVideoMatting>
|
||||
|
||||
<hr><br>
|
||||
|
||||
## Known Issues & Limitations
|
||||
|
@ -31,10 +16,9 @@ No issues with default model `FaceMesh`
|
|||
`NanoDet` model is not supported when using `WASM` backend due to missing kernel op in **TFJS**
|
||||
No issues with default model `MB3-CenterNet`
|
||||
|
||||
### WebGPU
|
||||
## Body Detection using MoveNet-MultiPose
|
||||
|
||||
Experimental support only until support is officially added in Chromium
|
||||
Enable via <chrome://flags/#enable-unsafe-webgpu>
|
||||
Model does not return valid detection scores (all other functionality is not impacted)
|
||||
|
||||
### Firefox
|
||||
|
||||
|
@ -47,61 +31,8 @@ Enable via `about:config` -> `gfx.offscreencanvas.enabled`
|
|||
No support for running in **web workers** as Safari still does not support `OffscreenCanvas`
|
||||
[Details](https://developer.mozilla.org/en-US/docs/Web/API/OffscreenCanvas#browser_compatibility)
|
||||
|
||||
## React-Native
|
||||
|
||||
`Human` support for **React-Native** is best-effort, but not part of the main development focus
|
||||
|
||||
<hr><br>
|
||||
|
||||
## Pending Release Changes
|
||||
|
||||
Optimizations:
|
||||
- Enabled high-resolution optimizations
|
||||
Internal limits are increased from **2k** to **4k**
|
||||
- Enhanced device capabilities detection
|
||||
See `human.env.[agent, wasm, webgl, webgpu]` for details
|
||||
- If `config.backend` is not set, Human will auto-select best backend
|
||||
based on device capabilities
|
||||
- Enhanced support for `webgpu`
|
||||
- Reduce build dependencies
|
||||
`Human` is now 30% smaller :)
|
||||
As usual, `Human` has **zero** runtime dependencies,
|
||||
all *devDependencies* are only to rebuild `Human` itself
|
||||
- Default hand skeleton model changed from `handlandmark-full` to `handlandmark-lite`
|
||||
Both models are still supported; this reduces default size and increases performance
|
||||
|
||||
Features:
|
||||
- Add [draw label templates](https://github.com/vladmandic/human/wiki/Draw)
|
||||
Allows easy customization of results labels drawn on canvas
|
||||
- Add `config.filter.autoBrightness` (*enabled by default*)
|
||||
Per-frame video on-the-fly brightness adjustments
|
||||
which significantly increases performance and precision in poorly lit scenes
|
||||
- Add new demo [face detect](https://vladmandic.github.io/human/demo/facedetect/index.html)
|
||||
- Improved `config.filter.equalization` (*disabled by default*)
|
||||
Image and video on-demand histogram equalization
|
||||
- Support selecting specific video source when multiple cameras are present
|
||||
See `human.webcam.enumerate()`
|
||||
- Updated algorithm to determine distance from camera based on iris size
|
||||
See `human.result.face[n].distance`
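A short, hedged sketch of two of the features above, webcam source selection via `human.webcam.enumerate()` and the new distance result; the element id and unit assumption are placeholders, not part of the release notes:

```js
// sketch only; assumes a <video id="video"> element and that distance is reported in meters
import { Human } from '@vladmandic/human';

const human = new Human({ filter: { autoBrightness: true } }); // per-frame brightness adjustment, enabled by default
const video = document.getElementById('video');
const devices = await human.webcam.enumerate();                 // list available camera sources
await human.webcam.start({ element: video, id: devices[0]?.deviceId });
const result = await human.detect(video);
console.log('distance from camera:', result.face[0]?.distance); // replaces the old result.face[n].iris
```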
|
||||
|
||||
Architecture:
|
||||
- Upgrade to **TFJS 4.1** with **strong typing**
|
||||
see [notes](https://github.com/vladmandic/human#typedefs) on how to use
|
||||
- `TypeDef` refactoring
|
||||
- Re-architect `human.models` namespace for better dynamic model handling
|
||||
Added additional methods `load`, `list`, `loaded`, `reset`
|
||||
- Repack external typedefs
|
||||
Removes all external typedef dependencies
|
||||
- Refactor namespace exports
|
||||
Better [TypeDoc specs](https://vladmandic.github.io/human/typedoc/index.html)
|
||||
- Add named export for improved bundler support when using non-default imports
|
||||
- Cleanup Git history for `dist`/`typedef`/`types`
|
||||
- Cleanup `@vladmandic/human-models`
|
||||
- Support for **NodeJS v19**
|
||||
- Upgrade to **TypeScript 4.9**
|
||||
- Support for dynamic module load in **NodeJS**
|
||||
See <https://vladmandic.github.io/human/demo/nodejs/node-bench>
|
||||
|
||||
Breaking changes:
|
||||
- Replaced `result.face[n].iris` with `result.face[n].distance`
|
||||
- Replaced `human.getModelStats()` with `human.models.stats()`
|
||||
- Moved `human.similarity`, `human.distance` and `human.match` to namespace `human.match.*`
|
||||
- Obsolete `human.enhance()`
|
||||
- Obsolete `human.gl`
|
||||
- Renamed model `mb3-centernet` to `centernet`
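A hedged before/after sketch of the renamed APIs above; the element id and optional chaining are illustrative assumptions:

```js
// migration sketch, assuming default configuration; pre-3.0 calls shown in trailing comments
import { Human } from '@vladmandic/human';

const human = new Human();
await human.load();
console.log(human.models.stats());                                  // was: human.getModelStats()

const result = await human.detect(document.getElementById('video-id'));
const distance = result.face[0]?.distance;                          // was: result.face[0].iris
const descriptor = result.face[0]?.embedding || [];
const similarity = human.match.similarity(descriptor, descriptor);  // was: human.similarity(a, b)
console.log({ distance, similarity });
```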
|
||||
|
|
28
build.js
|
@ -121,18 +121,22 @@ async function main() {
|
|||
|
||||
// run api-extractor to create typedef rollup
|
||||
const extractorConfig = APIExtractor.ExtractorConfig.loadFileAndPrepare('.api-extractor.json');
|
||||
const extractorResult = APIExtractor.Extractor.invoke(extractorConfig, {
|
||||
localBuild: true,
|
||||
showVerboseMessages: false,
|
||||
messageCallback: (msg) => {
|
||||
msg.handled = true;
|
||||
if (msg.logLevel === 'none' || msg.logLevel === 'verbose' || msg.logLevel === 'info') return;
|
||||
if (msg.sourceFilePath?.includes('/node_modules/')) return;
|
||||
// if (apiExtractorIgnoreList.reduce((prev, curr) => prev || msg.messageId.includes(curr), false)) return; // those are external issues outside of human control
|
||||
log.data('API', { level: msg.logLevel, category: msg.category, id: msg.messageId, file: msg.sourceFilePath, line: msg.sourceFileLine, text: msg.text });
|
||||
},
|
||||
});
|
||||
log.state('API-Extractor:', { succeeeded: extractorResult.succeeded, errors: extractorResult.errorCount, warnings: extractorResult.warningCount });
|
||||
try {
|
||||
const extractorResult = APIExtractor.Extractor.invoke(extractorConfig, {
|
||||
localBuild: true,
|
||||
showVerboseMessages: false,
|
||||
messageCallback: (msg) => {
|
||||
msg.handled = true;
|
||||
if (msg.logLevel === 'none' || msg.logLevel === 'verbose' || msg.logLevel === 'info') return;
|
||||
if (msg.sourceFilePath?.includes('/node_modules/')) return;
|
||||
// if (apiExtractorIgnoreList.reduce((prev, curr) => prev || msg.messageId.includes(curr), false)) return; // those are external issues outside of human control
|
||||
log.data('API', { level: msg.logLevel, category: msg.category, id: msg.messageId, file: msg.sourceFilePath, line: msg.sourceFileLine, text: msg.text });
|
||||
},
|
||||
});
|
||||
log.state('API-Extractor:', { succeeeded: extractorResult.succeeded, errors: extractorResult.errorCount, warnings: extractorResult.warningCount });
|
||||
} catch (err) {
|
||||
log.error('API-Extractor:', err);
|
||||
}
|
||||
regExFile('types/human.d.ts', regEx);
|
||||
writeFile('export * from \'../types/human\';', 'dist/human.esm-nobundle.d.ts');
|
||||
writeFile('export * from \'../types/human\';', 'dist/human.esm.d.ts');
|
||||
|
|
|
@ -8,6 +8,7 @@ For details on other demos see Wiki: [**Demos**](https://github.com/vladmandic/h
|
|||
`index.html`: Full demo using `Human` ESM module running in Browsers,
|
||||
|
||||
Includes:
|
||||
|
||||
- Selectable inputs:
|
||||
- Sample images
|
||||
- Image via drag & drop
|
||||
|
|
|
@ -10,12 +10,13 @@ import { Human } from '../../dist/human.esm.js';
|
|||
let loader;
|
||||
|
||||
const humanConfig = { // user configuration for human, used to fine-tune behavior
|
||||
cacheSensitivity: 0,
|
||||
debug: true,
|
||||
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
|
||||
filter: { enabled: true, equalization: false, flip: false },
|
||||
face: {
|
||||
enabled: true,
|
||||
detector: { rotation: true, maxDetected: 100, minConfidence: 0.2, return: true },
|
||||
detector: { rotation: false, maxDetected: 100, minConfidence: 0.2, return: true, square: false },
|
||||
iris: { enabled: true },
|
||||
description: { enabled: true },
|
||||
emotion: { enabled: true },
|
||||
|
@ -93,7 +94,7 @@ function addFace(face, source) {
|
|||
e.preventDefault();
|
||||
document.getElementById('description').innerHTML = canvas.title;
|
||||
};
|
||||
human.tf.browser.toPixels(face.tensor, canvas);
|
||||
human.draw.tensor(face.tensor, canvas);
|
||||
human.tf.dispose(face.tensor);
|
||||
return canvas;
|
||||
}
|
||||
|
@ -149,7 +150,7 @@ async function main() {
|
|||
showLoader('compiling models');
|
||||
await human.warmup();
|
||||
showLoader('loading images');
|
||||
const images = ['group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg', 'solvay1927.jpg', 'stock-group-1.jpg', 'stock-group-2.jpg'];
|
||||
const images = ['group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg', 'solvay1927.jpg', 'stock-group-1.jpg', 'stock-group-2.jpg', 'stock-models-6.jpg', 'stock-models-7.jpg'];
|
||||
const imageUris = images.map((a) => `../../samples/in/${a}`);
|
||||
for (let i = 0; i < imageUris.length; i++) addImage(imageUris[i]);
|
||||
initDragAndDrop();
|
||||
|
|
File diff suppressed because one or more lines are too long
|
@ -11,13 +11,13 @@ import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
|
|||
import * as indexDb from './indexdb'; // methods to deal with indexdb
|
||||
|
||||
const humanConfig = { // user configuration for human, used to fine-tune behavior
|
||||
cacheSensitivity: 0,
|
||||
cacheSensitivity: 0.01,
|
||||
modelBasePath: '../../models',
|
||||
filter: { enabled: true, equalization: true }, // let's run with the histogram equalizer
|
||||
debug: true,
|
||||
face: {
|
||||
enabled: true,
|
||||
detector: { rotation: true, return: true, cropFactor: 1.6, mask: false }, // return tensor is used to get detected face image
|
||||
detector: { rotation: true, return: true, mask: false }, // return tensor is used to get detected face image
|
||||
description: { enabled: true }, // default model for face descriptor extraction is faceres
|
||||
// mobilefacenet: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/mobilefacenet.json' }, // alternative model
|
||||
// insightface: { enabled: true, modelPath: 'https://vladmandic.github.io/insightface/models/insightface-mobilenet-swish.json' }, // alternative model
|
||||
|
@ -46,7 +46,6 @@ const options = {
|
|||
distanceMax: 1.0, // farthest the face is allowed to be from the camera in cm
|
||||
mask: humanConfig.face.detector.mask,
|
||||
rotation: humanConfig.face.detector.rotation,
|
||||
cropFactor: humanConfig.face.detector.cropFactor,
|
||||
...matchOptions,
|
||||
};
|
||||
|
||||
|
@ -241,7 +240,7 @@ async function detectFace() {
|
|||
if (!current?.face?.tensor || !current?.face?.embedding) return false;
|
||||
console.log('face record:', current.face); // eslint-disable-line no-console
|
||||
log(`detected face: ${current.face.gender} ${current.face.age || 0}y distance ${100 * (current.face.distance || 0)}cm/${Math.round(100 * (current.face.distance || 0) / 2.54)}in`);
|
||||
await human.tf.browser.toPixels(current.face.tensor, dom.canvas);
|
||||
await human.draw.tensor(current.face.tensor, dom.canvas);
|
||||
if (await indexDb.count() === 0) {
|
||||
log('face database is empty: nothing to compare face with');
|
||||
document.body.style.background = 'black';
|
||||
|
|
|
@ -11,7 +11,7 @@ const userConfig = {
|
|||
backend: 'humangl',
|
||||
async: true,
|
||||
warmup: 'none',
|
||||
cacheSensitivity: 0,
|
||||
cacheSensitivity: 0.01,
|
||||
debug: true,
|
||||
modelBasePath: '../../models/',
|
||||
deallocate: true,
|
||||
|
@ -22,7 +22,6 @@ const userConfig = {
|
|||
},
|
||||
face: {
|
||||
enabled: true,
|
||||
// detector: { rotation: false, return: true, maxDetected: 50, iouThreshold: 0.206, minConfidence: 0.122 },
|
||||
detector: { return: true, rotation: true, maxDetected: 50, iouThreshold: 0.01, minConfidence: 0.2 },
|
||||
mesh: { enabled: true },
|
||||
iris: { enabled: false },
|
||||
|
@ -71,7 +70,7 @@ async function selectFaceCanvas(face) {
|
|||
if (face.tensor) {
|
||||
title('Sorting Faces by Similarity');
|
||||
const c = document.getElementById('orig');
|
||||
await human.tf.browser.toPixels(face.tensor, c);
|
||||
await human.draw.tensor(face.tensor, c);
|
||||
const arr = db.map((rec) => rec.embedding);
|
||||
const res = await human.match.find(face.embedding, arr);
|
||||
log('Match:', db[res.index].name);
|
||||
|
@ -98,7 +97,7 @@ async function selectFaceCanvas(face) {
|
|||
canvas.tag.similarity = similarity;
|
||||
// get best match
|
||||
// draw the canvas
|
||||
await human.tf.browser.toPixels(current.tensor, canvas);
|
||||
await human.draw.tensor(current.tensor, canvas);
|
||||
const ctx = canvas.getContext('2d');
|
||||
ctx.font = 'small-caps 1rem "Lato"';
|
||||
ctx.fillStyle = 'rgba(0, 0, 0, 1)';
|
||||
|
@ -145,7 +144,7 @@ async function addFaceCanvas(index, res, fileName) {
|
|||
gender: ${Math.round(100 * res.face[i].genderScore)}% ${res.face[i].gender}
|
||||
emotion: ${emotion}
|
||||
`.replace(/ /g, ' ');
|
||||
await human.tf.browser.toPixels(res.face[i].tensor, canvas);
|
||||
await human.draw.tensor(res.face[i].tensor, canvas);
|
||||
const ctx = canvas.getContext('2d');
|
||||
if (!ctx) return;
|
||||
ctx.font = 'small-caps 0.8rem "Lato"';
|
||||
|
|
|
@ -223,7 +223,7 @@ async function calcSimmilarity(result) {
|
|||
log('setting face compare baseline:', result.face[0]);
|
||||
if (result.face[0].tensor) {
|
||||
const c = document.getElementById('orig');
|
||||
human.tf.browser.toPixels(result.face[0].tensor, c);
|
||||
human.draw.tensor(result.face[0].tensor, c);
|
||||
} else {
|
||||
document.getElementById('compare-canvas').getContext('2d').drawImage(compare.original.canvas, 0, 0, 200, 200);
|
||||
}
|
||||
|
|
|
@ -8,8 +8,8 @@
|
|||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
const childProcess = require('child_process'); // eslint-disable-line camelcase
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
// note that main process does not import human or tfjs at all, it's all done from worker process
|
||||
|
||||
const workerFile = 'demo/multithread/node-multiprocess-worker.js';
|
||||
|
|
|
@ -7,7 +7,7 @@ const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpu
|
|||
const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
|
||||
|
||||
const config = {
|
||||
cacheSensitivity: 0,
|
||||
cacheSensitivity: 0.01,
|
||||
wasmPlatformFetch: true,
|
||||
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
|
||||
};
|
||||
|
|
|
@ -16,7 +16,7 @@ const humanConfig = {
|
|||
};
|
||||
|
||||
async function main(inputFile) {
|
||||
global.fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-unpublished-import, import/no-unresolved, node/no-missing-import
|
||||
global.fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-unpublished-import, import/no-unresolved, node/no-missing-import, node/no-extraneous-import
|
||||
const human = new Human.Human(humanConfig); // create instance of human using default configuration
|
||||
log.info('Human:', human.version, 'TF:', tf.version_core);
|
||||
await human.load(); // optional as models would be loaded on-demand first time they are required
|
||||
|
|
|
@ -13,6 +13,7 @@ const Human = require('../../dist/human.node.js'); // use this when using human
|
|||
const humanConfig = {
|
||||
// add any custom config here
|
||||
debug: true,
|
||||
body: { enabled: false },
|
||||
};
|
||||
|
||||
async function detect(inputFile) {
|
||||
|
|
|
@ -11,10 +11,11 @@
|
|||
* Working version of `ffmpeg` must be present on the system
|
||||
*/
|
||||
|
||||
const process = require('process');
|
||||
const spawn = require('child_process').spawn;
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
// in nodejs environments tfjs-node is required to be loaded before human
|
||||
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
// const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
|
||||
const Pipe2Jpeg = require('pipe2jpeg'); // eslint-disable-line node/no-missing-require, import/no-unresolved
|
||||
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
|
||||
|
@ -22,7 +23,8 @@ const Human = require('../../dist/human.node.js'); // use this when using human
|
|||
|
||||
let count = 0; // counter
|
||||
let busy = false; // busy flag
|
||||
const inputFile = './test.mp4';
|
||||
let inputFile = './test.mp4';
|
||||
if (process.argv.length === 3) inputFile = process.argv[2];
|
||||
|
||||
const humanConfig = {
|
||||
modelBasePath: 'file://models/',
|
||||
|
@ -59,15 +61,16 @@ const ffmpegParams = [
|
|||
'pipe:1', // output to unix pipe that is then captured by pipe2jpeg
|
||||
];
|
||||
|
||||
async function process(jpegBuffer) {
|
||||
async function detect(jpegBuffer) {
|
||||
if (busy) return; // skip processing if busy
|
||||
busy = true;
|
||||
const tensor = human.tf.node.decodeJpeg(jpegBuffer, 3); // decode jpeg buffer to raw tensor
|
||||
log.state('input frame:', ++count, 'size:', jpegBuffer.length, 'decoded shape:', tensor.shape);
|
||||
const res = await human.detect(tensor);
|
||||
log.data('gesture', JSON.stringify(res.gesture));
|
||||
// do processing here
|
||||
tf.dispose(tensor); // must dispose tensor
|
||||
human.tf.dispose(tensor); // must dispose tensor
|
||||
// start custom processing here
|
||||
log.data('frame', { frame: ++count, size: jpegBuffer.length, shape: tensor.shape, face: res?.face?.length, body: res?.body?.length, hand: res?.hand?.length, gesture: res?.gesture?.length });
|
||||
if (res?.face?.[0]) log.data('person', { score: [res.face[0].boxScore, res.face[0].faceScore], age: res.face[0].age || 0, gender: [res.face[0].genderScore || 0, res.face[0].gender], emotion: res.face[0].emotion?.[0] });
|
||||
// at the end of processing, mark loop as not busy so it can process the next frame
|
||||
busy = false;
|
||||
}
|
||||
|
||||
|
@ -75,8 +78,9 @@ async function main() {
|
|||
log.header();
|
||||
await human.tf.ready();
|
||||
// pre-load models
|
||||
log.info('human:', human.version, 'tf:', tf.version_core);
|
||||
pipe2jpeg.on('jpeg', (jpegBuffer) => process(jpegBuffer));
|
||||
log.info({ human: human.version, tf: human.tf.version_core });
|
||||
log.info({ input: inputFile });
|
||||
pipe2jpeg.on('data', (jpegBuffer) => detect(jpegBuffer));
|
||||
|
||||
const ffmpeg = spawn('ffmpeg', ffmpegParams, { stdio: ['ignore', 'pipe', 'ignore'] });
|
||||
ffmpeg.on('error', (error) => log.error('ffmpeg error:', error));
|
||||
|
|
|
@ -2,10 +2,10 @@
|
|||
* Human demo for NodeJS
|
||||
*/
|
||||
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const process = require('process');
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
|
||||
// in nodejs environments tfjs-node is required to be loaded before human
|
||||
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
|
@ -92,7 +92,7 @@ async function detect(input) {
|
|||
try {
|
||||
result = await human.detect(tensor, myConfig);
|
||||
} catch (err) {
|
||||
log.error('caught');
|
||||
log.error('caught', err);
|
||||
}
|
||||
|
||||
// dispose image tensor as we no longer need it
|
||||
|
|
|
@ -20,7 +20,7 @@ const config = { // just enable all and leave default settings
|
|||
modelBasePath: 'file://models',
|
||||
debug: true,
|
||||
softwareKernels: true, // slower but enhanced precision since face rotation can work in software mode in nodejs environments
|
||||
cacheSensitivity: 0,
|
||||
cacheSensitivity: 0.01,
|
||||
face: { enabled: true, detector: { maxDetected: 100, minConfidence: 0.1 } },
|
||||
object: { enabled: true, maxDetected: 100, minConfidence: 0.1 },
|
||||
gesture: { enabled: true },
|
||||
|
|
|
@ -20,7 +20,7 @@
|
|||
body { font-size: 1rem; font-family: "CenturyGothic", "Segoe UI", sans-serif; font-variant: small-caps; width: -webkit-fill-available; height: 100%; background: black; color: white; overflow: hidden; margin: 0; }
|
||||
select { font-size: 1rem; font-family: "CenturyGothic", "Segoe UI", sans-serif; font-variant: small-caps; background: gray; color: white; border: none; }
|
||||
</style>
|
||||
<script src="../segmentation/index.js" type="module"></script>
|
||||
<script src="../segmentation/index.js" type="module"></script>
|
||||
</head>
|
||||
<body>
|
||||
<noscript><h1>javascript is required</h1></noscript>
|
||||
|
|
|
@ -85,7 +85,7 @@ async function main() {
|
|||
return;
|
||||
}
|
||||
dom.fps.innerText = `fps: ${Math.round(10000 / (t1 - t0)) / 10}`; // mark performance
|
||||
human.tf.browser.toPixels(rgba, dom.output); // draw raw output
|
||||
human.draw.tensor(rgba, dom.output); // draw raw output
|
||||
human.tf.dispose(rgba); // dispose tensors
|
||||
ctxMerge.globalCompositeOperation = 'source-over';
|
||||
ctxMerge.drawImage(dom.background, 0, 0); // draw original video to first stacked canvas
|
||||
|
|
|
@ -0,0 +1,28 @@
|
|||
## Tracker
|
||||
|
||||
### Based on
|
||||
|
||||
<https://github.com/opendatacam/node-moving-things-tracker>
|
||||
|
||||
### Build
|
||||
|
||||
- remove reference to `lodash`:
|
||||
> `isEqual` in <tracker.js>
|
||||
- replace external lib:
|
||||
> curl https://raw.githubusercontent.com/ubilabs/kd-tree-javascript/master/kdTree.js -o lib/kdTree-min.js
|
||||
- build with `esbuild`:
|
||||
> node_modules/.bin/esbuild --bundle tracker.js --format=esm --platform=browser --target=esnext --keep-names --tree-shaking=false --analyze --outfile=/home/vlado/dev/human/demo/tracker/tracker.js --banner:js="/* eslint-disable */"
|
||||
|
||||
### Usage
|
||||
|
||||
computeDistance(item1, item2)
|
||||
disableKeepInMemory()
|
||||
enableKeepInMemory()
|
||||
getAllTrackedItems()
|
||||
getJSONDebugOfTrackedItems(roundInt = true)
|
||||
getJSONOfAllTrackedItems()
|
||||
getJSONOfTrackedItems(roundInt = true)
|
||||
getTrackedItemsInMOTFormat(frameNb)
|
||||
reset()
|
||||
setParams(newParams)
|
||||
updateTrackedItemsWithNewFrame(detectionsOfThisFrame, frameNb)
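A hedged sketch of how the demo below drives this API; the item shape (center x/y, width/height, name, confidence) mirrors `demo/tracker/index.ts`, while all values shown are placeholders:

```js
// usage sketch based on demo/tracker/index.ts; parameter values and the single detection are illustrative only
import tracker from './tracker.js';

tracker.setParams({ unMatchedFramesTolerance: 100, iouLimit: 0.05, fastDelete: false, distanceLimit: 1e4, matchingAlgorithm: 'kdTree' });
tracker.reset();

// feed detections for one frame; the frame number drives track bookkeeping
const detections = [{ x: 320, y: 240, w: 80, h: 80, name: 'face', confidence: 0.9 }];
tracker.updateTrackedItemsWithNewFrame(detections, 1);

// read back tracked items with persistent ids assigned across frames
const tracked = tracker.getJSONOfTrackedItems(true);
console.log(tracked); // e.g. [{ id, name, confidence, isZombie, x, y, w, h }]
```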
|
|
@ -0,0 +1,65 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>Human</title>
|
||||
<meta name="viewport" content="width=device-width" id="viewport">
|
||||
<meta name="keywords" content="Human">
|
||||
<meta name="application-name" content="Human">
|
||||
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="theme-color" content="#000000">
|
||||
<link rel="manifest" href="../manifest.webmanifest">
|
||||
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
|
||||
<link rel="apple-touch-icon" href="../../assets/icon.png">
|
||||
<script src="./index.js" type="module"></script>
|
||||
<style>
|
||||
html { font-family: 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
|
||||
body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
|
||||
body::-webkit-scrollbar { display: none; }
|
||||
input[type="file"] { font-family: 'Segoe UI'; font-size: 14px; font-variant: small-caps; }
|
||||
::-webkit-file-upload-button { background: #333333; color: white; border: 0; border-radius: 0; padding: 6px 16px; box-shadow: 4px 4px 4px #222222; font-family: 'Segoe UI'; font-size: 14px; font-variant: small-caps; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div style="display: flex">
|
||||
<video id="video" playsinline style="width: 25vw" controls controlslist="nofullscreen nodownload noremoteplayback" disablepictureinpicture loop></video>
|
||||
<canvas id="canvas" style="width: 75vw"></canvas>
|
||||
</div>
|
||||
<div class="uploader" style="padding: 8px">
|
||||
<input type="file" name="inputvideo" id="inputvideo" accept="video/*"></input>
|
||||
<input type="checkbox" id="interpolation" name="interpolation"></input>
|
||||
<label for="tracker">interpolation</label>
|
||||
</div>
|
||||
<form id="config" style="padding: 8px; line-height: 1.6rem;">
|
||||
tracker |
|
||||
<input type="checkbox" id="tracker" name="tracker" checked></input>
|
||||
<label for="tracker">enabled</label> |
|
||||
<input type="checkbox" id="keepInMemory" name="keepInMemory"></input>
|
||||
<label for="keepInMemory">keepInMemory</label> |
|
||||
<br>
|
||||
tracker source |
|
||||
<input type="radio" id="box-face" name="box" value="face" checked>
|
||||
<label for="box-face">face</label> |
|
||||
<input type="radio" id="box-body" name="box" value="body">
|
||||
<label for="box-face">body</label> |
|
||||
<input type="radio" id="box-object" name="box" value="object">
|
||||
<label for="box-face">object</label> |
|
||||
<br>
|
||||
tracker config |
|
||||
<input type="range" id="unMatchedFramesTolerance" name="unMatchedFramesTolerance" min="0" max="300" step="1", value="60"></input>
|
||||
<label for="unMatchedFramesTolerance">unMatchedFramesTolerance</label> |
|
||||
<input type="range" id="iouLimit" name="unMatchedFramesTolerance" min="0" max="1" step="0.01", value="0.1"></input>
|
||||
<label for="iouLimit">iouLimit</label> |
|
||||
<input type="range" id="distanceLimit" name="unMatchedFramesTolerance" min="0" max="1" step="0.01", value="0.1"></input>
|
||||
<label for="distanceLimit">distanceLimit</label> |
|
||||
<input type="radio" id="matchingAlgorithm-kdTree" name="matchingAlgorithm" value="kdTree" checked>
|
||||
<label for="matchingAlgorithm-kdTree">kdTree</label> |
|
||||
<input type="radio" id="matchingAlgorithm-munkres" name="matchingAlgorithm" value="munkres">
|
||||
<label for="matchingAlgorithm-kdTree">munkres</label> |
|
||||
</form>
|
||||
<pre id="status" style="position: absolute; top: 12px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
|
||||
<pre id="log" style="padding: 8px"></pre>
|
||||
<div id="performance" style="position: absolute; bottom: 0; width: 100%; padding: 8px; font-size: 0.8rem;"></div>
|
||||
</body>
|
||||
</html>
|
File diff suppressed because one or more lines are too long
|
@ -0,0 +1,208 @@
|
|||
/**
|
||||
* Human demo for browsers
|
||||
* @default Human Library
|
||||
* @summary <https://github.com/vladmandic/human>
|
||||
* @author <https://github.com/vladmandic>
|
||||
* @copyright <https://github.com/vladmandic>
|
||||
* @license MIT
|
||||
*/
|
||||
|
||||
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
|
||||
import tracker from './tracker.js';
|
||||
|
||||
const humanConfig: Partial<H.Config> = { // user configuration for human, used to fine-tune behavior
|
||||
debug: true,
|
||||
backend: 'webgl',
|
||||
// cacheSensitivity: 0,
|
||||
// cacheModels: false,
|
||||
// warmup: 'none',
|
||||
modelBasePath: 'https://vladmandic.github.io/human-models/models',
|
||||
filter: { enabled: true, equalization: false, flip: false },
|
||||
face: {
|
||||
enabled: true,
|
||||
detector: { rotation: false, maxDetected: 10, minConfidence: 0.3 },
|
||||
mesh: { enabled: true },
|
||||
attention: { enabled: false },
|
||||
iris: { enabled: false },
|
||||
description: { enabled: false },
|
||||
emotion: { enabled: false },
|
||||
antispoof: { enabled: false },
|
||||
liveness: { enabled: false },
|
||||
},
|
||||
body: { enabled: false, maxDetected: 6, modelPath: 'movenet-multipose.json' },
|
||||
hand: { enabled: false },
|
||||
object: { enabled: false, maxDetected: 10 },
|
||||
segmentation: { enabled: false },
|
||||
gesture: { enabled: false },
|
||||
};
|
||||
|
||||
interface TrackerConfig {
|
||||
unMatchedFramesTolerance: number, // number of frame when an object is not matched before considering it gone; ignored if fastDelete is set
|
||||
iouLimit: number, // exclude items from being matched if their IOU is less than this; 1 means total overlap; 0 means no overlap
|
||||
fastDelete: boolean, // remove new objects immediately if they could not be matched in the next frames; if set, ignores unMatchedFramesTolerance
|
||||
distanceLimit: number, // distance limit for matching; if values need to be excluded from matching set their distance to something greater than the distance limit
|
||||
matchingAlgorithm: 'kdTree' | 'munkres', // algorithm used to match tracks with new detections
|
||||
}
|
||||
|
||||
interface TrackerResult {
|
||||
id: number,
|
||||
confidence: number,
|
||||
bearing: number,
|
||||
isZombie: boolean,
|
||||
name: string,
|
||||
x: number,
|
||||
y: number,
|
||||
w: number,
|
||||
h: number,
|
||||
}
|
||||
|
||||
const trackerConfig: TrackerConfig = {
|
||||
unMatchedFramesTolerance: 100,
|
||||
iouLimit: 0.05,
|
||||
fastDelete: false,
|
||||
distanceLimit: 1e4,
|
||||
matchingAlgorithm: 'kdTree',
|
||||
};
|
||||
|
||||
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
|
||||
|
||||
const dom = { // grab instances of dom objects so we dont have to look them up later
|
||||
video: document.getElementById('video') as HTMLVideoElement,
|
||||
canvas: document.getElementById('canvas') as HTMLCanvasElement,
|
||||
log: document.getElementById('log') as HTMLPreElement,
|
||||
fps: document.getElementById('status') as HTMLPreElement,
|
||||
tracker: document.getElementById('tracker') as HTMLInputElement,
|
||||
interpolation: document.getElementById('interpolation') as HTMLInputElement,
|
||||
config: document.getElementById('config') as HTMLFormElement,
|
||||
ctx: (document.getElementById('canvas') as HTMLCanvasElement).getContext('2d') as CanvasRenderingContext2D,
|
||||
};
|
||||
const timestamp = { detect: 0, draw: 0, tensors: 0, start: 0 }; // holds information used to calculate performance and possible memory leaks
|
||||
const fps = { detectFPS: 0, drawFPS: 0, frames: 0, averageMs: 0 }; // holds calculated fps information for both detect and screen refresh
|
||||
|
||||
const log = (...msg) => { // helper method to output messages
|
||||
dom.log.innerText += msg.join(' ') + '\n';
|
||||
console.log(...msg); // eslint-disable-line no-console
|
||||
};
|
||||
const status = (msg) => dom.fps.innerText = msg; // print status element
|
||||
|
||||
async function detectionLoop() { // main detection loop
|
||||
if (!dom.video.paused && dom.video.readyState >= 2) {
|
||||
if (timestamp.start === 0) timestamp.start = human.now();
|
||||
// log('profiling data:', await human.profile(dom.video));
|
||||
await human.detect(dom.video, humanConfig); // actual detection; we're not capturing output in a local variable as it can also be reached via human.result
|
||||
const tensors = human.tf.memory().numTensors; // check current tensor usage for memory leaks
|
||||
if (tensors - timestamp.tensors !== 0) log('allocated tensors:', tensors - timestamp.tensors); // printed on start and each time there is a tensor leak
|
||||
timestamp.tensors = tensors;
|
||||
fps.detectFPS = Math.round(1000 * 1000 / (human.now() - timestamp.detect)) / 1000;
|
||||
fps.frames++;
|
||||
fps.averageMs = Math.round(1000 * (human.now() - timestamp.start) / fps.frames) / 1000;
|
||||
}
|
||||
timestamp.detect = human.now();
|
||||
requestAnimationFrame(detectionLoop); // start new frame immediately
|
||||
}
|
||||
|
||||
function drawLoop() { // main screen refresh loop
|
||||
if (!dom.video.paused && dom.video.readyState >= 2) {
|
||||
const res: H.Result = dom.interpolation.checked ? human.next(human.result) : human.result; // interpolate results if enabled
|
||||
let tracking: H.FaceResult[] | H.BodyResult[] | H.ObjectResult[] = [];
|
||||
if (human.config.face.enabled) tracking = res.face;
|
||||
else if (human.config.body.enabled) tracking = res.body;
|
||||
else if (human.config.object.enabled) tracking = res.object;
|
||||
else log('unknown object type');
|
||||
let data: TrackerResult[] = [];
|
||||
if (dom.tracker.checked) {
|
||||
const items = tracking.map((obj) => ({
|
||||
x: obj.box[0] + obj.box[2] / 2,
|
||||
y: obj.box[1] + obj.box[3] / 2,
|
||||
w: obj.box[2],
|
||||
h: obj.box[3],
|
||||
name: obj.label || (human.config.face.enabled ? 'face' : 'body'),
|
||||
confidence: obj.score,
|
||||
}));
|
||||
tracker.updateTrackedItemsWithNewFrame(items, fps.frames);
|
||||
data = tracker.getJSONOfTrackedItems(true) as TrackerResult[];
|
||||
}
|
||||
human.draw.canvas(dom.video, dom.canvas); // copy input video frame to output canvas
|
||||
for (let i = 0; i < tracking.length; i++) {
|
||||
// @ts-ignore
|
||||
const name = tracking[i].label || (human.config.face.enabled ? 'face' : 'body');
|
||||
dom.ctx.strokeRect(tracking[i].box[0], tracking[i].box[1], tracking[i].box[2], tracking[i].box[3]);
|
||||
dom.ctx.fillText(`id: ${tracking[i].id} ${Math.round(100 * tracking[i].score)}% ${name}`, tracking[i].box[0] + 4, tracking[i].box[1] + 16);
|
||||
if (data[i]) {
|
||||
dom.ctx.fillText(`t: ${data[i].id} ${Math.round(100 * data[i].confidence)}% ${data[i].name} ${data[i].isZombie ? 'zombie' : ''}`, tracking[i].box[0] + 4, tracking[i].box[1] + 34);
|
||||
}
|
||||
}
|
||||
}
|
||||
const now = human.now();
|
||||
fps.drawFPS = Math.round(1000 * 1000 / (now - timestamp.draw)) / 1000;
|
||||
timestamp.draw = now;
|
||||
status(dom.video.paused ? 'paused' : `fps: ${fps.detectFPS.toFixed(1).padStart(5, ' ')} detect | ${fps.drawFPS.toFixed(1).padStart(5, ' ')} draw`); // write status
|
||||
setTimeout(drawLoop, 30); // use to slow down refresh from max refresh rate to target of 30 fps
|
||||
}
|
||||
|
||||
async function handleVideo(file: File) {
|
||||
const url = URL.createObjectURL(file);
|
||||
dom.video.src = url;
|
||||
await dom.video.play();
|
||||
log('loaded video:', file.name, 'resolution:', [dom.video.videoWidth, dom.video.videoHeight], 'duration:', dom.video.duration);
|
||||
dom.canvas.width = dom.video.videoWidth;
|
||||
dom.canvas.height = dom.video.videoHeight;
|
||||
dom.ctx.strokeStyle = 'white';
|
||||
dom.ctx.fillStyle = 'white';
|
||||
dom.ctx.font = '16px Segoe UI';
|
||||
dom.video.playbackRate = 0.25;
|
||||
}
|
||||
|
||||
function initInput() {
|
||||
document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
|
||||
document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
|
||||
document.body.addEventListener('dragover', (evt) => evt.preventDefault());
|
||||
document.body.addEventListener('drop', async (evt) => {
|
||||
evt.preventDefault();
|
||||
if (evt.dataTransfer) evt.dataTransfer.dropEffect = 'copy';
|
||||
const file = evt.dataTransfer?.files?.[0];
|
||||
if (file) await handleVideo(file);
|
||||
log(dom.video.readyState);
|
||||
});
|
||||
(document.getElementById('inputvideo') as HTMLInputElement).onchange = async (evt) => {
|
||||
evt.preventDefault();
|
||||
const file = evt.target?.['files']?.[0];
|
||||
if (file) await handleVideo(file);
|
||||
};
|
||||
dom.config.onchange = () => {
|
||||
trackerConfig.distanceLimit = (document.getElementById('distanceLimit') as HTMLInputElement).valueAsNumber;
|
||||
trackerConfig.iouLimit = (document.getElementById('iouLimit') as HTMLInputElement).valueAsNumber;
|
||||
trackerConfig.unMatchedFramesTolerance = (document.getElementById('unMatchedFramesTolerance') as HTMLInputElement).valueAsNumber;
|
||||
trackerConfig.unMatchedFramesTolerance = (document.getElementById('unMatchedFramesTolerance') as HTMLInputElement).valueAsNumber;
|
||||
trackerConfig.matchingAlgorithm = (document.getElementById('matchingAlgorithm-kdTree') as HTMLInputElement).checked ? 'kdTree' : 'munkres';
|
||||
tracker.setParams(trackerConfig);
|
||||
if ((document.getElementById('keepInMemory') as HTMLInputElement).checked) tracker.enableKeepInMemory();
|
||||
else tracker.disableKeepInMemory();
|
||||
tracker.reset();
|
||||
log('tracker config change', JSON.stringify(trackerConfig));
|
||||
humanConfig.face!.enabled = (document.getElementById('box-face') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
|
||||
humanConfig.body!.enabled = (document.getElementById('box-body') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
|
||||
humanConfig.object!.enabled = (document.getElementById('box-object') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
|
||||
};
|
||||
dom.tracker.onchange = (evt) => {
|
||||
log('tracker', (evt.target as HTMLInputElement).checked ? 'enabled' : 'disabled');
|
||||
tracker.setParams(trackerConfig);
|
||||
tracker.reset();
|
||||
};
|
||||
}
|
||||
|
||||
async function main() { // main entry point
|
||||
log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
|
||||
log('platform:', human.env.platform, '| agent:', human.env.agent);
|
||||
status('loading...');
|
||||
await human.load(); // preload all models
|
||||
log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
|
||||
log('models loaded:', human.models.loaded());
|
||||
status('initializing...');
|
||||
await human.warmup(); // warmup function to initialize backend for future faster detection
|
||||
initInput(); // initialize input
|
||||
await detectionLoop(); // start detection loop
|
||||
drawLoop(); // start draw loop
|
||||
}
|
||||
|
||||
window.onload = main;
|
File diff suppressed because it is too large
|
@ -4,6 +4,6 @@
|
|||
author: <https://github.com/vladmandic>'
|
||||
*/
|
||||
|
||||
import*as m from"../../dist/human.esm.js";var v=1920,b={modelBasePath:"../../models",filter:{enabled:!0,equalization:!1,flip:!1},face:{enabled:!0,detector:{rotation:!0},mesh:{enabled:!0},attention:{enabled:!1},iris:{enabled:!0},description:{enabled:!0},emotion:{enabled:!0},antispoof:{enabled:!0},liveness:{enabled:!0}},body:{enabled:!0},hand:{enabled:!1},object:{enabled:!1},segmentation:{enabled:!1},gesture:{enabled:!0}},e=new m.Human(b);e.env.perfadd=!1;e.draw.options.font='small-caps 18px "Lato"';e.draw.options.lineHeight=20;var a={video:document.getElementById("video"),canvas:document.getElementById("canvas"),log:document.getElementById("log"),fps:document.getElementById("status"),perf:document.getElementById("performance")},n={detect:0,draw:0,tensors:0,start:0},s={detectFPS:0,drawFPS:0,frames:0,averageMs:0},o=(...t)=>{a.log.innerText+=t.join(" ")+`
|
||||
`,console.log(...t)},r=t=>a.fps.innerText=t,g=t=>a.perf.innerText="tensors:"+e.tf.memory().numTensors.toString()+" | performance: "+JSON.stringify(t).replace(/"|{|}/g,"").replace(/,/g," | ");async function f(){if(!a.video.paused){n.start===0&&(n.start=e.now()),await e.detect(a.video);let t=e.tf.memory().numTensors;t-n.tensors!==0&&o("allocated tensors:",t-n.tensors),n.tensors=t,s.detectFPS=Math.round(1e3*1e3/(e.now()-n.detect))/1e3,s.frames++,s.averageMs=Math.round(1e3*(e.now()-n.start)/s.frames)/1e3,s.frames%100===0&&!a.video.paused&&o("performance",{...s,tensors:n.tensors})}n.detect=e.now(),requestAnimationFrame(f)}async function p(){var d,i,c;if(!a.video.paused){let l=e.next(e.result),u=await e.image(a.video);e.draw.canvas(u.canvas,a.canvas);let w={bodyLabels:`person confidence [score] and ${(c=(i=(d=e.result)==null?void 0:d.body)==null?void 0:i[0])==null?void 0:c.keypoints.length} keypoints`};await e.draw.all(a.canvas,l,w),g(l.performance)}let t=e.now();s.drawFPS=Math.round(1e3*1e3/(t-n.draw))/1e3,n.draw=t,r(a.video.paused?"paused":`fps: ${s.detectFPS.toFixed(1).padStart(5," ")} detect | ${s.drawFPS.toFixed(1).padStart(5," ")} draw`),setTimeout(p,30)}async function y(){let d=(await e.webcam.enumerate())[0].deviceId;await e.webcam.start({element:a.video,crop:!1,width:v,id:d}),a.canvas.width=e.webcam.width,a.canvas.height=e.webcam.height,a.canvas.onclick=async()=>{e.webcam.paused?await e.webcam.play():e.webcam.pause()}}async function h(){o("human version:",e.version,"| tfjs version:",e.tf.version["tfjs-core"]),o("platform:",e.env.platform,"| agent:",e.env.agent),r("loading..."),await e.load(),o("backend:",e.tf.getBackend(),"| available:",e.env.backends),o("models stats:",e.models.stats()),o("models loaded:",e.models.loaded()),o("environment",e.env),r("initializing..."),await e.warmup(),await y(),await f(),await p()}window.onload=h;
|
||||
import*as m from"../../dist/human.esm.js";var v=1920,b={debug:!0,backend:"webgl",modelBasePath:"https://vladmandic.github.io/human-models/models/",filter:{enabled:!0,equalization:!1,flip:!1},face:{enabled:!0,detector:{rotation:!1},mesh:{enabled:!0},attention:{enabled:!1},iris:{enabled:!0},description:{enabled:!0},emotion:{enabled:!0},antispoof:{enabled:!0},liveness:{enabled:!0}},body:{enabled:!1},hand:{enabled:!1},object:{enabled:!1},segmentation:{enabled:!1},gesture:{enabled:!0}},e=new m.Human(b);e.env.perfadd=!1;e.draw.options.font='small-caps 18px "Lato"';e.draw.options.lineHeight=20;e.draw.options.drawPoints=!0;var a={video:document.getElementById("video"),canvas:document.getElementById("canvas"),log:document.getElementById("log"),fps:document.getElementById("status"),perf:document.getElementById("performance")},n={detect:0,draw:0,tensors:0,start:0},s={detectFPS:0,drawFPS:0,frames:0,averageMs:0},o=(...t)=>{a.log.innerText+=t.join(" ")+`
|
||||
`,console.log(...t)},i=t=>a.fps.innerText=t,g=t=>a.perf.innerText="tensors:"+e.tf.memory().numTensors.toString()+" | performance: "+JSON.stringify(t).replace(/"|{|}/g,"").replace(/,/g," | ");async function f(){if(!a.video.paused){n.start===0&&(n.start=e.now()),await e.detect(a.video);let t=e.tf.memory().numTensors;t-n.tensors!==0&&o("allocated tensors:",t-n.tensors),n.tensors=t,s.detectFPS=Math.round(1e3*1e3/(e.now()-n.detect))/1e3,s.frames++,s.averageMs=Math.round(1e3*(e.now()-n.start)/s.frames)/1e3,s.frames%100===0&&!a.video.paused&&o("performance",{...s,tensors:n.tensors})}n.detect=e.now(),requestAnimationFrame(f)}async function u(){var d,r,c;if(!a.video.paused){let l=e.next(e.result),w=await e.image(a.video);e.draw.canvas(w.canvas,a.canvas);let p={bodyLabels:`person confidence [score] and ${(c=(r=(d=e.result)==null?void 0:d.body)==null?void 0:r[0])==null?void 0:c.keypoints.length} keypoints`};await e.draw.all(a.canvas,l,p),g(l.performance)}let t=e.now();s.drawFPS=Math.round(1e3*1e3/(t-n.draw))/1e3,n.draw=t,i(a.video.paused?"paused":`fps: ${s.detectFPS.toFixed(1).padStart(5," ")} detect | ${s.drawFPS.toFixed(1).padStart(5," ")} draw`),setTimeout(u,30)}async function h(){let d=(await e.webcam.enumerate())[0].deviceId,r=await e.webcam.start({element:a.video,crop:!1,width:v,id:d});o(r),a.canvas.width=e.webcam.width,a.canvas.height=e.webcam.height,a.canvas.onclick=async()=>{e.webcam.paused?await e.webcam.play():e.webcam.pause()}}async function y(){o("human version:",e.version,"| tfjs version:",e.tf.version["tfjs-core"]),o("platform:",e.env.platform,"| agent:",e.env.agent),i("loading..."),await e.load(),o("backend:",e.tf.getBackend(),"| available:",e.env.backends),o("models stats:",e.models.stats()),o("models loaded:",e.models.loaded()),o("environment",e.env),i("initializing..."),await e.warmup(),await h(),await f(),await u()}window.onload=y;
|
||||
//# sourceMappingURL=index.js.map
|
||||
|
|
|
@ -12,12 +12,16 @@ import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
|
|||
const width = 1920; // used by webcam config as well as human maximum resolution // can be anything, but resolutions higher than 4k will disable internal optimizations
|
||||
|
||||
const humanConfig: Partial<H.Config> = { // user configuration for human, used to fine-tune behavior
|
||||
// backend: 'webgpu',
|
||||
modelBasePath: '../../models',
|
||||
debug: true,
|
||||
backend: 'webgl',
|
||||
// cacheSensitivity: 0,
|
||||
// cacheModels: false,
|
||||
// warmup: 'none',
|
||||
// modelBasePath: '../../models',
|
||||
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
|
||||
filter: { enabled: true, equalization: false, flip: false },
|
||||
face: { enabled: true, detector: { rotation: true }, mesh: { enabled: true }, attention: { enabled: false }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true }, antispoof: { enabled: true }, liveness: { enabled: true } },
|
||||
body: { enabled: true },
|
||||
// hand: { enabled: true },
|
||||
face: { enabled: true, detector: { rotation: false }, mesh: { enabled: true }, attention: { enabled: false }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true }, antispoof: { enabled: true }, liveness: { enabled: true } },
|
||||
body: { enabled: false },
|
||||
hand: { enabled: false },
|
||||
object: { enabled: false },
|
||||
segmentation: { enabled: false },
|
||||
|
@ -29,6 +33,7 @@ const human = new H.Human(humanConfig); // create instance of human with overrid
|
|||
human.env.perfadd = false; // is performance data showing instant or total values
|
||||
human.draw.options.font = 'small-caps 18px "Lato"'; // set font used to draw labels when using draw methods
|
||||
human.draw.options.lineHeight = 20;
|
||||
human.draw.options.drawPoints = true; // draw points on face mesh
|
||||
// human.draw.options.fillPolygons = true;
|
||||
|
||||
const dom = { // grab instances of dom objects so we don't have to look them up later
|
||||
|
@ -85,7 +90,8 @@ async function drawLoop() { // main screen refresh loop
|
|||
async function webCam() {
|
||||
const devices = await human.webcam.enumerate();
|
||||
const id = devices[0].deviceId; // use first available video source
|
||||
await human.webcam.start({ element: dom.video, crop: false, width, id }); // use human webcam helper methods and associate webcam stream with a dom element
|
||||
const webcamStatus = await human.webcam.start({ element: dom.video, crop: false, width, id }); // use human webcam helper methods and associate webcam stream with a dom element
|
||||
log(webcamStatus);
|
||||
dom.canvas.width = human.webcam.width;
|
||||
dom.canvas.height = human.webcam.height;
|
||||
dom.canvas.onclick = async () => { // pause when clicked on screen and resume on next click
|
||||
|
|
|
@ -4,4 +4,4 @@
|
|||
author: <https://github.com/vladmandic>'
|
||||
*/
|
||||
|
||||
var e="4.1.0";var s="4.1.0";var t="4.1.0";var n="4.1.0";var r="4.1.0";var i="0.0.1-alpha.16";var h={tfjs:e,"tfjs-core":e,"tfjs-converter":s,"tfjs-backend-cpu":t,"tfjs-backend-webgl":n,"tfjs-backend-wasm":r,"tfjs-backend-webgpu":i};export{h as version};
|
||||
var e="4.22.0";var s="4.22.0";var t="4.22.0";var n="4.22.0";var r="4.22.0";var i="4.22.0";var h={tfjs:e,"tfjs-core":e,"tfjs-converter":s,"tfjs-backend-cpu":t,"tfjs-backend-webgl":n,"tfjs-backend-wasm":r,"tfjs-backend-webgpu":i};export{h as version};
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
"liveness": 592976,
|
||||
"models": 0,
|
||||
"movenet-lightning": 4650216,
|
||||
"affectnet-mobilenet": 6920630,
|
||||
"age": 161240,
|
||||
"blazeface-back": 538928,
|
||||
"blazeface-front": 402048,
|
||||
|
@ -25,6 +26,8 @@
|
|||
"facemesh-detection-full": 1026192,
|
||||
"facemesh-detection-short": 201268,
|
||||
"faceres-deep": 13957620,
|
||||
"gear-e1": 112438,
|
||||
"gear-e2": 112438,
|
||||
"gear": 1498916,
|
||||
"gender-ssrnet-imdb": 161236,
|
||||
"gender": 201808,
|
||||
|
|
package.json
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"name": "@vladmandic/human",
|
||||
"version": "3.0.1",
|
||||
"version": "3.3.5",
|
||||
"description": "Human: AI-powered 3D Face Detection & Rotation Tracking, Face Description & Recognition, Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction, Gesture Recognition",
|
||||
"sideEffects": false,
|
||||
"main": "dist/human.node.js",
|
||||
|
@ -8,22 +8,24 @@
|
|||
"browser": "dist/human.esm.js",
|
||||
"types": "types/human.d.ts",
|
||||
"exports": {
|
||||
"node": {
|
||||
"require": "./dist/human.node.js",
|
||||
"import": "./dist/human.node.js",
|
||||
"module": "./dist/human.node.js"
|
||||
},
|
||||
"require": "./dist/human.node.js",
|
||||
"import": "./dist/human.esm.js",
|
||||
"node": "./dist/human.node.js",
|
||||
"script": "./dist/human.js",
|
||||
"module": "./dist/human.esm.js",
|
||||
"types": "./types/human.d.ts",
|
||||
"dist/human": "./dist/human.js",
|
||||
"dist/human.js": "./dist/human.js",
|
||||
"dist/human.esm": "./dist/human.esm.js",
|
||||
"dist/human.esm.js": "./dist/human.esm.js",
|
||||
"dist/human.esm-nobundle": "./dist/human.esm-nobundle.js",
|
||||
"dist/human.esm-nobundle.js": "./dist/human.esm-nobundle.js",
|
||||
"dist/human.node": "./dist/human.node.js",
|
||||
"dist/human.node.js": "./dist/human.node.js",
|
||||
"dist/human.node-wasm": "./dist/human.node-wasm.js",
|
||||
"dist/human.node-wasm.js": "./dist/human.node-wasm.js",
|
||||
"dist/human.node-gpu.js": "./dist/human.node-gpu.js"
|
||||
"dist/human.node-gpu": "./dist/human.node-gpu.js",
|
||||
"dist/human.node-gpu.js": "./dist/human.node-gpu.js",
|
||||
"require": "./dist/human.node.js",
|
||||
"import": "./dist/human.esm.js"
|
||||
},
|
||||
"author": "Vladimir Mandic <mandic00@live.com>",
|
||||
"bugs": {
|
||||
|
@ -72,37 +74,40 @@
|
|||
"tensorflow"
|
||||
],
|
||||
"devDependencies": {
|
||||
"@html-eslint/eslint-plugin": "^0.15.0",
|
||||
"@html-eslint/parser": "^0.15.0",
|
||||
"@microsoft/api-extractor": "^7.33.6",
|
||||
"@tensorflow/tfjs-backend-cpu": "^4.1.0",
|
||||
"@tensorflow/tfjs-backend-wasm": "^4.1.0",
|
||||
"@tensorflow/tfjs-backend-webgl": "^4.1.0",
|
||||
"@tensorflow/tfjs-backend-webgpu": "0.0.1-alpha.16",
|
||||
"@tensorflow/tfjs-converter": "^4.1.0",
|
||||
"@tensorflow/tfjs-core": "^4.1.0",
|
||||
"@tensorflow/tfjs-node": "^4.1.0",
|
||||
"@tensorflow/tfjs-node-gpu": "^4.1.0",
|
||||
"@types/node": "^18.11.9",
|
||||
"@types/offscreencanvas": "^2019.7.0",
|
||||
"@typescript-eslint/eslint-plugin": "^5.44.0",
|
||||
"@typescript-eslint/parser": "^5.44.0",
|
||||
"@vladmandic/build": "^0.7.14",
|
||||
"@vladmandic/pilogger": "^0.4.6",
|
||||
"@html-eslint/eslint-plugin": "^0.26.0",
|
||||
"@html-eslint/parser": "^0.26.0",
|
||||
"@microsoft/api-extractor": "^7.49.2",
|
||||
"@tensorflow/tfjs-backend-cpu": "^4.22.0",
|
||||
"@tensorflow/tfjs-backend-wasm": "^4.22.0",
|
||||
"@tensorflow/tfjs-backend-webgl": "^4.22.0",
|
||||
"@tensorflow/tfjs-backend-webgpu": "4.22.0",
|
||||
"@tensorflow/tfjs-converter": "^4.22.0",
|
||||
"@tensorflow/tfjs-core": "^4.22.0",
|
||||
"@tensorflow/tfjs-data": "^4.22.0",
|
||||
"@tensorflow/tfjs-layers": "^4.22.0",
|
||||
"@tensorflow/tfjs-node": "^4.22.0",
|
||||
"@tensorflow/tfjs-node-gpu": "^4.22.0",
|
||||
"@types/emscripten": "^1.40.0",
|
||||
"@types/node": "^22.13.1",
|
||||
"@types/offscreencanvas": "^2019.7.3",
|
||||
"@typescript-eslint/eslint-plugin": "^8.8.1",
|
||||
"@typescript-eslint/parser": "^8.8.1",
|
||||
"@vladmandic/build": "^0.10.2",
|
||||
"@vladmandic/pilogger": "^0.5.1",
|
||||
"@vladmandic/tfjs": "github:vladmandic/tfjs",
|
||||
"canvas": "^2.10.2",
|
||||
"esbuild": "^0.15.15",
|
||||
"eslint": "8.28.0",
|
||||
"canvas": "^3.1.0",
|
||||
"esbuild": "^0.24.2",
|
||||
"eslint": "8.57.0",
|
||||
"eslint-config-airbnb-base": "^15.0.0",
|
||||
"eslint-plugin-html": "^7.1.0",
|
||||
"eslint-plugin-import": "^2.26.0",
|
||||
"eslint-plugin-json": "^3.1.0",
|
||||
"eslint-plugin-markdown": "^3.0.0",
|
||||
"eslint-plugin-html": "^8.1.2",
|
||||
"eslint-plugin-import": "^2.31.0",
|
||||
"eslint-plugin-json": "^4.0.1",
|
||||
"eslint-plugin-markdown": "^5.1.0",
|
||||
"eslint-plugin-node": "^11.1.0",
|
||||
"eslint-plugin-promise": "^6.1.1",
|
||||
"rimraf": "^3.0.2",
|
||||
"tslib": "^2.4.1",
|
||||
"typedoc": "0.23.21",
|
||||
"typescript": "4.9.3"
|
||||
"eslint-plugin-promise": "^7.1.0",
|
||||
"rimraf": "^6.0.1",
|
||||
"tslib": "^2.8.1",
|
||||
"typedoc": "0.27.6",
|
||||
"typescript": "5.7.3"
|
||||
}
|
||||
}
|
||||
|
|
|
@ -36,6 +36,7 @@ export async function loadPose(config: Config): Promise<GraphModel> {
|
|||
if (!model) {
|
||||
model = await loadModel(config.body.modelPath);
|
||||
const inputs = model?.['executor'] ? Object.values(model.modelSignature['inputs']) : undefined;
|
||||
// @ts-ignore model signature properties are not typed and inputs are unreliable for this model
|
||||
inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
|
||||
} else if (config.debug) log('cached model:', model['modelUrl']);
|
||||
return model;
|
||||
|
|
|
@ -44,6 +44,7 @@ export async function loadDetector(config: Config): Promise<GraphModel> {
|
|||
if (!model && config.body['detector'] && config.body['detector'].modelPath || '') {
|
||||
model = await loadModel(config.body['detector'].modelPath);
|
||||
const inputs = model?.['executor'] ? Object.values(model.modelSignature['inputs']) : undefined;
|
||||
// @ts-ignore model signature properties are not typed and inputs are unreliable for this model
|
||||
inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
|
||||
} else if (config.debug && model) log('cached model:', model['modelUrl']);
|
||||
createAnchors();
|
||||
|
|
|
@ -39,6 +39,8 @@ export async function load(config: Config): Promise<GraphModel> {
|
|||
} else if (config.debug) log('cached model:', model['modelUrl']);
|
||||
inputSize = (model?.['executor'] && model?.inputs?.[0].shape) ? model.inputs[0].shape[2] : 0;
|
||||
if (inputSize < 64) inputSize = 256;
|
||||
// @ts-ignore private property
|
||||
if (tf.env().flagRegistry.WEBGL_USE_SHAPES_UNIFORMS) tf.env().set('WEBGL_USE_SHAPES_UNIFORMS', false); // default=false <https://github.com/tensorflow/tfjs/issues/5205>
|
||||
return model;
|
||||
}
|
||||
|
||||
|
@ -84,8 +86,8 @@ function parseMultiPose(res, config, image) {
|
|||
const bodies: BodyResult[] = [];
|
||||
for (let id = 0; id < res[0].length; id++) {
|
||||
const kpt = res[0][id];
|
||||
const totalScore = Math.round(100 * kpt[51 + 4]) / 100;
|
||||
if (totalScore > config.body.minConfidence) {
|
||||
const boxScore = Math.round(100 * kpt[51 + 4]) / 100;
|
||||
if (boxScore > config.body.minConfidence) {
|
||||
const keypoints: BodyKeypoint[] = [];
|
||||
for (let i = 0; i < 17; i++) {
|
||||
const score = kpt[3 * i + 2];
|
||||
|
@ -99,10 +101,10 @@ function parseMultiPose(res, config, image) {
|
|||
});
|
||||
}
|
||||
}
|
||||
const newBox = box.calc(keypoints.map((pt) => pt.position), [image.shape[2], image.shape[1]]);
|
||||
// const newBox = box.calc(keypoints.map((pt) => pt.position), [image.shape[2], image.shape[1]]);
|
||||
// movenet-multipose has built-in box details
|
||||
// const boxRaw: Box = [kpt[51 + 1], kpt[51 + 0], kpt[51 + 3] - kpt[51 + 1], kpt[51 + 2] - kpt[51 + 0]];
|
||||
// const box: Box = [Math.trunc(boxRaw[0] * (image.shape[2] || 0)), Math.trunc(boxRaw[1] * (image.shape[1] || 0)), Math.trunc(boxRaw[2] * (image.shape[2] || 0)), Math.trunc(boxRaw[3] * (image.shape[1] || 0))];
|
||||
const boxRaw: Box = [kpt[51 + 1], kpt[51 + 0], kpt[51 + 3] - kpt[51 + 1], kpt[51 + 2] - kpt[51 + 0]];
|
||||
const boxNorm: Box = [Math.trunc(boxRaw[0] * (image.shape[2] || 0)), Math.trunc(boxRaw[1] * (image.shape[1] || 0)), Math.trunc(boxRaw[2] * (image.shape[2] || 0)), Math.trunc(boxRaw[3] * (image.shape[1] || 0))];
|
||||
const annotations: Record<BodyAnnotation, Point[][]> = {} as Record<BodyAnnotation, Point[][]>;
|
||||
for (const [name, indexes] of Object.entries(coords.connected)) {
|
||||
const pt: Point[][] = [];
|
||||
|
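For context, the parsing above reads the MoveNet-MultiPose output as one row of 56 values per detected person: 17 keypoints stored as [y, x, score] triplets, followed by [ymin, xmin, ymax, xmax, score] for the model's built-in box. A minimal decoding sketch in TypeScript (the helper name and its inputs are illustrative, not part of the library):

type PersonBox = [number, number, number, number];
function decodePersonRow(row: number[], width: number, height: number) {
  const keypoints: { x: number, y: number, score: number }[] = [];
  for (let i = 0; i < 17; i++) keypoints.push({ y: row[3 * i + 0], x: row[3 * i + 1], score: row[3 * i + 2] }); // normalized 0..1, score at offset 2 as read above
  const boxScore = row[51 + 4]; // confidence of the built-in box
  const boxRaw: PersonBox = [row[51 + 1], row[51 + 0], row[51 + 3] - row[51 + 1], row[51 + 2] - row[51 + 0]]; // [x, y, w, h] normalized
  const box: PersonBox = [Math.trunc(boxRaw[0] * width), Math.trunc(boxRaw[1] * height), Math.trunc(boxRaw[2] * width), Math.trunc(boxRaw[3] * height)]; // in pixels
  return { keypoints, boxScore, boxRaw, box };
}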
@ -113,7 +115,8 @@ function parseMultiPose(res, config, image) {
|
|||
}
|
||||
annotations[name] = pt;
|
||||
}
|
||||
const body: BodyResult = { id, score: totalScore, box: newBox.box, boxRaw: newBox.boxRaw, keypoints: [...keypoints], annotations };
|
||||
// const body: BodyResult = { id, score: totalScore, box: newBox.box, boxRaw: newBox.boxRaw, keypoints: [...keypoints], annotations };
|
||||
const body: BodyResult = { id, score: boxScore, box: boxNorm, boxRaw, keypoints: [...keypoints], annotations };
|
||||
fix.bodyParts(body);
|
||||
bodies.push(body);
|
||||
}
|
||||
|
@ -135,39 +138,6 @@ export async function predict(input: Tensor, config: Config): Promise<BodyResult
|
|||
return new Promise(async (resolve) => {
|
||||
const t: Record<string, Tensor> = {};
|
||||
skipped = 0;
|
||||
// run detection on squared input and cached boxes
|
||||
/*
|
||||
cache.bodies = []; // reset bodies result
|
||||
if (cache.boxes.length >= (config.body.maxDetected || 0)) { // if we have enough cached boxes run detection using cache
|
||||
for (let i = 0; i < cache.boxes.length; i++) { // run detection based on cached boxes
|
||||
t.crop = tf.image.cropAndResize(input, [cache.boxes[i]], [0], [inputSize, inputSize], 'bilinear');
|
||||
t.cast = tf.cast(t.crop, 'int32');
|
||||
// t.input = prepareImage(input);
|
||||
t.res = model?.execute(t.cast) as Tensor;
|
||||
const res = await t.res.array();
|
||||
const newBodies = (t.res.shape[2] === 17) ? await parseSinglePose(res, config, input, cache.boxes[i]) : await parseMultiPose(res, config, input, cache.boxes[i]);
|
||||
cache.bodies = cache.bodies.concat(newBodies);
|
||||
Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
|
||||
}
|
||||
}
|
||||
if (cache.bodies.length !== config.body.maxDetected) { // did not find enough bodies based on cached boxes so run detection on full frame
|
||||
t.input = prepareImage(input);
|
||||
t.res = model?.execute(t.input) as Tensor;
|
||||
const res = await t.res.array();
|
||||
cache.bodies = (t.res.shape[2] === 17) ? await parseSinglePose(res, config, input, [0, 0, 1, 1]) : await parseMultiPose(res, config, input, [0, 0, 1, 1]);
|
||||
for (const body of cache.bodies) rescaleBody(body, [input.shape[2] || 1, input.shape[1] || 1]);
|
||||
Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
|
||||
}
|
||||
cache.boxes.length = 0; // reset cache
|
||||
for (let i = 0; i < cache.bodies.length; i++) {
|
||||
if (cache.bodies[i].keypoints.length > (coords.kpt.length / 2)) { // only update cache if we detected at least half keypoints
|
||||
const scaledBox = box.scale(cache.bodies[i].boxRaw, boxExpandFact);
|
||||
const cropBox = box.crop(scaledBox);
|
||||
cache.boxes.push(cropBox);
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
// run detection on squared input and no cached boxes
|
||||
t.input = fix.padInput(input, inputSize);
|
||||
t.res = model?.execute(t.input) as Tensor;
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/* eslint-disable no-multi-spaces */
|
||||
|
||||
/** Possible TensorFlow backends */
|
||||
export type BackendEnum = '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu';
|
||||
export type BackendEnum = '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu' | 'none';
|
||||
|
||||
/** Possible values for `human.warmup` */
|
||||
export type WarmupEnum = '' | 'none' | 'face' | 'full' | 'body';
|
||||
|
@ -33,8 +33,14 @@ export interface FaceDetectorConfig extends GenericConfig {
|
|||
maxDetected: number,
|
||||
/** minimum confidence for a detected face before results are discarded */
|
||||
minConfidence: number,
|
||||
/** minimum size in pixels of a detected face box before results are discarded */
|
||||
minSize: number,
|
||||
/** minimum overlap between two detected faces before one is discarded */
|
||||
iouThreshold: number,
|
||||
/** how much should face box be enlarged over the min/max facial coordinates */
|
||||
scale: number,
|
||||
/** automatically pad image to square */
|
||||
square: boolean,
|
||||
/** should child models perform on masked image of a face */
|
||||
mask: boolean,
|
||||
/** should face detection return processed and cropped face tensor that can be used with an external model for additional processing?
|
||||
|
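A minimal configuration sketch covering the detector options documented above (values are illustrative; the defaults appear further down in this diff):

const humanConfig = {
  face: {
    enabled: true,
    detector: {
      maxDetected: 1,      // maximum number of faces to return
      minConfidence: 0.2,  // discard detections below this score
      minSize: 32,         // discard face boxes smaller than 32px (illustrative value, default is 0)
      iouThreshold: 0.1,   // discard heavily overlapping detections
      scale: 1.4,          // enlarge box over the min/max facial coordinates
      square: true,        // pad input to square before detection
      mask: false,         // do not mask the face crop before running child models
    },
  },
};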
@ -49,7 +55,10 @@ export interface FaceMeshConfig extends GenericConfig {
|
|||
}
|
||||
|
||||
/** Iris part of face configuration */
|
||||
export interface FaceIrisConfig extends GenericConfig {}
|
||||
export interface FaceIrisConfig extends GenericConfig {
|
||||
/** how much should iris box be enlarged over the min/max iris coordinates */
|
||||
scale: number,
|
||||
}
|
||||
|
||||
/** Attention part of face configuration */
|
||||
export interface FaceAttentionConfig extends GenericConfig {}
|
||||
|
@ -374,12 +383,14 @@ const config: Config = {
|
|||
enabled: true,
|
||||
detector: {
|
||||
modelPath: 'blazeface.json',
|
||||
rotation: true,
|
||||
rotation: false,
|
||||
maxDetected: 1,
|
||||
skipFrames: 99,
|
||||
skipTime: 2500,
|
||||
minConfidence: 0.2,
|
||||
minSize: 0,
|
||||
iouThreshold: 0.1,
|
||||
scale: 1.4,
|
||||
mask: false,
|
||||
return: false,
|
||||
},
|
||||
|
@ -394,6 +405,7 @@ const config: Config = {
|
|||
},
|
||||
iris: {
|
||||
enabled: true,
|
||||
scale: 2.3,
|
||||
modelPath: 'iris.json',
|
||||
},
|
||||
emotion: {
|
||||
|
|
|
@ -20,6 +20,7 @@ export function body(inCanvas: AnyCanvas, result: BodyResult[], drawOptions?: Pa
|
|||
rect(ctx, result[i].box[0], result[i].box[1], result[i].box[2], result[i].box[3], localOptions);
|
||||
if (localOptions.drawLabels && (localOptions.bodyLabels?.length > 0)) {
|
||||
let l = localOptions.bodyLabels.slice();
|
||||
l = replace(l, '[id]', result[i].id.toFixed(0));
|
||||
l = replace(l, '[score]', 100 * result[i].score);
|
||||
labels(ctx, l, result[i].box[0], result[i].box[1], localOptions);
|
||||
}
|
||||
|
|
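The label handling above substitutes square-bracket placeholders per result, so labels can be templated. A minimal usage sketch (placeholder names taken from the replace() calls in this diff; canvas and result are assumed to be an output canvas and a prior detection result):

const drawOptions = {
  drawLabels: true,
  bodyLabels: 'person [id]: [score]%',        // [id] and [score] are substituted per detected body
  handLabels: '[label] [score]%',             // hand label and score
  objectLabels: '[label] [score]%',           // detected object class and score
  faceLabels: 'face [id] [gender] [score]%',  // face id, gender and score
};
await human.draw.all(canvas, result, drawOptions);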
|
@ -2,6 +2,7 @@
|
|||
* Module that implements helper draw functions, exposed as human.draw
|
||||
*/
|
||||
|
||||
import * as tf from 'dist/tfjs.esm.js';
|
||||
import { mergeDeep, now } from '../util/util';
|
||||
import { env } from '../util/env';
|
||||
import { getCanvasContext, rect } from './primitives';
|
||||
|
@ -14,6 +15,7 @@ import { gesture } from './gesture';
|
|||
import { defaultLabels } from './labels';
|
||||
import type { Result, PersonResult } from '../result';
|
||||
import type { AnyCanvas, DrawOptions } from '../exports';
|
||||
import type { Tensor2D } from '../tfjs/types';
|
||||
|
||||
let drawTime = 0;
|
||||
|
||||
|
@ -60,6 +62,22 @@ export function canvas(input: AnyCanvas | HTMLImageElement | HTMLVideoElement, o
|
|||
ctx.drawImage(input, 0, 0);
|
||||
}
|
||||
|
||||
/** draw processed canvas */
|
||||
export async function tensor(input: Tensor2D, output: HTMLCanvasElement) {
|
||||
if (!input || !output) return;
|
||||
if (!env.browser) return;
|
||||
// const backend = tf.getBackend();
|
||||
// if (backend === 'webgpu') tf.browser.draw(input, output);
|
||||
// else await tf.browser.toPixels(input, output);
|
||||
await tf.browser.toPixels(input, output);
|
||||
// const ctx = getCanvasContext(output) as CanvasRenderingContext2D;
|
||||
// if (!ctx) return;
|
||||
// const image = await process(input);
|
||||
// result.canvas = image.canvas;
|
||||
// human.tf.dispose(image.tensor);
|
||||
// ctx.drawImage(image.canvas, 0, 0);
|
||||
}
|
||||
|
||||
/** meta-function that performs draw for: canvas, face, body, hand */
|
||||
export async function all(inCanvas: AnyCanvas, result: Result, drawOptions?: Partial<DrawOptions>) {
|
||||
if (!result?.performance || !inCanvas) return null;
|
||||
|
|
|
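The new draw.tensor helper paints a tensor straight onto a canvas via tf.browser.toPixels. A minimal usage sketch under stated assumptions (the placeholder tensor and the cast are illustrative; in practice the input would be an image-shaped tensor with values in the range toPixels expects):

const pixels = human.tf.ones([256, 256, 3], 'int32'); // placeholder [height, width, 3] tensor; real use would pass a processed frame
await human.draw.tensor(pixels as unknown as H.Tensor2D, dom.canvas); // thin wrapper over tf.browser.toPixels
human.tf.dispose(pixels);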
@ -11,6 +11,7 @@ let localOptions: DrawOptions;
|
|||
function drawLabels(f: FaceResult, ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D) {
|
||||
if (!localOptions.drawLabels || (localOptions.faceLabels?.length === 0)) return;
|
||||
let l = localOptions.faceLabels.slice();
|
||||
l = replace(l, '[id]', f.id.toFixed(0));
|
||||
if (f.score) l = replace(l, '[score]', 100 * f.score);
|
||||
if (f.gender) l = replace(l, '[gender]', f.gender);
|
||||
if (f.genderScore) l = replace(l, '[genderScore]', 100 * f.genderScore);
|
||||
|
@ -118,13 +119,22 @@ function drawFacePolygons(f: FaceResult, ctx: CanvasRenderingContext2D | Offscre
|
|||
}
|
||||
|
||||
function drawFacePoints(f: FaceResult, ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D) {
|
||||
if (localOptions.drawPoints && f.mesh.length >= 468) {
|
||||
for (let i = 0; i < f.mesh.length; i++) {
|
||||
point(ctx, f.mesh[i][0], f.mesh[i][1], f.mesh[i][2], localOptions);
|
||||
if (localOptions.drawAttention) {
|
||||
if (facemeshConstants.LANDMARKS_REFINEMENT_LIPS_CONFIG.includes(i)) point(ctx, f.mesh[i][0], f.mesh[i][1], (f.mesh[i][2] as number) + 127, localOptions);
|
||||
if (facemeshConstants.LANDMARKS_REFINEMENT_LEFT_EYE_CONFIG.includes(i)) point(ctx, f.mesh[i][0], f.mesh[i][1], (f.mesh[i][2] as number) - 127, localOptions);
|
||||
if (facemeshConstants.LANDMARKS_REFINEMENT_RIGHT_EYE_CONFIG.includes(i)) point(ctx, f.mesh[i][0], f.mesh[i][1], (f.mesh[i][2] as number) - 127, localOptions);
|
||||
if (localOptions.drawPoints) {
|
||||
if (f?.mesh.length >= 468) {
|
||||
for (let i = 0; i < f.mesh.length; i++) {
|
||||
point(ctx, f.mesh[i][0], f.mesh[i][1], f.mesh[i][2], localOptions);
|
||||
if (localOptions.drawAttention) {
|
||||
if (facemeshConstants.LANDMARKS_REFINEMENT_LIPS_CONFIG.includes(i)) point(ctx, f.mesh[i][0], f.mesh[i][1], (f.mesh[i][2] as number) + 127, localOptions);
|
||||
if (facemeshConstants.LANDMARKS_REFINEMENT_LEFT_EYE_CONFIG.includes(i)) point(ctx, f.mesh[i][0], f.mesh[i][1], (f.mesh[i][2] as number) - 127, localOptions);
|
||||
if (facemeshConstants.LANDMARKS_REFINEMENT_RIGHT_EYE_CONFIG.includes(i)) point(ctx, f.mesh[i][0], f.mesh[i][1], (f.mesh[i][2] as number) - 127, localOptions);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (const [k, v] of Object.entries(f?.annotations || {})) {
|
||||
if (!v?.[0]) continue;
|
||||
const pt = v[0];
|
||||
point(ctx, pt[0], pt[1], 0, localOptions);
|
||||
if (localOptions.drawLabels) labels(ctx, k, pt[0], pt[1], localOptions);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
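With the change above, drawPoints falls back to drawing one point per annotation whenever a full 468-point mesh is not available. A minimal sketch of the related draw options (illustrative values):

human.draw.options.drawPoints = true;     // draw all mesh points when a 468-point mesh is available, otherwise one point per annotation
human.draw.options.drawAttention = true;  // offset lips/eye refinement points in depth so they stand out
human.draw.options.drawPolygons = false;  // skip mesh polygons to keep the overlay light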
|
@ -19,6 +19,7 @@ export function hand(inCanvas: AnyCanvas, result: HandResult[], drawOptions?: Pa
|
|||
rect(ctx, h.box[0], h.box[1], h.box[2], h.box[3], localOptions);
|
||||
if (localOptions.drawLabels && (localOptions.handLabels?.length > 0)) {
|
||||
let l = localOptions.handLabels.slice();
|
||||
l = replace(l, '[id]', h.id.toFixed(0));
|
||||
l = replace(l, '[label]', h.label);
|
||||
l = replace(l, '[score]', 100 * h.score);
|
||||
labels(ctx, l, h.box[0], h.box[1], localOptions);
|
||||
|
|
|
@ -19,6 +19,7 @@ export function object(inCanvas: AnyCanvas, result: ObjectResult[], drawOptions?
|
|||
rect(ctx, h.box[0], h.box[1], h.box[2], h.box[3], localOptions);
|
||||
if (localOptions.drawLabels && (localOptions.objectLabels?.length > 0)) {
|
||||
let l = localOptions.objectLabels.slice();
|
||||
l = replace(l, '[id]', h.id.toFixed(0));
|
||||
l = replace(l, '[label]', h.label);
|
||||
l = replace(l, '[score]', 100 * h.score);
|
||||
labels(ctx, l, h.box[0], h.box[1], localOptions);
|
||||
|
|
|
@ -7,7 +7,7 @@ export const getCanvasContext = (input: AnyCanvas) => {
|
|||
if (!input) log('draw error: invalid canvas');
|
||||
else if (!input.getContext) log('draw error: canvas context not defined');
|
||||
else {
|
||||
const ctx = input.getContext('2d');
|
||||
const ctx = input.getContext('2d', { willReadFrequently: true });
|
||||
if (!ctx) log('draw error: cannot get canvas context');
|
||||
else return ctx;
|
||||
}
|
||||
|
|
|
@ -9,7 +9,7 @@ export * from './result';
|
|||
/**
|
||||
* Explicit re-export of main @tensorflow/tfjs types
|
||||
*/
|
||||
export type { Tensor, Tensor4D, GraphModel, Rank } from './tfjs/types';
|
||||
export type { Tensor, Tensor1D, Tensor2D, Tensor3D, Tensor4D, TensorLike, GraphModel, Rank } from './tfjs/types';
|
||||
|
||||
// re-export types
|
||||
export type { DrawOptions } from './draw/options';
|
||||
|
|
|
@ -60,7 +60,9 @@ export const calculateFaceAngle = (face: FaceResult, imageSize: [number, number]
|
|||
let thetaZ: number;
|
||||
if (r10 < 1) { // YZX calculation
|
||||
if (r10 > -1) {
|
||||
thetaZ = Math.asin(r10);
|
||||
// thetaZ = Math.asin(r10);
|
||||
const cosThetaZ = Math.sqrt(r00 * r00 + r20 * r20); // <https://github.com/vladmandic/human/issues/464>
|
||||
thetaZ = Math.atan2(r10, cosThetaZ);
|
||||
thetaY = Math.atan2(-r20, r00);
|
||||
thetaX = Math.atan2(-r12, r11);
|
||||
} else {
|
||||
|
@ -76,9 +78,9 @@ export const calculateFaceAngle = (face: FaceResult, imageSize: [number, number]
|
|||
if (Number.isNaN(thetaX)) thetaX = 0;
|
||||
if (Number.isNaN(thetaY)) thetaY = 0;
|
||||
if (Number.isNaN(thetaZ)) thetaZ = 0;
|
||||
return { pitch: 2 * -thetaX, yaw: 2 * -thetaY, roll: 2 * -thetaZ };
|
||||
// return { pitch: 2 * -thetaX, yaw: 2 * -thetaY, roll: 2 * -thetaZ };
|
||||
return { pitch: -thetaX, yaw: -thetaY, roll: -thetaZ };
|
||||
};
|
||||
|
||||
/*
|
||||
const meshToEulerAngle = (mesh) => { // simple Euler angle calculation based existing 3D mesh
|
||||
const radians = (a1, a2, b1, b2) => Math.atan2(b2 - a2, b1 - a1);
|
||||
|
|
|
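The change above swaps the roll term from asin(r10) to an atan2 form, which stays numerically stable as r10 approaches +/-1, and drops the earlier 2x multiplier on the returned angles. A minimal standalone sketch of the same YZX extraction, covering only the non-degenerate branch shown in the diff (r is assumed to be a row-major 3x3 rotation matrix):

function eulerYZX(r: number[][]) {
  const r00 = r[0][0], r10 = r[1][0], r11 = r[1][1], r12 = r[1][2], r20 = r[2][0];
  const thetaZ = Math.atan2(r10, Math.sqrt(r00 * r00 + r20 * r20)); // roll; atan2 avoids asin instability near +/-90deg
  const thetaY = Math.atan2(-r20, r00);                             // yaw
  const thetaX = Math.atan2(-r12, r11);                             // pitch
  return { pitch: -thetaX, yaw: -thetaY, roll: -thetaZ };           // sign convention follows the code above
}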
@ -4,7 +4,7 @@ export function calculateCameraDistance(face: FaceResult, width: number): number
|
|||
// iris points are [center, left, top, right, bottom]
|
||||
// average size of human iris is 11.7mm - fairly constant for all ages/genders/races
|
||||
const f = face?.annotations;
|
||||
if (!f) return 0;
|
||||
if (!f?.leftEyeIris || !f?.rightEyeIris) return 0;
|
||||
// get size of left and right iris in pixels, pick larger one as it's likely to be more accurate and normalize to 0..1 range instead of pixels
|
||||
const irisSize = Math.max(Math.abs(f.leftEyeIris[3][0] - f.leftEyeIris[1][0]), Math.abs(f.rightEyeIris[3][0] - f.rightEyeIris[1][0])) / width;
|
||||
// distance of eye from camera in meters
|
||||
|
@ -15,7 +15,7 @@ export function calculateCameraDistance(face: FaceResult, width: number): number
|
|||
export function calculateEyesDistance(face: FaceResult, width: number): number {
|
||||
// average distance between eyes is 65mm - fairly constant for typical adult male, but varies otherwise
|
||||
const f = face?.annotations;
|
||||
if (!f) return 0;
|
||||
if (!f?.leftEyeIris || !f?.rightEyeIris) return 0;
|
||||
// get size of left and right iris in pixels, pick larger one as it's likely to be more accurate and normalize to 0..1 range instead of pixels
|
||||
const irisSize = Math.max(Math.abs(f.leftEyeIris[3][0] - f.leftEyeIris[1][0]), Math.abs(f.rightEyeIris[3][0] - f.rightEyeIris[1][0])) / width;
|
||||
// pixel x and y distance of centers of left and right iris, you can use edges instead
|
||||
|
|
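As a back-of-the-envelope illustration of the principle behind these helpers (not the library's exact constants): with a pinhole camera model, distance = real size x focal length / pixel size. Assuming an iris diameter of roughly 11.7mm, a sketch:

// focalPx is the camera focal length expressed in pixels; an assumed value, not something human measures for you
function estimateDistanceMeters(irisPx: number, focalPx: number, irisMm = 11.7): number {
  return (irisMm / 1000) * focalPx / irisPx; // e.g. a 23px iris with ~600px focal length => ~0.31m from the camera
}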
|
@ -14,13 +14,12 @@ import { env } from '../util/env';
|
|||
import type { Point } from '../result';
|
||||
|
||||
const keypointsCount = 6;
|
||||
const faceBoxScaleFactor = 1.4;
|
||||
let model: GraphModel | null;
|
||||
let anchors: Tensor | null = null;
|
||||
let inputSize = 0;
|
||||
let inputSizeT: Tensor | null = null;
|
||||
|
||||
interface DetectBox { startPoint: Point, endPoint: Point, landmarks: Point[], confidence: number }
|
||||
export interface DetectBox { startPoint: Point, endPoint: Point, landmarks: Point[], confidence: number, size: [number, number] }
|
||||
|
||||
export const size = () => inputSize;
|
||||
|
||||
|
@ -52,13 +51,24 @@ function decodeBoxes(boxOutputs: Tensor) {
|
|||
return boxes;
|
||||
}
|
||||
|
||||
export async function getBoxes(inputImage: Tensor4D, config: Config) {
|
||||
export async function getBoxes(inputImage: Tensor4D, config: Config): Promise<DetectBox[]> {
|
||||
// sanity check on input
|
||||
if ((!inputImage) || (inputImage['isDisposedInternal']) || (inputImage.shape.length !== 4) || (inputImage.shape[1] < 1) || (inputImage.shape[2] < 1)) return [];
|
||||
const t: Record<string, Tensor> = {};
|
||||
t.resized = tf.image.resizeBilinear(inputImage, [inputSize, inputSize]);
|
||||
let pad = [0, 0];
|
||||
let scale = [1, 1];
|
||||
if (config?.face?.detector?.square) {
|
||||
const xy = Math.max(inputImage.shape[2], inputImage.shape[1]);
|
||||
pad = [Math.floor((xy - inputImage.shape[2]) / 2), Math.floor((xy - inputImage.shape[1]) / 2)];
|
||||
t.padded = tf.pad(inputImage, [[0, 0], [pad[1], pad[1]], [pad[0], pad[0]], [0, 0]]);
|
||||
scale = [inputImage.shape[2] / xy, inputImage.shape[1] / xy];
|
||||
pad = [pad[0] / inputSize, pad[1] / inputSize];
|
||||
} else {
|
||||
t.padded = inputImage.clone();
|
||||
}
|
||||
t.resized = tf.image.resizeBilinear(t.padded as Tensor4D, [inputSize, inputSize]);
|
||||
t.div = tf.div(t.resized, constants.tf127);
|
||||
t.normalized = tf.sub(t.div, constants.tf05);
|
||||
t.normalized = tf.sub(t.div, constants.tf1);
|
||||
const res = model?.execute(t.normalized) as Tensor[];
|
||||
if (Array.isArray(res) && res.length > 2) { // pinto converted model?
|
||||
const sorted = res.sort((a, b) => a.size - b.size);
|
||||
|
@ -89,16 +99,24 @@ export async function getBoxes(inputImage: Tensor4D, config: Config) {
|
|||
b.squeeze = tf.squeeze(b.slice);
|
||||
b.landmarks = tf.reshape(b.squeeze, [keypointsCount, -1]);
|
||||
const points = await b.bbox.data();
|
||||
const unpadded = [ // TODO fix this math
|
||||
points[0] * scale[0] - pad[0],
|
||||
points[1] * scale[1] - pad[1],
|
||||
points[2] * scale[0] - pad[0],
|
||||
points[3] * scale[1] - pad[1],
|
||||
];
|
||||
const rawBox = {
|
||||
startPoint: [points[0], points[1]] as Point,
|
||||
endPoint: [points[2], points[3]] as Point,
|
||||
startPoint: [unpadded[0], unpadded[1]] as Point,
|
||||
endPoint: [unpadded[2], unpadded[3]] as Point,
|
||||
landmarks: (await b.landmarks.array()) as Point[],
|
||||
confidence,
|
||||
};
|
||||
const scaledBox = util.scaleBoxCoordinates(rawBox, [(inputImage.shape[2] || 0) / inputSize, (inputImage.shape[1] || 0) / inputSize]);
|
||||
const enlargedBox = util.enlargeBox(scaledBox, config.face['scale'] || faceBoxScaleFactor);
|
||||
b.anchor = tf.slice(anchors as Tensor, [nms[i], 0], [1, 2]);
|
||||
const anchor = await b.anchor.data();
|
||||
const scaledBox = util.scaleBoxCoordinates(rawBox, [(inputImage.shape[2] || 0) / inputSize, (inputImage.shape[1] || 0) / inputSize], anchor);
|
||||
const enlargedBox = util.enlargeBox(scaledBox, config.face.detector?.scale || 1.4);
|
||||
const squaredBox = util.squarifyBox(enlargedBox);
|
||||
boxes.push(squaredBox);
|
||||
if (squaredBox.size[0] > (config.face.detector?.['minSize'] || 0) && squaredBox.size[1] > (config.face.detector?.['minSize'] || 0)) boxes.push(squaredBox);
|
||||
Object.keys(b).forEach((tensor) => tf.dispose(b[tensor]));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -40,7 +40,6 @@ export const detectFace = async (instance: Human /* instance of human */, input:
|
|||
|
||||
const faceRes: FaceResult[] = [];
|
||||
instance.state = 'run:face';
|
||||
|
||||
const faces: FaceResult[] = await facemesh.predict(input, instance.config);
|
||||
instance.performance.face = env.perfadd ? (instance.performance.face || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
|
||||
if (!input.shape || input.shape.length !== 4) return [];
|
||||
|
|
|
@ -9,13 +9,11 @@ import type { Config } from '../config';
|
|||
type Box = [number, number, number, number];
|
||||
|
||||
export class FaceBoxes {
|
||||
enlarge: number;
|
||||
model: GraphModel;
|
||||
config: Config;
|
||||
inputSize: 0;
|
||||
|
||||
constructor(model, config: Config) {
|
||||
this.enlarge = 1.1;
|
||||
this.model = model;
|
||||
this.config = config;
|
||||
this.inputSize = model.inputs[0].shape ? model.inputs[0].shape[2] : 0;
|
||||
|
@ -23,6 +21,7 @@ export class FaceBoxes {
|
|||
|
||||
async estimateFaces(input, config) {
|
||||
if (config) this.config = config;
|
||||
const enlarge = this.config.face.detector?.minConfidence || 0.1;
|
||||
const results: { confidence: number, box: Box, boxRaw: Box, image: Tensor }[] = [];
|
||||
const resizeT = tf.image.resizeBilinear(input, [this.inputSize, this.inputSize]);
|
||||
const castT = resizeT.toInt();
|
||||
|
@ -38,7 +37,7 @@ export class FaceBoxes {
|
|||
resizeT.dispose();
|
||||
for (let i = 0; i < boxes.length; i++) {
|
||||
if (scores[i] && scores[i] > (this.config.face.detector?.minConfidence || 0.1)) {
|
||||
const crop = [boxes[i][0] / this.enlarge, boxes[i][1] / this.enlarge, boxes[i][2] * this.enlarge, boxes[i][3] * this.enlarge];
|
||||
const crop = [boxes[i][0] / enlarge, boxes[i][1] / enlarge, boxes[i][2] * enlarge, boxes[i][3] * enlarge];
|
||||
const boxRaw: Box = [crop[1], crop[0], (crop[3]) - (crop[1]), (crop[2]) - (crop[0])];
|
||||
const box: Box = [
|
||||
parseInt((boxRaw[0] * input.shape[2]).toString()),
|
||||
|
|
|
@ -20,8 +20,7 @@ import { env } from '../util/env';
|
|||
import type { GraphModel, Tensor, Tensor4D } from '../tfjs/types';
|
||||
import type { FaceResult, FaceLandmark, Point } from '../result';
|
||||
import type { Config } from '../config';
|
||||
|
||||
interface DetectBox { startPoint: Point, endPoint: Point, landmarks: Point[], confidence: number }
|
||||
import type { DetectBox } from './blazeface';
|
||||
|
||||
const cache = {
|
||||
boxes: [] as DetectBox[],
|
||||
|
@ -33,7 +32,6 @@ let model: GraphModel | null = null;
|
|||
let inputSize = 0;
|
||||
|
||||
export async function predict(input: Tensor4D, config: Config): Promise<FaceResult[]> {
|
||||
if (!model?.['executor']) return [];
|
||||
// reset cached boxes
|
||||
const skipTime = (config.face.detector?.skipTime || 0) > (now() - cache.timestamp);
|
||||
const skipFrame = cache.skipped < (config.face.detector?.skipFrames || 0);
|
||||
|
@ -61,6 +59,7 @@ export async function predict(input: Tensor4D, config: Config): Promise<FaceResu
|
|||
score: 0,
|
||||
boxScore: 0,
|
||||
faceScore: 0,
|
||||
size: [0, 0],
|
||||
// contoursRaw: [],
|
||||
// contours: [],
|
||||
annotations: {} as Record<FaceLandmark, Point[]>,
|
||||
|
@ -74,18 +73,14 @@ export async function predict(input: Tensor4D, config: Config): Promise<FaceResu
|
|||
if (equilized) face.tensor = equilized;
|
||||
}
|
||||
face.boxScore = Math.round(100 * box.confidence) / 100;
|
||||
if (!config.face.mesh?.enabled) { // mesh not enabled, return results from detector only
|
||||
if (!config.face.mesh?.enabled || !model?.['executor']) { // mesh not enabled or not loaded, return results from detector only
|
||||
face.box = util.clampBox(box, input);
|
||||
face.boxRaw = util.getRawBox(box, input);
|
||||
face.score = face.boxScore;
|
||||
face.mesh = box.landmarks.map((pt) => [
|
||||
((box.startPoint[0] + box.endPoint[0])) / 2 + ((box.endPoint[0] + box.startPoint[0]) * pt[0] / blazeface.size()),
|
||||
((box.startPoint[1] + box.endPoint[1])) / 2 + ((box.endPoint[1] + box.startPoint[1]) * pt[1] / blazeface.size()),
|
||||
]);
|
||||
face.size = box.size;
|
||||
face.mesh = box.landmarks;
|
||||
face.meshRaw = face.mesh.map((pt) => [pt[0] / (input.shape[2] || 0), pt[1] / (input.shape[1] || 0), (pt[2] || 0) / size]);
|
||||
for (const key of Object.keys(coords.blazeFaceLandmarks)) {
|
||||
face.annotations[key] = [face.mesh[coords.blazeFaceLandmarks[key] as number]]; // add annotations
|
||||
}
|
||||
for (const key of Object.keys(coords.blazeFaceLandmarks)) face.annotations[key] = [face.mesh[coords.blazeFaceLandmarks[key] as number]]; // add annotations
|
||||
} else if (!model) { // mesh enabled, but not loaded
|
||||
if (config.debug) log('face mesh detection requested, but model is not loaded');
|
||||
} else { // mesh enabled
|
||||
|
@ -100,14 +95,12 @@ export async function predict(input: Tensor4D, config: Config): Promise<FaceResu
|
|||
face.faceScore = Math.round(100 * faceConfidence[0]) / 100;
|
||||
if (face.faceScore < (config.face.detector?.minConfidence || 1)) { // low confidence in detected mesh
|
||||
box.confidence = face.faceScore; // reset confidence of cached box
|
||||
if (config.face.mesh.keepInvalid) {
|
||||
if (config.face.mesh['keepInvalid']) {
|
||||
face.box = util.clampBox(box, input);
|
||||
face.boxRaw = util.getRawBox(box, input);
|
||||
face.size = box.size;
|
||||
face.score = face.boxScore;
|
||||
face.mesh = box.landmarks.map((pt) => [
|
||||
((box.startPoint[0] + box.endPoint[0])) / 2 + ((box.endPoint[0] + box.startPoint[0]) * pt[0] / blazeface.size()),
|
||||
((box.startPoint[1] + box.endPoint[1])) / 2 + ((box.endPoint[1] + box.startPoint[1]) * pt[1] / blazeface.size()),
|
||||
]);
|
||||
face.mesh = box.landmarks;
|
||||
face.meshRaw = face.mesh.map((pt) => [pt[0] / (input.shape[2] || 1), pt[1] / (input.shape[1] || 1), (pt[2] || 0) / size]);
|
||||
for (const key of Object.keys(coords.blazeFaceLandmarks)) {
|
||||
face.annotations[key] = [face.mesh[coords.blazeFaceLandmarks[key] as number]]; // add annotations
|
||||
|
@ -121,15 +114,21 @@ export async function predict(input: Tensor4D, config: Config): Promise<FaceResu
|
|||
if (config.face.attention?.enabled) {
|
||||
rawCoords = await attention.augment(rawCoords, results); // augment iris results using attention model results
|
||||
} else if (config.face.iris?.enabled) {
|
||||
rawCoords = await iris.augmentIris(rawCoords, face.tensor, inputSize); // run iris model and augment results
|
||||
rawCoords = await iris.augmentIris(rawCoords, face.tensor, inputSize, config); // run iris model and augment results
|
||||
}
|
||||
face.mesh = util.transformRawCoords(rawCoords, box, angle, rotationMatrix, inputSize); // get processed mesh
|
||||
face.meshRaw = face.mesh.map((pt) => [pt[0] / (input.shape[2] || 0), pt[1] / (input.shape[1] || 0), (pt[2] || 0) / size]);
|
||||
for (const key of Object.keys(coords.meshAnnotations)) face.annotations[key] = coords.meshAnnotations[key].map((index) => face.mesh[index]); // add annotations
|
||||
face.score = face.faceScore;
|
||||
const calculatedBox = { ...util.calculateFaceBox(face.mesh, box), confidence: box.confidence, landmarks: box.landmarks };
|
||||
const calculatedBox = {
|
||||
...util.calculateFaceBox(face.mesh, box),
|
||||
confidence: box.confidence,
|
||||
landmarks: box.landmarks,
|
||||
size: box.size,
|
||||
};
|
||||
face.box = util.clampBox(calculatedBox, input);
|
||||
face.boxRaw = util.getRawBox(calculatedBox, input);
|
||||
face.size = calculatedBox.size;
|
||||
/*
|
||||
const contoursT = results.find((t) => t.shape[t.shape.length - 1] === 266) as Tensor;
|
||||
const contoursData = contoursT && await contoursT.data(); // 133 x 2d points
|
||||
|
|
|
@ -31,10 +31,19 @@ export const getRawBox = (box, input): Box => (box ? [
|
|||
(box.endPoint[1] - box.startPoint[1]) / (input.shape[1] || 0),
|
||||
] : [0, 0, 0, 0]);
|
||||
|
||||
export const scaleBoxCoordinates = (box, factor) => {
|
||||
export const scaleBoxCoordinates = (box, factor, anchor) => {
|
||||
const startPoint: Point = [box.startPoint[0] * factor[0], box.startPoint[1] * factor[1]];
|
||||
const endPoint: Point = [box.endPoint[0] * factor[0], box.endPoint[1] * factor[1]];
|
||||
return { startPoint, endPoint, landmarks: box.landmarks, confidence: box.confidence };
|
||||
// const centerPoint = [(startPoint[0] + endPoint[0]) / 2, (startPoint[1] + endPoint[1]) / 2];
|
||||
const landmarks = box.landmarks.map((pt) => [(pt[0] + anchor[0]) * factor[0], (pt[1] + anchor[1]) * factor[1]]);
|
||||
/**
|
||||
face.mesh = box.landmarks.map((pt) => [
|
||||
((box.startPoint[0] + box.endPoint[0]) / 2) + (pt[0] * input.shape[2] / blazeface.size()),
|
||||
((box.startPoint[1] + box.endPoint[1]) / 2) + (pt[1] * input.shape[1] / blazeface.size()),
|
||||
]);
|
||||
*/
|
||||
|
||||
return { startPoint, endPoint, landmarks, confidence: box.confidence };
|
||||
};
|
||||
|
||||
export const cutAndResize = (box, image, cropSize) => {
|
||||
|
@ -51,20 +60,36 @@ export const enlargeBox = (box, factor) => {
|
|||
const center = getBoxCenter(box);
|
||||
const size = getBoxSize(box);
|
||||
const halfSize: [number, number] = [factor * size[0] / 2, factor * size[1] / 2];
|
||||
return { startPoint: [center[0] - halfSize[0], center[1] - halfSize[1]] as Point, endPoint: [center[0] + halfSize[0], center[1] + halfSize[1]] as Point, landmarks: box.landmarks, confidence: box.confidence };
|
||||
return {
|
||||
startPoint: [center[0] - halfSize[0], center[1] - halfSize[1]] as Point,
|
||||
endPoint: [center[0] + halfSize[0], center[1] + halfSize[1]] as Point,
|
||||
landmarks: box.landmarks,
|
||||
confidence: box.confidence,
|
||||
size,
|
||||
};
|
||||
};
|
||||
|
||||
export const squarifyBox = (box) => {
|
||||
const centers = getBoxCenter(box);
|
||||
const size = getBoxSize(box);
|
||||
const halfSize = Math.max(...size) / 2;
|
||||
return { startPoint: [Math.round(centers[0] - halfSize), Math.round(centers[1] - halfSize)] as Point, endPoint: [Math.round(centers[0] + halfSize), Math.round(centers[1] + halfSize)] as Point, landmarks: box.landmarks, confidence: box.confidence };
|
||||
return {
|
||||
startPoint: [Math.round(centers[0] - halfSize), Math.round(centers[1] - halfSize)] as Point,
|
||||
endPoint: [Math.round(centers[0] + halfSize), Math.round(centers[1] + halfSize)] as Point,
|
||||
landmarks: box.landmarks,
|
||||
confidence: box.confidence,
|
||||
size: [Math.round(size[0]), Math.round(size[1])] as [number, number],
|
||||
};
|
||||
};
|
||||
|
||||
export const calculateLandmarksBoundingBox = (landmarks) => {
|
||||
const x = landmarks.map((d) => d[0]);
|
||||
const y = landmarks.map((d) => d[1]);
|
||||
return { startPoint: [Math.min(...x), Math.min(...y)] as Point, endPoint: [Math.max(...x), Math.max(...y)] as Point, landmarks };
|
||||
return {
|
||||
startPoint: [Math.min(...x), Math.min(...y)] as Point,
|
||||
endPoint: [Math.max(...x), Math.max(...y)] as Point,
|
||||
landmarks,
|
||||
};
|
||||
};
|
||||
|
||||
export const fixedRotationMatrix = [[1, 0, 0], [0, 1, 0], [0, 0, 1]];
|
||||
|
|
|
@ -32,10 +32,17 @@ export async function load(config: Config): Promise<GraphModel> {
|
|||
return model;
|
||||
}
|
||||
|
||||
export function enhance(input): Tensor {
|
||||
export function enhance(input, config: Config): Tensor {
|
||||
const tensor = (input.image || input.tensor || input) as Tensor4D; // input received from detector is already normalized to 0..1, input is also assumed to be straightened
|
||||
if (!model?.inputs[0].shape) return tensor; // model has no shape so no point continuing
|
||||
const crop: Tensor = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
|
||||
let crop: Tensor;
|
||||
if (config.face.description?.['crop'] > 0) { // optional crop
|
||||
const cropval = config.face.description?.['crop'];
|
||||
const box = [[cropval, cropval, 1 - cropval, 1 - cropval]];
|
||||
crop = tf.image.cropAndResize(tensor, box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
|
||||
} else {
|
||||
crop = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
|
||||
}
|
||||
const norm: Tensor = tf.mul(crop, constants.tf255);
|
||||
tf.dispose(crop);
|
||||
return norm;
|
||||
|
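The optional 'crop' value introduced here is a 0..1 fraction trimmed from each edge of the face crop before resizing. A minimal sketch (the code above reads it via bracket notation, so it may not be part of the typed Config; values are illustrative):

const humanConfig = {
  face: {
    description: { enabled: true, crop: 0.1 },  // trim 10% from each edge before running the description model
    emotion: { enabled: true, crop: 0.15 },     // same idea for the emotion model, see the emotion changes later in this diff
  },
};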
@ -75,7 +82,7 @@ export async function predict(image: Tensor4D, config: Config, idx: number, coun
|
|||
skipped = 0;
|
||||
return new Promise(async (resolve) => {
|
||||
if (config.face.description?.enabled) {
|
||||
const enhanced = enhance(image);
|
||||
const enhanced = enhance(image, config);
|
||||
const resT = model?.execute(enhanced) as Tensor[];
|
||||
lastTime = now();
|
||||
tf.dispose(enhanced);
|
||||
|
|
|
@ -11,8 +11,6 @@ import type { Point } from '../result';
|
|||
let model: GraphModel | null;
|
||||
let inputSize = 0;
|
||||
|
||||
const irisEnlarge = 2.3;
|
||||
|
||||
const leftOutline = coords.meshAnnotations.leftEyeLower0;
|
||||
const rightOutline = coords.meshAnnotations.rightEyeLower0;
|
||||
|
||||
|
@ -62,8 +60,8 @@ export const getLeftToRightEyeDepthDifference = (rawCoords) => {
|
|||
};
|
||||
|
||||
// Returns a box describing a cropped region around the eye fit for passing to the iris model.
|
||||
export const getEyeBox = (rawCoords, face, eyeInnerCornerIndex, eyeOuterCornerIndex, meshSize, flip = false) => {
|
||||
const box = util.squarifyBox(util.enlargeBox(util.calculateLandmarksBoundingBox([rawCoords[eyeInnerCornerIndex], rawCoords[eyeOuterCornerIndex]]), irisEnlarge));
|
||||
export const getEyeBox = (rawCoords, face, eyeInnerCornerIndex, eyeOuterCornerIndex, meshSize, flip = false, scale = 2.3) => {
|
||||
const box = util.squarifyBox(util.enlargeBox(util.calculateLandmarksBoundingBox([rawCoords[eyeInnerCornerIndex], rawCoords[eyeOuterCornerIndex]]), scale));
|
||||
const boxSize = util.getBoxSize(box);
|
||||
let crop = tf.image.cropAndResize(face, [[
|
||||
box.startPoint[1] / meshSize,
|
||||
|
@ -110,10 +108,10 @@ export const getAdjustedIrisCoords = (rawCoords, irisCoords, direction) => {
|
|||
});
|
||||
};
|
||||
|
||||
export async function augmentIris(rawCoords, face, meshSize) {
|
||||
export async function augmentIris(rawCoords, face, meshSize, config: Config) {
|
||||
if (!model?.['executor']) return rawCoords;
|
||||
const { box: leftEyeBox, boxSize: leftEyeBoxSize, crop: leftEyeCrop } = getEyeBox(rawCoords, face, eyeLandmarks.leftBounds[0], eyeLandmarks.leftBounds[1], meshSize, true);
|
||||
const { box: rightEyeBox, boxSize: rightEyeBoxSize, crop: rightEyeCrop } = getEyeBox(rawCoords, face, eyeLandmarks.rightBounds[0], eyeLandmarks.rightBounds[1], meshSize, true);
|
||||
const { box: leftEyeBox, boxSize: leftEyeBoxSize, crop: leftEyeCrop } = getEyeBox(rawCoords, face, eyeLandmarks.leftBounds[0], eyeLandmarks.leftBounds[1], meshSize, true, config.face.iris?.scale || 2.3);
|
||||
const { box: rightEyeBox, boxSize: rightEyeBoxSize, crop: rightEyeCrop } = getEyeBox(rawCoords, face, eyeLandmarks.rightBounds[0], eyeLandmarks.rightBounds[1], meshSize, true, config.face.iris?.scale || 2.3);
|
||||
const combined = tf.concat([leftEyeCrop, rightEyeCrop]);
|
||||
tf.dispose(leftEyeCrop);
|
||||
tf.dispose(rightEyeCrop);
|
||||
|
|
|
@ -17,7 +17,8 @@ export function distance(descriptor1: Descriptor, descriptor2: Descriptor, optio
|
|||
const diff = (!options.order || options.order === 2) ? (descriptor1[i] - descriptor2[i]) : (Math.abs(descriptor1[i] - descriptor2[i]));
|
||||
sum += (!options.order || options.order === 2) ? (diff * diff) : (diff ** options.order);
|
||||
}
|
||||
return (options.multiplier || 20) * sum;
|
||||
const dist = Math.round(100 * (options.multiplier || 20) * sum) / 100;
|
||||
return dist;
|
||||
}
|
||||
|
||||
// invert distance to similarity, normalize to given range and clamp
|
||||
|
@ -25,7 +26,7 @@ const normalizeDistance = (dist, order, min, max) => {
|
|||
if (dist === 0) return 1; // short circuit for identical inputs
|
||||
const root = order === 2 ? Math.sqrt(dist) : dist ** (1 / order); // take root of distance
|
||||
const norm = (1 - (root / 100) - min) / (max - min); // normalize to range
|
||||
const clamp = Math.max(Math.min(norm, 1), 0); // clamp to 0..1
|
||||
const clamp = Math.round(100 * Math.max(Math.min(norm, 1), 0)) / 100; // clamp to 0..1
|
||||
return clamp;
|
||||
};
|
||||
|
||||
|
|
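With the rounding above, both the raw distance and the normalized similarity now come back with two-decimal precision. A minimal usage sketch (assuming descriptors taken from result.face[n].embedding and the match helpers exposed on the human instance):

const desc1 = firstResult.face[0].embedding as number[];
const desc2 = secondResult.face[0].embedding as number[];
const dist = human.match.distance(desc1, desc2, { order: 2, multiplier: 20 }); // rounded to 2 decimals
const similarity = human.match.similarity(desc1, desc2);                        // 0..1, also rounded to 2 decimals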
|
@ -13,17 +13,24 @@ import { loadModel } from '../tfjs/load';
|
|||
import { env } from '../util/env';
|
||||
import { constants } from '../tfjs/constants';
|
||||
|
||||
const annotations = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral'];
|
||||
let annotations: string[] = [];
|
||||
let model: GraphModel | null;
|
||||
const last: { score: number, emotion: Emotion }[][] = [];
|
||||
let lastCount = 0;
|
||||
let lastTime = 0;
|
||||
let skipped = Number.MAX_SAFE_INTEGER;
|
||||
let rgb = false;
|
||||
|
||||
export async function load(config: Config): Promise<GraphModel> {
|
||||
if (env.initial) model = null;
|
||||
if (!model) model = await loadModel(config.face.emotion?.modelPath);
|
||||
else if (config.debug) log('cached model:', model['modelUrl']);
|
||||
if (!model) {
|
||||
model = await loadModel(config.face.emotion?.modelPath);
|
||||
rgb = model?.inputs?.[0].shape?.[3] === 3;
|
||||
if (!rgb) annotations = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']; // oarriaga and gear
|
||||
else annotations = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']; // affectnet
|
||||
} else if (config.debug) {
|
||||
log('cached model:', model['modelUrl']);
|
||||
}
|
||||
return model;
|
||||
}
|
||||
|
||||
|
@ -41,20 +48,30 @@ export async function predict(image: Tensor4D, config: Config, idx: number, coun
|
|||
if (config.face.emotion?.enabled) {
|
||||
const t: Record<string, Tensor> = {};
|
||||
const inputSize = model?.inputs[0].shape ? model.inputs[0].shape[2] : 0;
|
||||
t.resize = tf.image.resizeBilinear(image, [inputSize, inputSize], false);
|
||||
// const box = [[0.15, 0.15, 0.85, 0.85]]; // empirical values for top, left, bottom, right
|
||||
// const resize = tf.image.cropAndResize(image, box, [0], [inputSize, inputSize]);
|
||||
// [t.red, t.green, t.blue] = tf.split(t.resize, 3, 3);
|
||||
// weighted rgb to grayscale: https://www.mathworks.com/help/matlab/ref/rgb2gray.html
|
||||
// t.redNorm = tf.mul(t.red, rgb[0]);
|
||||
// t.greenNorm = tf.mul(t.green, rgb[1]);
|
||||
// t.blueNorm = tf.mul(t.blue, rgb[2]);
|
||||
// t.grayscale = tf.addN([t.redNorm, t.greenNorm, t.blueNorm]);
|
||||
t.channels = tf.mul(t.resize, constants.rgb);
|
||||
t.grayscale = tf.sum(t.channels, 3, true);
|
||||
t.grayscaleSub = tf.sub(t.grayscale, constants.tf05);
|
||||
t.grayscaleMul = tf.mul(t.grayscaleSub, constants.tf2);
|
||||
t.emotion = model?.execute(t.grayscaleMul) as Tensor; // result is already in range 0..1, no need for additional activation
|
||||
if (config.face.emotion?.['crop'] > 0) { // optional crop
|
||||
const crop = config.face.emotion?.['crop'];
|
||||
const box = [[crop, crop, 1 - crop, 1 - crop]];
|
||||
t.resize = tf.image.cropAndResize(image, box, [0], [inputSize, inputSize]);
|
||||
} else {
|
||||
t.resize = tf.image.resizeBilinear(image, [inputSize, inputSize], false);
|
||||
}
|
||||
if (rgb) {
|
||||
t.mul = tf.mul(t.resize, 255);
|
||||
t.normalize = tf.sub(t.mul, [103.939, 116.779, 123.68]); // affectnet uses specific norm values
|
||||
t.emotion = model?.execute(t.normalize) as Tensor; // result is already in range 0..1, no need for additional activation
|
||||
} else {
|
||||
// [t.red, t.green, t.blue] = tf.split(t.resize, 3, 3);
|
||||
// weighted rgb to grayscale: https://www.mathworks.com/help/matlab/ref/rgb2gray.html
|
||||
// t.redNorm = tf.mul(t.red, rgb[0]);
|
||||
// t.greenNorm = tf.mul(t.green, rgb[1]);
|
||||
// t.blueNorm = tf.mul(t.blue, rgb[2]);
|
||||
// t.grayscale = tf.addN([t.redNorm, t.greenNorm, t.blueNorm]);
|
||||
t.channels = tf.mul(t.resize, constants.rgb);
|
||||
t.grayscale = tf.sum(t.channels, 3, true);
|
||||
t.grayscaleSub = tf.sub(t.grayscale, constants.tf05);
|
||||
t.grayscaleMul = tf.mul(t.grayscaleSub, constants.tf2);
|
||||
t.emotion = model?.execute(t.grayscaleMul) as Tensor; // result is already in range 0..1, no need for additional activation
|
||||
}
|
||||
lastTime = now();
|
||||
const data = await t.emotion.data();
|
||||
for (let i = 0; i < data.length; i++) {
|
||||
|
|
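With the change above, the emotion module picks its label order and preprocessing from the loaded model's input shape: single-channel models keep the grayscale pipeline, while a 3-channel model such as affectnet-mobilenet (listed in the model stats earlier) gets RGB normalization with fixed channel means. A minimal config sketch to opt into the RGB model (the model filename is taken from the stats list; treat it as an assumption for your own deployment):

const humanConfig = {
  modelBasePath: 'https://vladmandic.github.io/human-models/models/',
  face: {
    enabled: true,
    emotion: { enabled: true, modelPath: 'affectnet-mobilenet.json' }, // rgb emotion model; the default emotion model keeps the grayscale pipeline
  },
};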
|
@ -41,7 +41,11 @@ export async function predict(image: Tensor4D, config: Config, idx: number, coun
|
|||
if (!model?.inputs[0].shape) return;
|
||||
const t: Record<string, Tensor> = {};
|
||||
// t.resize = tf.image.resizeBilinear(image, [model?.inputs[0].shape[2], model?.inputs[0].shape[1]], false);
|
||||
const box = [[0.0, 0.10, 0.90, 0.90]]; // empirical values for top, left, bottom, right
|
||||
let box = [[0.0, 0.10, 0.90, 0.90]]; // empirical values for top, left, bottom, right
|
||||
if (config.face.gear?.['crop'] > 0) { // optional crop config value
|
||||
const crop = config.face.gear?.['crop'];
|
||||
box = [[crop, crop, 1 - crop, 1 - crop]];
|
||||
}
|
||||
t.resize = tf.image.cropAndResize(image, box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
|
||||
const obj: GearType = { age: 0, gender: 'unknown', genderScore: 0, race: [] };
|
||||
if (config.face.gear?.enabled) [t.age, t.gender, t.race] = model.execute(t.resize, ['age_output', 'gender_output', 'race_output']) as Tensor[];
|
||||
|
|
|
@@ -37,7 +37,13 @@ export async function predict(image: Tensor4D, config: Config, idx: number, coun
  return new Promise(async (resolve) => {
    if (!model?.inputs || !model.inputs[0] || !model.inputs[0].shape) return;
    const t: Record<string, Tensor> = {};
    t.resize = tf.image.resizeBilinear(image, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
    if (config.face['ssrnet']?.['crop'] > 0) { // optional crop
      const crop = config.face['ssrnet']?.['crop'];
      const box = [[crop, crop, 1 - crop, 1 - crop]];
      t.resize = tf.image.cropAndResize(image, box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
    } else {
      t.resize = tf.image.resizeBilinear(image, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
    }
    t.enhance = tf.mul(t.resize, constants.tf255);
    const obj = { age: 0 };
    if (config.face['ssrnet']?.enabled) t.age = model.execute(t.enhance) as Tensor;
@@ -41,14 +41,25 @@ export async function predict(image: Tensor4D, config: Config, idx, count): Prom
  return new Promise(async (resolve) => {
    if (!model?.inputs[0].shape) return;
    const t: Record<string, Tensor> = {};
    t.resize = tf.image.resizeBilinear(image, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
    if (config.face['ssrnet']?.['crop'] > 0) { // optional crop
      const crop = config.face['ssrnet']?.['crop'];
      const box = [[crop, crop, 1 - crop, 1 - crop]];
      t.resize = tf.image.cropAndResize(image, box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
    } else {
      t.resize = tf.image.resizeBilinear(image, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
    }
    t.enhance = tf.tidy(() => {
      const [red, green, blue] = tf.split(t.resize, 3, 3);
      const redNorm = tf.mul(red, rgb[0]);
      const greenNorm = tf.mul(green, rgb[1]);
      const blueNorm = tf.mul(blue, rgb[2]);
      const grayscale = tf.addN([redNorm, greenNorm, blueNorm]);
      const normalize = tf.mul(tf.sub(grayscale, constants.tf05), 2); // range grayscale:-1..1
      let normalize: Tensor;
      if (model?.inputs?.[0].shape?.[3] === 1) {
        const [red, green, blue] = tf.split(t.resize, 3, 3);
        const redNorm = tf.mul(red, rgb[0]);
        const greenNorm = tf.mul(green, rgb[1]);
        const blueNorm = tf.mul(blue, rgb[2]);
        const grayscale = tf.addN([redNorm, greenNorm, blueNorm]);
        normalize = tf.mul(tf.sub(grayscale, constants.tf05), 2); // range grayscale:-1..1
      } else {
        normalize = tf.mul(tf.sub(t.resize, constants.tf05), 2); // range rgb:-1..1
      }
      return normalize;
    });
    const obj: { gender: Gender, genderScore: number } = { gender: 'unknown', genderScore: 0 };
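The gender model preprocessing above now checks whether the loaded graph expects a single-channel input and only applies the weighted RGB-to-grayscale conversion in that case. A simplified sketch of that branch, assuming plain @tensorflow/tfjs and the common rgb2gray luminance weights (the helper itself is hypothetical):

  import * as tf from '@tensorflow/tfjs';

  // illustrative helper, not part of the library
  function normalizeForModel(resize: tf.Tensor4D, channels: number): tf.Tensor {
    if (channels === 1) {
      const [red, green, blue] = tf.split(resize, 3, 3);
      const grayscale = tf.addN([tf.mul(red, 0.2989), tf.mul(green, 0.587), tf.mul(blue, 0.114)]);
      return tf.mul(tf.sub(grayscale, 0.5), 2); // grayscale mapped to range -1..1
    }
    return tf.mul(tf.sub(resize, 0.5), 2); // rgb mapped to range -1..1
  }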
@@ -99,9 +99,9 @@ export const iris = (res: FaceResult[]): GestureResult[] => {
    const rightIrisCenterX = Math.abs(res[i].mesh[33][0] - res[i].annotations.rightEyeIris[0][0]) / res[i].box[2];
    if (leftIrisCenterX > 0.06 || rightIrisCenterX > 0.06) center = false;
    if (leftIrisCenterX > rightIrisCenterX) { // check eye with bigger offset
      if (leftIrisCenterX > 0.05) gestures.push({ iris: i, gesture: 'looking right' });
      if (rightIrisCenterX > 0.04) gestures.push({ iris: i, gesture: 'looking right' });
    } else {
      if (rightIrisCenterX > 0.05) gestures.push({ iris: i, gesture: 'looking left' });
      if (leftIrisCenterX > 0.04) gestures.push({ iris: i, gesture: 'looking left' });
    }

    const rightIrisCenterY = Math.abs(res[i].mesh[145][1] - res[i].annotations.rightEyeIris[0][1]) / res[i].box[3];
@@ -27,7 +27,13 @@ let handDetectorModel: GraphModel | null;
let handPoseModel: GraphModel | null;
let handPipeline: handpipeline.HandPipeline;

export function initPipeline() {
  const handDetector = handDetectorModel ? new handdetector.HandDetector(handDetectorModel) : undefined;
  if (handDetector && handPoseModel) handPipeline = new handpipeline.HandPipeline(handDetector, handPoseModel);
}

export async function predict(input: Tensor, config: Config): Promise<HandResult[]> {
  if (!handPipeline) initPipeline();
  const predictions = await handPipeline.estimateHands(input, config);
  if (!predictions) return [];
  const hands: HandResult[] = [];
@@ -82,21 +88,16 @@ export async function predict(input: Tensor, config: Config): Promise<HandResult
  return hands;
}

export async function load(config: Config): Promise<[GraphModel | null, GraphModel | null]> {
  if (env.initial) {
    handDetectorModel = null;
    handPoseModel = null;
  }
  if (!handDetectorModel || !handPoseModel) {
    [handDetectorModel, handPoseModel] = await Promise.all([
      config.hand.enabled ? loadModel(config.hand.detector?.modelPath) : null,
      config.hand.landmarks ? loadModel(config.hand.skeleton?.modelPath) : null,
    ]);
  } else {
    if (config.debug) log('cached model:', handDetectorModel['modelUrl']);
    if (config.debug) log('cached model:', handPoseModel['modelUrl']);
  }
  const handDetector = handDetectorModel ? new handdetector.HandDetector(handDetectorModel) : undefined;
  if (handDetector && handPoseModel) handPipeline = new handpipeline.HandPipeline(handDetector, handPoseModel);
  return [handDetectorModel, handPoseModel];
export async function loadDetect(config: Config): Promise<GraphModel> {
  if (env.initial) handDetectorModel = null;
  if (!handDetectorModel) handDetectorModel = await loadModel(config.hand.detector?.modelPath);
  else if (config.debug) log('cached model:', handDetectorModel['modelUrl']);
  return handDetectorModel;
}

export async function loadSkeleton(config: Config): Promise<GraphModel> {
  if (env.initial) handPoseModel = null;
  if (!handPoseModel) handPoseModel = await loadModel(config.hand.skeleton?.modelPath);
  else if (config.debug) log('cached model:', handPoseModel['modelUrl']);
  return handPoseModel;
}
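With load() split into loadDetect() and loadSkeleton(), the two hand models can be requested independently and the HandPipeline is created lazily on first predict(). A rough usage sketch based only on the signatures shown above (import paths and the wrapper function are assumptions, error handling omitted):

  import * as handpose from './handpose'; // illustrative path
  import type { Config } from '../config'; // illustrative path

  async function prepareHands(config: Config) {
    const detector = config.hand.enabled ? await handpose.loadDetect(config) : null;
    const skeleton = config.hand.landmarks ? await handpose.loadSkeleton(config) : null;
    handpose.initPipeline(); // builds the pipeline once both module-level models are set
    return [detector, skeleton];
  }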
@@ -94,10 +94,11 @@ export class HandPipeline {
    const skipTime = (config.hand.skipTime || 0) > (now() - lastTime);
    const skipFrame = this.skipped < (config.hand.skipFrames || 0);
    if (config.skipAllowed && skipTime && skipFrame) {
      this.skipped++;
    } else {
      boxes = await this.handDetector.predict(image, config);
      this.skipped = 0;
    }
    if (config.skipAllowed) this.skipped++;

    // if detector result count doesn't match current working set, use it to reset current working set
    if (boxes && (boxes.length > 0) && ((boxes.length !== this.detectedHands) && (this.detectedHands !== config.hand.maxDetected) || !config.hand.landmarks)) {
@@ -77,7 +77,9 @@ export async function loadDetect(config: Config): Promise<GraphModel> {
    fakeOps(['tensorlistreserve', 'enter', 'tensorlistfromtensor', 'merge', 'loopcond', 'switch', 'exit', 'tensorliststack', 'nextiteration', 'tensorlistsetitem', 'tensorlistgetitem', 'reciprocal', 'shape', 'split', 'where'], config);
    models[0] = await loadModel(config.hand.detector?.modelPath);
    const inputs = models[0]['executor'] ? Object.values(models[0].modelSignature['inputs']) : undefined;
    // @ts-ignore model signature properties are not typed and inputs are unreliable for this model
    inputSize[0][0] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
    // @ts-ignore model signature properties are not typed and inputs are unreliable for this model
    inputSize[0][1] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 0;
  } else if (config.debug) log('cached model:', models[0]['modelUrl']);
  return models[0];
@@ -88,7 +90,9 @@ export async function loadSkeleton(config: Config): Promise<GraphModel> {
  if (!models[1]) {
    models[1] = await loadModel(config.hand.skeleton?.modelPath);
    const inputs = models[1]['executor'] ? Object.values(models[1].modelSignature['inputs']) : undefined;
    // @ts-ignore model signature properties are not typed and inputs are unreliable for this model
    inputSize[1][0] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
    // @ts-ignore model signature properties are not typed and inputs are unreliable for this model
    inputSize[1][1] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 0;
  } else if (config.debug) log('cached model:', models[1]['modelUrl']);
  return models[1];
@@ -288,7 +288,6 @@ export class Human {
    const timeStamp = now();
    const count = Object.values(this.models.models).filter((model) => model).length;
    if (userConfig) this.config = mergeDeep(this.config, userConfig) as Config;

    if (this.env.initial) { // print version info on first run and check for correct backend setup
      if (!await backend.check(this, false)) log('error: backend check failed');
      await tf.ready();
@@ -3,7 +3,7 @@
 */

import * as tf from 'dist/tfjs.esm.js';
import type { Tensor } from '../exports';
import type { Tensor } from '../tfjs/types';

export async function histogramEqualization(inputImage: Tensor): Promise<Tensor> {
  const squeeze = inputImage.shape.length === 4 ? tf.squeeze(inputImage) : inputImage;
@@ -24,7 +24,7 @@ export async function histogramEqualization(inputImage: Tensor): Promise<Tensor>
    const enh = [tf.mul(sub[0], factor), tf.mul(sub[1], factor), tf.mul(sub[2], factor)];
    const stack = tf.stack([enh[0], enh[1], enh[2]], 2);
    final = tf.reshape(stack, [1, squeeze.shape[0] || 0, squeeze.shape[1] || 0, 3]);
    tf.dispose([...sub, ...range, ...enh]);
    tf.dispose([...sub, ...range, ...enh, stack]);
  } else {
    final = tf.expandDims(squeeze, 0);
  }
@@ -16,7 +16,7 @@ let inCanvas: AnyCanvas | null = null; // use global variable to avoid recreatin
let outCanvas: AnyCanvas | null = null; // use global variable to avoid recreating canvas on each frame
let tmpCanvas: AnyCanvas | null = null; // use global variable to avoid recreating canvas on each frame
// @ts-ignore // imagefx is js module that should be converted to a class
let fx: fxImage.GLImageFilter | null; // instance of imagefx
let fx: fxImage.GLImageFilter | null; // eslint-disable-line @typescript-eslint/no-redundant-type-constituents

const last: { inputSum: number, cacheDiff: number, sumMethod: number, inputTensor: undefined | Tensor } = {
  inputSum: 0,
@@ -39,10 +39,18 @@ export function canvas(width: number, height: number): AnyCanvas {
    if (typeof OffscreenCanvas === 'undefined') throw new Error('canvas error: attempted to run in web worker but OffscreenCanvas is not supported');
    c = new OffscreenCanvas(width, height);
  } else { // otherwise use DOM canvas
    if (typeof document === 'undefined') throw new Error('canvas error: attempted to run in browser but DOM is not defined');
    c = document.createElement('canvas');
    c.width = width;
    c.height = height;
    if (typeof document !== 'undefined') {
      c = document.createElement('canvas');
      c.width = width;
      c.height = height;
    } else if (typeof navigator !== 'undefined' && navigator.product === 'ReactNative') {
      // @ts-ignore // env.canvas is an external monkey-patch
      if (typeof env.Canvas !== 'undefined') c = new env.Canvas(width, height);
      else if (typeof globalThis.Canvas !== 'undefined') c = new globalThis.Canvas(width, height);
      else throw new Error('canvas error: attempted to use canvas in react-native without canvas support installed');
    } else {
      throw new Error('canvas error: attempted to run in browser but DOM is not defined');
    }
  }
} else { // if not running in browser, there is no "default" canvas object, so we need monkey patch or fail
  // @ts-ignore // env.canvas is an external monkey-patch
@@ -282,7 +290,6 @@ const checksum = async (input: Tensor): Promise<number> => { // use tf sum or js
export async function skip(config: Partial<Config>, input: Tensor) {
  let skipFrame = false;
  if (config.cacheSensitivity === 0 || !input.shape || input.shape.length !== 4 || input.shape[1] > 3840 || input.shape[2] > 2160) return skipFrame; // cache disabled or input is invalid or too large for cache analysis

  /*
  const checkSum = await checksum(input);
  const diff = 100 * (Math.max(checkSum, last.inputSum) / Math.min(checkSum, last.inputSum) - 1);
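The react-native and non-DOM branches above rely on an externally supplied Canvas implementation, which is exactly the monkey-patch the wasm test file in this same diff applies. A hedged sketch of that setup, assuming the nodejs 'canvas' package and an illustrative import path for the library:

  import { Canvas, Image } from 'canvas'; // assumption: 'canvas' npm package supplies the implementation
  import * as H from '@vladmandic/human'; // illustrative import; the repo tests use ../dist/human.node-wasm.js

  H.env.Canvas = Canvas; // picked up by canvas() above when no DOM or OffscreenCanvas exists
  H.env.Image = Image;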
@@ -27,7 +27,7 @@ class GLProgram {
    this.gl = gl;
    const vertexShader = this.compile(vertexSource, this.gl.VERTEX_SHADER);
    const fragmentShader = this.compile(fragmentSource, this.gl.FRAGMENT_SHADER);
    this.id = this.gl.createProgram() as WebGLProgram;
    this.id = this.gl.createProgram();
    if (!vertexShader || !fragmentShader) return;
    if (!this.id) {
      log('filter: could not create webgl program');
@@ -100,13 +100,13 @@ export class Models {
    let totalSizeWeights = 0;
    let totalSizeLoading = 0;
    for (const m of Object.values(modelStats)) {
      totalSizeFromManifest += m.sizeFromManifest;
      totalSizeWeights += m.sizeLoadedWeights;
      totalSizeLoading += m.sizeDesired;
      totalSizeFromManifest += Number.isNaN(+m.sizeFromManifest) ? 0 : m.sizeFromManifest;
      totalSizeWeights += Number.isNaN(+m.sizeLoadedWeights) ? 0 : m.sizeLoadedWeights;
      totalSizeLoading += Number.isNaN(+m.sizeDesired) ? 0 : m.sizeDesired;
    }
    const percentageLoaded = totalSizeLoading > 0 ? totalSizeWeights / totalSizeLoading : 0;
    return {
      numLoadedModels: Object.values(modelStats).length,
      numLoadedModels: Object.values(modelStats).filter((m) => m?.loaded).length,
      numDefinedModels: Object.keys(this.models).length,
      percentageLoaded,
      totalSizeFromManifest,
@@ -147,7 +147,11 @@ export class Models {
    // hand alternatives
    m.handtrack = (this.instance.config.hand.enabled && !this.models.handtrack && this.instance.config.hand.detector?.modelPath?.includes('handtrack')) ? handtrack.loadDetect(this.instance.config) : null;
    m.handskeleton = (this.instance.config.hand.enabled && this.instance.config.hand.landmarks && !this.models.handskeleton && this.instance.config.hand.detector?.modelPath?.includes('handtrack')) ? handtrack.loadSkeleton(this.instance.config) : null;
    if (this.instance.config.hand.detector?.modelPath?.includes('handdetect')) [m.handpose, m.handskeleton] = (!this.models.handpose) ? await handpose.load(this.instance.config) : [null, null];
    // if (this.instance.config.hand.detector?.modelPath?.includes('handdetect')) [m.handpose, m.handskeleton] = (!this.models.handpose) ? await handpose.load(this.instance.config) : [null, null];
    if (this.instance.config.hand.enabled && !this.models.handdetect && this.instance.config.hand.detector?.modelPath?.includes('handdetect')) {
      m.handdetect = handpose.loadDetect(this.instance.config);
      m.handskeleton = handpose.loadSkeleton(this.instance.config);
    }
    // object detection alternatives
    m.centernet = (this.instance.config.object.enabled && !this.models.centernet && this.instance.config.object.modelPath?.includes('centernet')) ? centernet.load(this.instance.config) : null;
    m.nanodet = (this.instance.config.object.enabled && !this.models.nanodet && this.instance.config.object.modelPath?.includes('nanodet')) ? nanodet.load(this.instance.config) : null;
@@ -174,7 +178,7 @@ export class Models {
    return models;
  }

  loaded() {
  loaded(): string[] {
    const list = this.list();
    const loaded = list.filter((model) => model.loaded).map((model) => model.name);
    return loaded;
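With these changes numLoadedModels counts only models that actually finished loading, and the byte totals skip NaN entries, so percentageLoaded stays meaningful while downloads are still in flight. A usage sketch; the method names come from the diff, while the instance setup is illustrative:

  import { Human } from '@vladmandic/human';

  async function reportProgress() { // illustrative helper
    const human = new Human({ modelBasePath: 'https://vladmandic.github.io/human-models/models/' });
    await human.load(); // trigger model loading per current config
    const stats = human.models.stats(); // numLoadedModels / numDefinedModels / percentageLoaded / totalSize*
    console.log('loaded:', human.models.loaded(), 'progress:', Math.round(100 * stats.percentageLoaded), '%');
  }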
@@ -25,6 +25,7 @@ export async function load(config: Config): Promise<GraphModel> {
    // fakeOps(['floormod'], config);
    model = await loadModel(config.object.modelPath);
    const inputs = model?.['executor'] ? Object.values(model.modelSignature['inputs']) : undefined;
    // @ts-ignore model signature properties are not typed and inputs are unreliable for this model
    inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 0;
  } else if (config.debug) log('cached model:', model['modelUrl']);
  return model;
@@ -26,6 +26,7 @@ export async function load(config: Config): Promise<GraphModel> {
  if (!model || env.initial) {
    model = await loadModel(config.object.modelPath);
    const inputs = model?.['executor'] ? Object.values(model.modelSignature['inputs']) : undefined;
    // @ts-ignore model signature properties are not typed and inputs are unreliable for this model
    inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 416;
  } else if (config.debug) log('cached model:', model['modelUrl']);
  return model;
@@ -37,6 +37,8 @@ export interface FaceResult {
  box: Box,
  /** detected face box normalized to 0..1 */
  boxRaw: Box,
  /** detected face box size */
  size: [number, number],
  /** detected face mesh */
  mesh: Point[]
  /** detected face mesh normalized to 0..1 */
@@ -3,7 +3,7 @@
 */

// data:image/jpeg;base64,
export const face = `
export const face: string = `
/9j/4AAQSkZJRgABAQEAYABgAAD/4QBoRXhpZgAATU0AKgAAAAgABAEaAAUAAAABAAAAPgEbAAUA
AAABAAAARgEoAAMAAAABAAIAAAExAAIAAAARAAAATgAAAAAAAABgAAAAAQAAAGAAAAABcGFpbnQu
bmV0IDQuMi4xMwAA/9sAQwAGBAUGBQQGBgUGBwcGCAoQCgoJCQoUDg8MEBcUGBgXFBYWGh0lHxob
@@ -93,12 +93,24 @@ export async function check(instance: Human, force = false) {
    if (instance.config.debug) log('running inside web worker');
  }

  if (typeof navigator !== 'undefined' && navigator?.userAgent?.toLowerCase().includes('electron')) {
    if (instance.config.debug) log('running inside electron');
  }

  // check available backends
  let available = Object.keys(tf.engine().registryFactory as Record<string, unknown>);
  if (instance.config.backend === 'humangl' && !available.includes('humangl')) {
    humangl.register(instance);
    available = Object.keys(tf.engine().registryFactory as Record<string, unknown>);
  }
  if (instance.config.debug) log('available backends:', available);

  // force browser vs node backend
  if (env.browser && instance.config.backend === 'tensorflow') {
  if (env.browser && !env.node && (instance.config.backend === 'tensorflow') && available.includes('webgl')) {
    if (instance.config.debug) log('override: backend set to tensorflow while running in browser');
    instance.config.backend = 'webgl';
  }
  if (env.node && (instance.config.backend === 'webgl' || instance.config.backend === 'humangl')) {
  if (env.node && !env.browser && (instance.config.backend === 'webgl' || instance.config.backend === 'humangl') && available.includes('tensorflow')) {
    if (instance.config.debug) log(`override: backend set to ${instance.config.backend} while running in nodejs`);
    instance.config.backend = 'tensorflow';
  }
@@ -109,28 +121,23 @@ export async function check(instance: Human, force = false) {
      log('override: backend set to webgpu but browser does not support webgpu');
      instance.config.backend = 'webgl';
    } else {
      const adapter = await navigator.gpu.requestAdapter();
      const adapter: GPUAdapter = await navigator.gpu.requestAdapter() as GPUAdapter;
      if (instance.config.debug) log('enumerated webgpu adapter:', adapter);
      if (!adapter) {
        log('override: backend set to webgpu but browser reports no available gpu');
        instance.config.backend = 'webgl';
      } else {
        // @ts-ignore requestAdapterInfo is not in tslib
        const adapterInfo = 'requestAdapterInfo' in adapter ? await adapter.requestAdapterInfo() : undefined;
        let adapterInfo;
        // @ts-ignore gpu adapter info
        if ('requestAdapterInfo' in adapter) adapterInfo = await adapter?.requestAdapterInfo();
        // @ts-ignore gpu adapter info
        else adapterInfo = adapter.info;
        // if (adapter.features) adapter.features.forEach((feature) => log('webgpu features:', feature));
        log('webgpu adapter info:', adapterInfo);
      }
    }
  }

  // check available backends
  let available = Object.keys(tf.engine().registryFactory as Record<string, unknown>);
  if (instance.config.backend === 'humangl' && !available.includes('humangl')) {
    humangl.register(instance);
    available = Object.keys(tf.engine().registryFactory as Record<string, unknown>);
  }
  if (instance.config.debug) log('available backends:', available);

  if (!available.includes(instance.config.backend)) {
    log(`error: backend ${instance.config.backend} not found in registry`);
    instance.config.backend = env.node ? 'tensorflow' : 'webgl';
@@ -217,7 +224,7 @@ export async function check(instance: Human, force = false) {
    await env.updateBackend(); // update env on backend init
    registerCustomOps(instance.config);
    // await env.updateBackend(); // update env on backend init
    env.initial = false;
    // env.initial = false;
  }
  return true;
}
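The reordered check enumerates the registered backends first and only overrides the configured backend when the replacement is actually available, falling back to tensorflow (nodejs) or webgl (browser) when the requested backend is missing. From the caller's side nothing changes; a short sketch of requesting webgpu, assuming the public init() helper and the tf namespace exposed on the instance:

  import { Human } from '@vladmandic/human';

  async function initWithWebGPU() { // illustrative helper
    const human = new Human({ backend: 'webgpu', debug: true });
    await human.init(); // runs the backend check shown above; may fall back to webgl
    console.log('active backend:', human.tf.getBackend());
  }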
@@ -14,6 +14,7 @@ const options = {

export interface ModelInfo {
  name: string,
  loaded: boolean,
  inCache: boolean,
  sizeDesired: number,
  sizeFromManifest: number,
@@ -42,6 +43,7 @@ export async function loadModel(modelPath: string | undefined): Promise<GraphMod
  const cachedModelName = 'indexeddb://' + shortModelName; // generate short model name for cache
  modelStats[shortModelName] = {
    name: shortModelName,
    loaded: false,
    sizeFromManifest: 0,
    sizeLoadedWeights: 0,
    sizeDesired: modelsDefs[shortModelName],
@@ -59,7 +61,7 @@ export async function loadModel(modelPath: string | undefined): Promise<GraphMod
  modelStats[shortModelName].url = modelStats[shortModelName].inCache ? cachedModelName : modelUrl;
  const tfLoadOptions = typeof fetch === 'undefined' ? {} : { fetchFunc: (url: string, init?: RequestInit) => httpHandler(url, init) };
  let model: GraphModel = new tf.GraphModel(modelStats[shortModelName].url, tfLoadOptions) as unknown as GraphModel; // create model prototype and decide if load from cache or from original modelurl
  let loaded = false;
  modelStats[shortModelName].loaded = false;
  try {
    // @ts-ignore private function
    model.findIOHandler(); // decide how to actually load a model
@@ -74,13 +76,13 @@ export async function loadModel(modelPath: string | undefined): Promise<GraphMod
    if (artifacts) model.loadSync(artifacts); // load weights
    else model = await tf.loadGraphModel(modelStats[shortModelName].inCache ? cachedModelName : modelUrl, tfLoadOptions) as unknown as GraphModel;
    // @ts-ignore private property
    modelStats[shortModelName].sizeLoadedWeights = model.artifacts?.weightData?.byteLength || 0;
    modelStats[shortModelName].sizeLoadedWeights = model.artifacts?.weightData?.byteLength || model.artifacts?.weightData?.[0].byteLength || 0;
    if (options.verbose) log('load:', { model: shortModelName, url: model['modelUrl'], bytes: modelStats[shortModelName].sizeLoadedWeights });
    loaded = true;
    modelStats[shortModelName].loaded = true;
  } catch (err) {
    log('error loading model:', modelUrl, err);
  }
  if (loaded && options.cacheModels && options.cacheSupported && !modelStats[shortModelName].inCache) { // save model to cache
  if (modelStats[shortModelName].loaded && options.cacheModels && options.cacheSupported && !modelStats[shortModelName].inCache) { // save model to cache
    try {
      const saveResult = await model.save(cachedModelName);
      if (options.debug) log('model saved:', cachedModelName, saveResult);
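The loaded state is now tracked per model in modelStats rather than in a local variable, and the weight-size accounting also handles the newer array form of weightData. Whether a model came from the IndexedDB cache can then be inspected after loading; a sketch using the fields defined above (the import path is illustrative, and it assumes modelStats is exported alongside loadModel as the diff suggests):

  import { loadModel, modelStats } from './tfjs/load'; // illustrative path
  import type { Config } from './config'; // illustrative path

  async function inspectModels(config: Config) {
    await loadModel(config.object.modelPath);
    for (const m of Object.values(modelStats)) {
      console.log(m.name, { loaded: m.loaded, fromCache: m.inCache, bytes: m.sizeLoadedWeights });
    }
  }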
@@ -18,7 +18,6 @@
    "experimentalDecorators": true,
    "forceConsistentCasingInFileNames": true,
    "importHelpers": true,
    "importsNotUsedAsValues": "error",
    "isolatedModules": false,
    "noEmitHelpers": true,
    "noEmitOnError": false,
@@ -99,7 +99,7 @@ export class Env {
  set ImageData(val) { this.#imageData = val; globalThis.ImageData = val; }

  constructor() {
    this.browser = typeof navigator !== 'undefined';
    this.browser = (typeof navigator !== 'undefined') && (typeof navigator.appVersion !== 'undefined');
    this.node = (typeof process !== 'undefined') && (typeof process.versions !== 'undefined') && (typeof process.versions.node !== 'undefined');
    this.tfjs = { version: tf.version['tfjs-core'] };
    this.offscreen = typeof OffscreenCanvas !== 'undefined';
@@ -107,12 +107,13 @@ export class Env {

    // @ts-ignore WorkerGlobalScope evaluated in browser only
    this.worker = this.browser && this.offscreen ? (typeof WorkerGlobalScope !== 'undefined') : undefined;
    if (typeof navigator !== 'undefined') { // TBD replace with navigator.userAgentData once in mainline
      const raw = navigator.userAgent.match(/\(([^()]+)\)/g);
    if ((typeof navigator !== 'undefined') && (typeof navigator.userAgent !== 'undefined')) { // TBD replace with navigator.userAgentData once in mainline
      const agent = navigator.userAgent || '';
      const raw = agent.match(/\(([^()]+)\)/g);
      if (raw?.[0]) {
        const platformMatch = raw[0].match(/\(([^()]+)\)/g);
        this.platform = (platformMatch?.[0]) ? platformMatch[0].replace(/\(|\)/g, '') : '';
        this.agent = navigator.userAgent.replace(raw[0], '');
        this.agent = agent.replace(raw[0], '');
        if (this.platform[1]) this.agent = this.agent.replace(raw[1], '');
        this.agent = this.agent.replace(/ /g, ' ');
      }
@@ -148,12 +149,17 @@ export class Env {
      this.webgl.renderer = gl.getParameter(gl.RENDERER);
      this.webgl.shader = gl.getParameter(gl.SHADING_LANGUAGE_VERSION);
    }
    this.webgpu.supported = this.browser && typeof navigator.gpu !== 'undefined';
    this.webgpu.supported = this.browser && typeof navigator !== 'undefined' && typeof navigator.gpu !== 'undefined';
    this.webgpu.backend = this.backends.includes('webgpu');
    try {
      if (this.webgpu.supported) {
        const adapter = await navigator.gpu.requestAdapter();
        this.webgpu.adapter = await adapter?.requestAdapterInfo();
        if (adapter) {
          // @ts-ignore requestAdapterInfo is not in tslib
          if ('requestAdapterInfo' in adapter) this.webgpu.adapter = await adapter.requestAdapterInfo();
          // @ts-ignore adapter.info is not in tslib
          else this.webgpu.adapter = await adapter.info;
        }
      }
    } catch {
      this.webgpu.supported = false;
@@ -2,7 +2,7 @@
 * Results interpolation for smoothening of video detection results inbetween detected frames
 */

import { Result, FaceResult, BodyResult, HandResult, ObjectResult, PersonResult, Box, Point, BodyLandmark, BodyAnnotation, empty } from '../result';
import { Result, FaceResult, BodyResult, HandResult, ObjectResult, PersonResult, Box, Point, BodyLandmark, BodyAnnotation, empty, FaceLandmark } from '../result';
import type { Config } from '../config';

import * as moveNetCoords from '../body/movenetcoords';
@@ -125,6 +125,19 @@ export function calc(newResult: Result, config: Config): Result {
        .map((b, j) => ((bufferedFactor - 1) * bufferedResult.face[i].box[j] + b) / bufferedFactor)) as Box;
      const boxRaw = (newResult.face[i].boxRaw // update boxRaw
        .map((b, j) => ((bufferedFactor - 1) * bufferedResult.face[i].boxRaw[j] + b) / bufferedFactor)) as Box;
      let annotations: Record<FaceLandmark, Point[]> = newResult.face[i].annotations;
      if (Object.keys(bufferedResult.face[i].annotations).length !== Object.keys(newResult.face[i].annotations).length) {
        bufferedResult.face[i].annotations = newResult.face[i].annotations; // reset annotations as previous frame did not have them
        annotations = bufferedResult.face[i].annotations;
      } else if (newResult.face[i].annotations) {
        for (const key of Object.keys(newResult.face[i].annotations)) { // update annotations
          annotations[key] = newResult.face[i]?.annotations?.[key]?.[0]
            ? newResult.face[i].annotations[key]
              .map((val, j: number) => val
                .map((coord: number, k: number) => ((bufferedFactor - 1) * bufferedResult.face[i].annotations[key][j][k] + coord) / bufferedFactor))
            : null;
        }
      }
      if (newResult.face[i].rotation) {
        const rotation: {
          matrix: [number, number, number, number, number, number, number, number, number],
@@ -142,9 +155,9 @@ export function calc(newResult: Result, config: Config): Result {
          bearing: ((bufferedFactor - 1) * (bufferedResult.face[i].rotation?.gaze.bearing || 0) + (newResult.face[i].rotation?.gaze.bearing || 0)) / bufferedFactor,
          strength: ((bufferedFactor - 1) * (bufferedResult.face[i].rotation?.gaze.strength || 0) + (newResult.face[i].rotation?.gaze.strength || 0)) / bufferedFactor,
        };
        bufferedResult.face[i] = { ...newResult.face[i], rotation, box, boxRaw }; // shallow clone plus updated values
        bufferedResult.face[i] = { ...newResult.face[i], rotation, box, boxRaw, annotations }; // shallow clone plus updated values
      } else {
        bufferedResult.face[i] = { ...newResult.face[i], box, boxRaw }; // shallow clone plus updated values
        bufferedResult.face[i] = { ...newResult.face[i], box, boxRaw, annotations }; // shallow clone plus updated values
      }
    }
  }
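Face annotations are now interpolated along with box and boxRaw, or reset when the previous frame did not carry them. The smoothing is consumed through the usual per-frame call; a sketch of a typical video loop, where the draw helper names follow the demos and are assumptions rather than part of this diff:

  import { Human } from '@vladmandic/human';

  async function loop(human: Human, video: HTMLVideoElement, canvas: HTMLCanvasElement) {
    const result = await human.detect(video); // raw per-frame detection
    const smooth = human.next(result); // temporally interpolated copy, now including face annotations
    await human.draw.all(canvas, smooth); // draw helpers as used by the demos
    requestAnimationFrame(() => loop(human, video, canvas));
  }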
@@ -103,7 +103,7 @@ export class WebCam { // eslint-disable-line @typescript-eslint/no-extraneous-cl
  };

  /** start method initializizes webcam stream and associates it with a dom video element */
  public start = async (webcamConfig?: Partial<WebCamConfig>): Promise<void> => {
  public start = async (webcamConfig?: Partial<WebCamConfig>): Promise<string> => {
    // set config
    if (webcamConfig?.debug) this.config.debug = webcamConfig?.debug;
    if (webcamConfig?.crop) this.config.crop = webcamConfig?.crop;
@@ -120,13 +120,13 @@ export class WebCam { // eslint-disable-line @typescript-eslint/no-extraneous-cl
        this.element = el;
      } else {
        if (this.config.debug) log('webcam', 'cannot get dom element', webcamConfig.element);
        return;
        return `webcam error: cannot get dom element: ${webcamConfig.element}`;
      }
    } else if (webcamConfig.element instanceof HTMLVideoElement) {
      this.element = webcamConfig.element;
    } else {
      if (this.config.debug) log('webcam', 'unknown dom element', webcamConfig.element);
      return;
      return `webcam error: unknown dom element: ${webcamConfig.element}`;
    }
  } else {
    this.element = document.createElement('video');
@@ -156,18 +156,18 @@ export class WebCam { // eslint-disable-line @typescript-eslint/no-extraneous-cl

    // get webcam and set it to run in dom element
    if (!navigator?.mediaDevices) {
      if (this.config.debug) log('webcam', 'no devices');
      return;
      if (this.config.debug) log('webcam error', 'no devices');
      return 'webcam error: no devices';
    }
    try {
      this.stream = await navigator.mediaDevices.getUserMedia(requestedConstraints); // get stream that satisfies constraints
    } catch (err) {
      log('webcam', err);
      return;
      return `webcam error: ${err}`;
    }
    if (!this.stream) {
      if (this.config.debug) log('webcam', 'no stream');
      return;
      if (this.config.debug) log('webcam error', 'no stream');
      return 'webcam error no stream';
    }
    this.element.srcObject = this.stream; // assign it to dom element
    const ready = new Promise((resolve) => { // wait until stream is ready
@@ -189,6 +189,7 @@ export class WebCam { // eslint-disable-line @typescript-eslint/no-extraneous-cl
      capabilities: this.capabilities,
    });
  }
  return `webcam: ${this.label}`;
};

/** pause webcam video method */
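start() now resolves to a status string instead of void, so callers can surface webcam failures without parsing the debug log. A usage sketch; the element id and wrapper function are illustrative, the option names come from the diff:

  import { Human } from '@vladmandic/human';

  async function startCamera(human: Human) { // illustrative helper
    const video = document.getElementById('video') as HTMLVideoElement; // element id is an assumption
    const status = await human.webcam.start({ element: video, crop: true });
    if (status.startsWith('webcam error')) console.error(status);
    else console.log(status); // e.g. "webcam: <device label>"
  }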
@@ -166,6 +166,7 @@ export async function warmup(instance: Human, userConfig?: Partial<Config>): Pro
  }
  return new Promise(async (resolve) => {
    await instance.models.load();
    await tf.ready();
    await runCompile(instance);
    const res = await runInference(instance);
    const t1 = now();
test/build.log
@ -1,50 +1,51 @@
|
|||
2022-11-22 10:34:25 [32mDATA: [39m Build {"name":"@vladmandic/human","version":"3.0.1"}
|
||||
2022-11-22 10:34:25 [36mINFO: [39m Application: {"name":"@vladmandic/human","version":"3.0.1"}
|
||||
2022-11-22 10:34:25 [36mINFO: [39m Environment: {"profile":"production","config":".build.json","package":"package.json","tsconfig":true,"eslintrc":true,"git":true}
|
||||
2022-11-22 10:34:25 [36mINFO: [39m Toolchain: {"build":"0.7.14","esbuild":"0.15.15","typescript":"4.9.3","typedoc":"0.23.21","eslint":"8.28.0"}
|
||||
2022-11-22 10:34:25 [36mINFO: [39m Build: {"profile":"production","steps":["clean","compile","typings","typedoc","lint","changelog"]}
|
||||
2022-11-22 10:34:25 [35mSTATE:[39m Clean: {"locations":["dist/*","types/*","typedoc/*"]}
|
||||
2022-11-22 10:34:25 [35mSTATE:[39m Compile: {"name":"tfjs/browser/version","format":"esm","platform":"browser","input":"tfjs/tf-version.ts","output":"dist/tfjs.version.js","files":1,"inputBytes":1289,"outputBytes":361}
|
||||
2022-11-22 10:34:25 [35mSTATE:[39m Compile: {"name":"tfjs/nodejs/cpu","format":"cjs","platform":"node","input":"tfjs/tf-node.ts","output":"dist/tfjs.esm.js","files":2,"inputBytes":569,"outputBytes":924}
|
||||
2022-11-22 10:34:25 [35mSTATE:[39m Compile: {"name":"human/nodejs/cpu","format":"cjs","platform":"node","input":"src/human.ts","output":"dist/human.node.js","files":80,"inputBytes":670542,"outputBytes":317619}
|
||||
2022-11-22 10:34:25 [35mSTATE:[39m Compile: {"name":"tfjs/nodejs/gpu","format":"cjs","platform":"node","input":"tfjs/tf-node-gpu.ts","output":"dist/tfjs.esm.js","files":2,"inputBytes":577,"outputBytes":928}
|
||||
2022-11-22 10:34:25 [35mSTATE:[39m Compile: {"name":"human/nodejs/gpu","format":"cjs","platform":"node","input":"src/human.ts","output":"dist/human.node-gpu.js","files":80,"inputBytes":670546,"outputBytes":317623}
|
||||
2022-11-22 10:34:25 [35mSTATE:[39m Compile: {"name":"tfjs/nodejs/wasm","format":"cjs","platform":"node","input":"tfjs/tf-node-wasm.ts","output":"dist/tfjs.esm.js","files":2,"inputBytes":665,"outputBytes":1876}
|
||||
2022-11-22 10:34:25 [35mSTATE:[39m Compile: {"name":"human/nodejs/wasm","format":"cjs","platform":"node","input":"src/human.ts","output":"dist/human.node-wasm.js","files":80,"inputBytes":671494,"outputBytes":317734}
|
||||
2022-11-22 10:34:25 [35mSTATE:[39m Compile: {"name":"tfjs/browser/esm/nobundle","format":"esm","platform":"browser","input":"tfjs/tf-browser.ts","output":"dist/tfjs.esm.js","files":2,"inputBytes":1375,"outputBytes":670}
|
||||
2022-11-22 10:34:25 [35mSTATE:[39m Compile: {"name":"human/browser/esm/nobundle","format":"esm","platform":"browser","input":"src/human.ts","output":"dist/human.esm-nobundle.js","files":80,"inputBytes":670288,"outputBytes":316195}
|
||||
2022-11-22 10:34:26 [35mSTATE:[39m Compile: {"name":"tfjs/browser/esm/bundle","format":"esm","platform":"browser","input":"tfjs/tf-browser.ts","output":"dist/tfjs.esm.js","files":10,"inputBytes":1375,"outputBytes":1138192}
|
||||
2022-11-22 10:34:26 [35mSTATE:[39m Compile: {"name":"human/browser/iife/bundle","format":"iife","platform":"browser","input":"src/human.ts","output":"dist/human.js","files":80,"inputBytes":1807810,"outputBytes":1450757}
|
||||
2022-11-22 10:34:26 [35mSTATE:[39m Compile: {"name":"human/browser/esm/bundle","format":"esm","platform":"browser","input":"src/human.ts","output":"dist/human.esm.js","files":80,"inputBytes":1807810,"outputBytes":1894174}
|
||||
2022-11-22 10:34:30 [35mSTATE:[39m Typings: {"input":"src/human.ts","output":"types/lib","files":15}
|
||||
2022-11-22 10:34:31 [35mSTATE:[39m TypeDoc: {"input":"src/human.ts","output":"typedoc","objects":77,"generated":true}
|
||||
2022-11-22 10:34:31 [35mSTATE:[39m Compile: {"name":"demo/typescript","format":"esm","platform":"browser","input":"demo/typescript/index.ts","output":"demo/typescript/index.js","files":1,"inputBytes":6082,"outputBytes":2872}
|
||||
2022-11-22 10:34:31 [35mSTATE:[39m Compile: {"name":"demo/faceid","format":"esm","platform":"browser","input":"demo/faceid/index.ts","output":"demo/faceid/index.js","files":2,"inputBytes":17572,"outputBytes":9456}
|
||||
2022-11-22 10:34:40 [35mSTATE:[39m Lint: {"locations":["**/*.json","src/**/*.ts","test/**/*.js","demo/**/*.js","**/*.md"],"files":169,"errors":0,"warnings":0}
|
||||
2022-11-22 10:34:40 [35mSTATE:[39m ChangeLog: {"repository":"https://github.com/vladmandic/human","branch":"main","output":"CHANGELOG.md"}
|
||||
2022-11-22 10:34:40 [35mSTATE:[39m Copy: {"input":"node_modules/@vladmandic/tfjs/types/tfjs-core.d.ts","output":"types/tfjs-core.d.ts"}
|
||||
2022-11-22 10:34:40 [36mINFO: [39m Done...
|
||||
2022-11-22 10:34:40 [35mSTATE:[39m Copy: {"input":"node_modules/@vladmandic/tfjs/types/tfjs.d.ts","output":"types/tfjs.esm.d.ts"}
|
||||
2022-11-22 10:34:40 [35mSTATE:[39m Copy: {"input":"src/types/tsconfig.json","output":"types/tsconfig.json"}
|
||||
2022-11-22 10:34:40 [35mSTATE:[39m Copy: {"input":"src/types/eslint.json","output":"types/.eslintrc.json"}
|
||||
2022-11-22 10:34:40 [35mSTATE:[39m Copy: {"input":"src/types/tfjs.esm.d.ts","output":"dist/tfjs.esm.d.ts"}
|
||||
2022-11-22 10:34:40 [35mSTATE:[39m Filter: {"input":"types/tfjs-core.d.ts"}
|
||||
2022-11-22 10:34:41 [35mSTATE:[39m API-Extractor: {"succeeeded":true,"errors":0,"warnings":204}
|
||||
2022-11-22 10:34:41 [35mSTATE:[39m Filter: {"input":"types/human.d.ts"}
|
||||
2022-11-22 10:34:41 [35mSTATE:[39m Write: {"output":"dist/human.esm-nobundle.d.ts"}
|
||||
2022-11-22 10:34:41 [35mSTATE:[39m Write: {"output":"dist/human.esm.d.ts"}
|
||||
2022-11-22 10:34:41 [35mSTATE:[39m Write: {"output":"dist/human.d.ts"}
|
||||
2022-11-22 10:34:41 [35mSTATE:[39m Write: {"output":"dist/human.node-gpu.d.ts"}
|
||||
2022-11-22 10:34:41 [35mSTATE:[39m Write: {"output":"dist/human.node.d.ts"}
|
||||
2022-11-22 10:34:41 [35mSTATE:[39m Write: {"output":"dist/human.node-wasm.d.ts"}
|
||||
2022-11-22 10:34:41 [36mINFO: [39m Analyze models: {"folders":8,"result":"models/models.json"}
|
||||
2022-11-22 10:34:41 [35mSTATE:[39m Models {"folder":"./models","models":12}
|
||||
2022-11-22 10:34:41 [35mSTATE:[39m Models {"folder":"../human-models/models","models":41}
|
||||
2022-11-22 10:34:41 [35mSTATE:[39m Models {"folder":"../blazepose/model/","models":4}
|
||||
2022-11-22 10:34:41 [35mSTATE:[39m Models {"folder":"../anti-spoofing/model","models":1}
|
||||
2022-11-22 10:34:41 [35mSTATE:[39m Models {"folder":"../efficientpose/models","models":3}
|
||||
2022-11-22 10:34:41 [35mSTATE:[39m Models {"folder":"../insightface/models","models":5}
|
||||
2022-11-22 10:34:41 [35mSTATE:[39m Models {"folder":"../movenet/models","models":3}
|
||||
2022-11-22 10:34:41 [35mSTATE:[39m Models {"folder":"../nanodet/models","models":4}
|
||||
2022-11-22 10:34:42 [35mSTATE:[39m Models: {"count":55,"totalSize":372917743}
|
||||
2022-11-22 10:34:42 [36mINFO: [39m Human Build complete... {"logFile":"test/build.log"}
|
||||
2025-02-05 09:39:04 [32mDATA: [39m Build {"name":"@vladmandic/human","version":"3.3.5"}
|
||||
2025-02-05 09:39:04 [36mINFO: [39m Application: {"name":"@vladmandic/human","version":"3.3.5"}
|
||||
2025-02-05 09:39:04 [36mINFO: [39m Environment: {"profile":"production","config":".build.json","package":"package.json","tsconfig":true,"eslintrc":true,"git":true}
|
||||
2025-02-05 09:39:04 [36mINFO: [39m Toolchain: {"build":"0.10.2","esbuild":"0.24.2","typescript":"5.7.3","typedoc":"0.27.6","eslint":"8.57.0"}
|
||||
2025-02-05 09:39:04 [36mINFO: [39m Build: {"profile":"production","steps":["clean","compile","typings","typedoc","lint","changelog"]}
|
||||
2025-02-05 09:39:04 [35mSTATE:[39m Clean: {"locations":["dist/*","types/*","typedoc/*"]}
|
||||
2025-02-05 09:39:04 [35mSTATE:[39m Compile: {"name":"tfjs/browser/version","format":"esm","platform":"browser","input":"tfjs/tf-version.ts","output":"dist/tfjs.version.js","files":1,"inputBytes":1289,"outputBytes":358}
|
||||
2025-02-05 09:39:04 [35mSTATE:[39m Compile: {"name":"tfjs/nodejs/cpu","format":"cjs","platform":"node","input":"tfjs/tf-node.ts","output":"dist/tfjs.esm.js","files":2,"inputBytes":566,"outputBytes":957}
|
||||
2025-02-05 09:39:04 [35mSTATE:[39m Compile: {"name":"human/nodejs/cpu","format":"cjs","platform":"node","input":"src/human.ts","output":"dist/human.node.js","files":80,"inputBytes":678664,"outputBytes":321804}
|
||||
2025-02-05 09:39:04 [35mSTATE:[39m Compile: {"name":"tfjs/nodejs/gpu","format":"cjs","platform":"node","input":"tfjs/tf-node-gpu.ts","output":"dist/tfjs.esm.js","files":2,"inputBytes":574,"outputBytes":965}
|
||||
2025-02-05 09:39:04 [35mSTATE:[39m Compile: {"name":"human/nodejs/gpu","format":"cjs","platform":"node","input":"src/human.ts","output":"dist/human.node-gpu.js","files":80,"inputBytes":678672,"outputBytes":321808}
|
||||
2025-02-05 09:39:04 [35mSTATE:[39m Compile: {"name":"tfjs/nodejs/wasm","format":"cjs","platform":"node","input":"tfjs/tf-node-wasm.ts","output":"dist/tfjs.esm.js","files":2,"inputBytes":662,"outputBytes":2003}
|
||||
2025-02-05 09:39:04 [35mSTATE:[39m Compile: {"name":"human/nodejs/wasm","format":"cjs","platform":"node","input":"src/human.ts","output":"dist/human.node-wasm.js","files":80,"inputBytes":679710,"outputBytes":321919}
|
||||
2025-02-05 09:39:04 [35mSTATE:[39m Compile: {"name":"tfjs/browser/esm/nobundle","format":"esm","platform":"browser","input":"tfjs/tf-browser.ts","output":"dist/tfjs.esm.js","files":2,"inputBytes":1403,"outputBytes":690}
|
||||
2025-02-05 09:39:04 [35mSTATE:[39m Compile: {"name":"human/browser/esm/nobundle","format":"esm","platform":"browser","input":"src/human.ts","output":"dist/human.esm-nobundle.js","files":80,"inputBytes":678397,"outputBytes":320365}
|
||||
2025-02-05 09:39:04 [35mSTATE:[39m Compile: {"name":"tfjs/browser/esm/bundle","format":"esm","platform":"browser","input":"tfjs/tf-browser.ts","output":"dist/tfjs.esm.js","files":10,"inputBytes":1403,"outputBytes":1267320}
|
||||
2025-02-05 09:39:04 [35mSTATE:[39m Compile: {"name":"human/browser/iife/bundle","format":"iife","platform":"browser","input":"src/human.ts","output":"dist/human.js","files":80,"inputBytes":1945027,"outputBytes":1583413}
|
||||
2025-02-05 09:39:04 [35mSTATE:[39m Compile: {"name":"human/browser/esm/bundle","format":"esm","platform":"browser","input":"src/human.ts","output":"dist/human.esm.js","files":80,"inputBytes":1945027,"outputBytes":2067530}
|
||||
2025-02-05 09:39:06 [35mSTATE:[39m Typings: {"input":"src/human.ts","output":"types/lib","files":78}
|
||||
2025-02-05 09:39:08 [35mSTATE:[39m TypeDoc: {"input":"src/human.ts","output":"typedoc","objects":81,"generated":true}
|
||||
2025-02-05 09:39:08 [35mSTATE:[39m Compile: {"name":"demo/typescript","format":"esm","platform":"browser","input":"demo/typescript/index.ts","output":"demo/typescript/index.js","files":1,"inputBytes":6318,"outputBytes":2970}
|
||||
2025-02-05 09:39:08 [35mSTATE:[39m Compile: {"name":"demo/faceid","format":"esm","platform":"browser","input":"demo/faceid/index.ts","output":"demo/faceid/index.js","files":2,"inputBytes":17498,"outputBytes":9397}
|
||||
2025-02-05 09:39:08 [35mSTATE:[39m Compile: {"name":"demo/tracker","format":"esm","platform":"browser","input":"demo/tracker/index.ts","output":"demo/tracker/index.js","files":2,"inputBytes":54375,"outputBytes":22791}
|
||||
2025-02-05 09:39:16 [35mSTATE:[39m Lint: {"locations":["**/*.json","src/**/*.ts","test/**/*.js","demo/**/*.js","**/*.md"],"files":171,"errors":0,"warnings":0}
|
||||
2025-02-05 09:39:16 [35mSTATE:[39m ChangeLog: {"repository":"https://github.com/vladmandic/human","branch":"main","output":"CHANGELOG.md"}
|
||||
2025-02-05 09:39:16 [35mSTATE:[39m Copy: {"input":"node_modules/@vladmandic/tfjs/types/tfjs-core.d.ts","output":"types/tfjs-core.d.ts"}
|
||||
2025-02-05 09:39:16 [36mINFO: [39m Done...
|
||||
2025-02-05 09:39:16 [35mSTATE:[39m Copy: {"input":"node_modules/@vladmandic/tfjs/types/tfjs.d.ts","output":"types/tfjs.esm.d.ts"}
|
||||
2025-02-05 09:39:16 [35mSTATE:[39m Copy: {"input":"src/types/tsconfig.json","output":"types/tsconfig.json"}
|
||||
2025-02-05 09:39:16 [35mSTATE:[39m Copy: {"input":"src/types/eslint.json","output":"types/.eslintrc.json"}
|
||||
2025-02-05 09:39:16 [35mSTATE:[39m Copy: {"input":"src/types/tfjs.esm.d.ts","output":"dist/tfjs.esm.d.ts"}
|
||||
2025-02-05 09:39:16 [35mSTATE:[39m Filter: {"input":"types/tfjs-core.d.ts"}
|
||||
2025-02-05 09:39:17 [35mSTATE:[39m API-Extractor: {"succeeeded":true,"errors":0,"warnings":0}
|
||||
2025-02-05 09:39:17 [35mSTATE:[39m Filter: {"input":"types/human.d.ts"}
|
||||
2025-02-05 09:39:17 [35mSTATE:[39m Write: {"output":"dist/human.esm-nobundle.d.ts"}
|
||||
2025-02-05 09:39:17 [35mSTATE:[39m Write: {"output":"dist/human.esm.d.ts"}
|
||||
2025-02-05 09:39:17 [35mSTATE:[39m Write: {"output":"dist/human.d.ts"}
|
||||
2025-02-05 09:39:17 [35mSTATE:[39m Write: {"output":"dist/human.node-gpu.d.ts"}
|
||||
2025-02-05 09:39:17 [35mSTATE:[39m Write: {"output":"dist/human.node.d.ts"}
|
||||
2025-02-05 09:39:17 [35mSTATE:[39m Write: {"output":"dist/human.node-wasm.d.ts"}
|
||||
2025-02-05 09:39:17 [36mINFO: [39m Analyze models: {"folders":8,"result":"models/models.json"}
|
||||
2025-02-05 09:39:17 [35mSTATE:[39m Models {"folder":"./models","models":12}
|
||||
2025-02-05 09:39:17 [35mSTATE:[39m Models {"folder":"../human-models/models","models":44}
|
||||
2025-02-05 09:39:17 [35mSTATE:[39m Models {"folder":"../blazepose/model/","models":4}
|
||||
2025-02-05 09:39:17 [35mSTATE:[39m Models {"folder":"../anti-spoofing/model","models":1}
|
||||
2025-02-05 09:39:17 [35mSTATE:[39m Models {"folder":"../efficientpose/models","models":3}
|
||||
2025-02-05 09:39:17 [35mSTATE:[39m Models {"folder":"../insightface/models","models":5}
|
||||
2025-02-05 09:39:17 [35mSTATE:[39m Models {"folder":"../movenet/models","models":3}
|
||||
2025-02-05 09:39:17 [35mSTATE:[39m Models {"folder":"../nanodet/models","models":4}
|
||||
2025-02-05 09:39:17 [35mSTATE:[39m Models: {"count":58,"totalSize":380063249}
|
||||
2025-02-05 09:39:17 [36mINFO: [39m Human Build complete... {"logFile":"test/build.log"}
|
||||
|
|
|
@@ -26,6 +26,7 @@ const demos = [
  { cmd: '../demo/multithread/node-multiprocess.js', args: [] },
  { cmd: '../demo/facematch/node-match.js', args: [] },
  { cmd: '../demo/nodejs/node-bench.js', args: [] },
  { cmd: '../test/test-node-emotion.js', args: [] },
  // { cmd: '../demo/nodejs/node-video.js', args: [] },
  // { cmd: '../demo/nodejs/node-webcam.js', args: [] },
];
@@ -3,7 +3,7 @@ const H = require('../dist/human.node.js');
const test = require('./test-node-main.js').test;

const config = {
  cacheSensitivity: 0,
  cacheSensitivity: 0.01,
  modelBasePath: 'file://models/',
  backend: 'cpu',
  debug: false,
@@ -3,7 +3,7 @@ const H = require('../dist/human.node-gpu.js');
const test = require('./test-node-main.js').test;

const config = {
  cacheSensitivity: 0,
  cacheSensitivity: 0.01,
  modelBasePath: 'file://models/',
  backend: 'tensorflow',
  debug: false,
@@ -10,7 +10,7 @@ H.env.Canvas = Canvas; // requires monkey-patch as wasm does not have tf.browser
H.env.Image = Image; // requires monkey-patch as wasm does not have tf.browser namespace

const config = {
  cacheSensitivity: 0,
  cacheSensitivity: 0.01,
  modelBasePath: 'https://vladmandic.github.io/human-models/models/',
  backend: 'wasm',
  // wasmPath: 'node_modules/@tensorflow/tfjs-backend-wasm/dist/',
@@ -4,7 +4,7 @@ const H = require('../dist/human.node.js');
const test = require('./test-node-main.js').test;

const config = {
  cacheSensitivity: 0,
  cacheSensitivity: 0.01,
  modelBasePath: 'file://models/',
  backend: 'tensorflow',
  debug: false,
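The test configurations above switch cacheSensitivity from 0 to 0.01. Per the skip() guard shown earlier, 0 disables input-change caching entirely, while a small non-zero value lets visually unchanged frames be skipped; reading the commented-out percentage computation in skip(), 0.01 roughly corresponds to a 1% change threshold, though that interpretation is not stated in the diff itself. Equivalent configuration, as a sketch:

  const config = {
    cacheSensitivity: 0.01, // 0 disables frame-change caching; small values skip near-identical frames
    modelBasePath: 'file://models/', // value taken from the test files above
  };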