Compare commits


No commits in common. "1.4.1" and "main" have entirely different histories.
1.4.1...main

703 changed files with 113007 additions and 83963 deletions

.api-extractor.json (new file, +27 lines)

@@ -0,0 +1,27 @@
{
"$schema": "https://developer.microsoft.com/json-schemas/api-extractor/v7/api-extractor.schema.json",
"mainEntryPointFilePath": "types/lib/src/human.d.ts",
"compiler": {
"skipLibCheck": true
},
"newlineKind": "lf",
"dtsRollup": {
"enabled": true,
"untrimmedFilePath": "types/human.d.ts"
},
"docModel": { "enabled": false },
"tsdocMetadata": { "enabled": false },
"apiReport": { "enabled": false },
"messages": {
"compilerMessageReporting": {
"default": { "logLevel": "warning" }
},
"extractorMessageReporting": {
"default": { "logLevel": "warning" },
"ae-missing-release-tag": { "logLevel": "none" }
},
"tsdocMessageReporting": {
"default": { "logLevel": "warning" }
}
}
}
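This config drives [API Extractor](https://api-extractor.com/) to roll the per-module `.d.ts` files under `types/lib` into the single `types/human.d.ts` bundle. A minimal sketch of invoking it programmatically, assuming `@microsoft/api-extractor` is installed as a dev dependency (the actual build pipeline may invoke it differently):

```js
// hypothetical driver script, not part of the repo: runs api-extractor against the config above
const { Extractor, ExtractorConfig } = require('@microsoft/api-extractor');

const config = ExtractorConfig.loadFileAndPrepare('.api-extractor.json'); // parses and validates this file
const result = Extractor.invoke(config, { localBuild: true }); // localBuild relaxes api-report checks
if (result.succeeded) {
  console.log('d.ts rollup written to types/human.d.ts');
} else {
  console.error(`api-extractor completed with ${result.errorCount} errors and ${result.warningCount} warnings`);
}
```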

.build.json (new file, +181 lines)

@@ -0,0 +1,181 @@
{
"log": {
"enabled": true,
"debug": false,
"console": true,
"output": "test/build.log"
},
"profiles": {
"production": ["clean", "compile", "typings", "typedoc", "lint", "changelog"],
"development": ["serve", "watch", "compile"],
"serve": ["serve"],
"clean": ["clean"]
},
"clean": {
"locations": ["dist/*", "types/*", "typedoc/*"]
},
"lint": {
"locations": [ "**/*.json", "src/**/*.ts", "test/**/*.js", "demo/**/*.js", "**/*.md" ],
"rules": { }
},
"changelog": {
"log": "CHANGELOG.md"
},
"serve": {
"sslKey": "node_modules/@vladmandic/build/cert/https.key",
"sslCrt": "node_modules/@vladmandic/build/cert/https.crt",
"httpPort": 8000,
"httpsPort": 8001,
"documentRoot": ".",
"defaultFolder": "demo",
"defaultFile": "index.html"
},
"build": {
"global": {
"target": "es2018",
"sourcemap": false,
"treeShaking": true,
"ignoreAnnotations": true,
"banner": { "js": "/*\n Human\n homepage: <https://github.com/vladmandic/human>\n author: <https://github.com/vladmandic>'\n*/\n" }
},
"targets": [
{
"name": "tfjs/browser/version",
"platform": "browser",
"format": "esm",
"input": "tfjs/tf-version.ts",
"output": "dist/tfjs.version.js"
},
{
"name": "tfjs/nodejs/cpu",
"platform": "node",
"format": "cjs",
"input": "tfjs/tf-node.ts",
"output": "dist/tfjs.esm.js",
"external": ["@tensorflow"]
},
{
"name": "human/nodejs/cpu",
"platform": "node",
"format": "cjs",
"input": "src/human.ts",
"output": "dist/human.node.js",
"external": ["@tensorflow"]
},
{
"name": "tfjs/nodejs/gpu",
"platform": "node",
"format": "cjs",
"input": "tfjs/tf-node-gpu.ts",
"output": "dist/tfjs.esm.js",
"external": ["@tensorflow"]
},
{
"name": "human/nodejs/gpu",
"platform": "node",
"format": "cjs",
"input": "src/human.ts",
"output": "dist/human.node-gpu.js",
"external": ["@tensorflow"]
},
{
"name": "tfjs/nodejs/wasm",
"platform": "node",
"format": "cjs",
"input": "tfjs/tf-node-wasm.ts",
"output": "dist/tfjs.esm.js",
"minify": false,
"external": ["@tensorflow"]
},
{
"name": "human/nodejs/wasm",
"platform": "node",
"format": "cjs",
"input": "src/human.ts",
"output": "dist/human.node-wasm.js",
"external": ["@tensorflow"]
},
{
"name": "tfjs/browser/esm/nobundle",
"platform": "browser",
"format": "esm",
"input": "tfjs/tf-browser.ts",
"output": "dist/tfjs.esm.js",
"external": ["@tensorflow"]
},
{
"name": "human/browser/esm/nobundle",
"platform": "browser",
"format": "esm",
"input": "src/human.ts",
"output": "dist/human.esm-nobundle.js",
"sourcemap": false,
"external": ["@tensorflow"]
},
{
"name": "tfjs/browser/esm/bundle",
"platform": "browser",
"format": "esm",
"input": "tfjs/tf-browser.ts",
"output": "dist/tfjs.esm.js",
"sourcemap": false,
"minify": true
},
{
"name": "human/browser/iife/bundle",
"platform": "browser",
"format": "iife",
"input": "src/human.ts",
"output": "dist/human.js",
"minify": true,
"globalName": "Human",
"external": ["@tensorflow"]
},
{
"name": "human/browser/esm/bundle",
"platform": "browser",
"format": "esm",
"input": "src/human.ts",
"output": "dist/human.esm.js",
"sourcemap": true,
"minify": false,
"external": ["@tensorflow"],
"typings": "types/lib",
"typedoc": "typedoc"
},
{
"name": "demo/typescript",
"platform": "browser",
"format": "esm",
"input": "demo/typescript/index.ts",
"output": "demo/typescript/index.js",
"sourcemap": true,
"external": ["*/human.esm.js"]
},
{
"name": "demo/faceid",
"platform": "browser",
"format": "esm",
"input": "demo/faceid/index.ts",
"output": "demo/faceid/index.js",
"sourcemap": true,
"external": ["*/human.esm.js"]
},
{
"name": "demo/tracker",
"platform": "browser",
"format": "esm",
"input": "demo/tracker/index.ts",
"output": "demo/tracker/index.js",
"sourcemap": true,
"external": ["*/human.esm.js"]
}
]
},
"watch": {
"locations": [ "src/**/*", "tfjs/**/*", "demo/**/*.ts" ]
},
"typescript": {
"allowJs": false
}
}
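Each entry in `targets` maps closely onto esbuild options (the `serve` paths above show this config is consumed by `@vladmandic/build`). A minimal sketch of what the `human/browser/esm/bundle` target resolves to, assuming plain `esbuild`; the actual build tool adds logging, typings and typedoc steps:

```js
// hypothetical equivalent of the "human/browser/esm/bundle" target above, using esbuild directly
const esbuild = require('esbuild');

esbuild.build({
  entryPoints: ['src/human.ts'],
  outfile: 'dist/human.esm.js',
  platform: 'browser',
  format: 'esm',
  bundle: true,                  // bundle all imports except externals
  external: ['@tensorflow/*'],   // the config's "@tensorflow" entry, expanded to a wildcard for esbuild
  sourcemap: true,
  minify: false,
  treeShaking: true,
  target: 'es2018',              // from build.global
  banner: { js: '/* Human */' }, // shortened here; the config embeds the full banner
}).then(() => console.log('built dist/human.esm.js'))
  .catch(() => process.exit(1));
```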

.eslintrc.json (modified)

@@ -1,73 +1,221 @@
{
"globals": {},
"env": {
"browser": true,
"commonjs": true,
"es6": true,
"node": true,
"jquery": true,
"es2020": true
"globals": {
},
"parser": "@typescript-eslint/parser",
"parserOptions": { "ecmaVersion": 2020 },
"plugins": [
"@typescript-eslint"
],
"extends": [
"eslint:recommended",
"plugin:import/errors",
"plugin:import/warnings",
"plugin:node/recommended",
"plugin:promise/recommended",
"plugin:json/recommended-with-comments",
"plugin:@typescript-eslint/eslint-recommended",
"plugin:@typescript-eslint/recommended",
"airbnb-base"
],
"ignorePatterns": [ "dist", "assets", "media", "models", "node_modules", "demo/helpers" ],
"rules": {
"@typescript-eslint/ban-ts-comment": "off",
"@typescript-eslint/explicit-module-boundary-types": "off",
"@typescript-eslint/ban-types": "off",
"@typescript-eslint/no-explicit-any": "off",
"@typescript-eslint/no-var-requires": "off",
"camelcase": "off",
"dot-notation": "off",
"func-names": "off",
"guard-for-in": "off",
"import/extensions": "off",
"import/no-extraneous-dependencies": "off",
"import/no-named-as-default": "off",
"import/no-unresolved": "off",
"import/prefer-default-export": "off",
"lines-between-class-members": "off",
"max-len": [1, 275, 3],
"newline-per-chained-call": "off",
"no-async-promise-executor": "off",
"no-await-in-loop": "off",
"no-bitwise": "off",
"no-case-declarations":"off",
"no-continue": "off",
"no-loop-func": "off",
"no-mixed-operators": "off",
"no-param-reassign":"off",
"no-plusplus": "off",
"no-regex-spaces": "off",
"no-restricted-globals": "off",
"no-restricted-syntax": "off",
"no-return-assign": "off",
"no-underscore-dangle": "off",
"node/no-missing-import": ["error", { "tryExtensions": [".js", ".json", ".ts"] }],
"node/no-unpublished-import": "off",
"node/no-unpublished-require": "off",
"node/no-unsupported-features/es-syntax": "off",
"node/shebang": "off",
"object-curly-newline": "off",
"prefer-destructuring": "off",
"prefer-template":"off",
"promise/always-return": "off",
"promise/catch-or-return": "off",
"promise/no-nesting": "off",
"radix": "off"
}
}
"@typescript-eslint/no-require-imports":"off"
},
"overrides": [
{
"files": ["**/*.ts"],
"parser": "@typescript-eslint/parser",
"parserOptions": { "ecmaVersion": "latest", "project": ["./tsconfig.json"] },
"plugins": ["@typescript-eslint"],
"env": {
"browser": true,
"commonjs": false,
"node": false,
"es2021": true
},
"extends": [
"airbnb-base",
"eslint:recommended",
"plugin:@typescript-eslint/eslint-recommended",
"plugin:@typescript-eslint/recommended",
"plugin:@typescript-eslint/recommended-requiring-type-checking",
"plugin:@typescript-eslint/strict",
"plugin:import/recommended",
"plugin:promise/recommended"
],
"rules": {
"@typescript-eslint/ban-ts-comment":"off",
"@typescript-eslint/dot-notation":"off",
"@typescript-eslint/no-empty-interface":"off",
"@typescript-eslint/no-inferrable-types":"off",
"@typescript-eslint/no-misused-promises":"off",
"@typescript-eslint/no-unnecessary-condition":"off",
"@typescript-eslint/no-unsafe-argument":"off",
"@typescript-eslint/no-unsafe-assignment":"off",
"@typescript-eslint/no-unsafe-call":"off",
"@typescript-eslint/no-unsafe-member-access":"off",
"@typescript-eslint/no-unsafe-return":"off",
"@typescript-eslint/no-require-imports":"off",
"@typescript-eslint/no-empty-object-type":"off",
"@typescript-eslint/non-nullable-type-assertion-style":"off",
"@typescript-eslint/prefer-for-of":"off",
"@typescript-eslint/prefer-nullish-coalescing":"off",
"@typescript-eslint/prefer-ts-expect-error":"off",
"@typescript-eslint/restrict-plus-operands":"off",
"@typescript-eslint/restrict-template-expressions":"off",
"dot-notation":"off",
"guard-for-in":"off",
"import/extensions": ["off", "always"],
"import/no-unresolved":"off",
"import/prefer-default-export":"off",
"lines-between-class-members":"off",
"max-len": [1, 275, 3],
"no-async-promise-executor":"off",
"no-await-in-loop":"off",
"no-bitwise":"off",
"no-continue":"off",
"no-lonely-if":"off",
"no-mixed-operators":"off",
"no-param-reassign":"off",
"no-plusplus":"off",
"no-regex-spaces":"off",
"no-restricted-syntax":"off",
"no-return-assign":"off",
"no-void":"off",
"object-curly-newline":"off",
"prefer-destructuring":"off",
"prefer-template":"off",
"radix":"off"
}
},
{
"files": ["**/*.d.ts"],
"parser": "@typescript-eslint/parser",
"parserOptions": { "ecmaVersion": "latest", "project": ["./tsconfig.json"] },
"plugins": ["@typescript-eslint"],
"env": {
"browser": true,
"commonjs": false,
"node": false,
"es2021": true
},
"extends": [
"airbnb-base",
"eslint:recommended",
"plugin:@typescript-eslint/eslint-recommended",
"plugin:@typescript-eslint/recommended",
"plugin:@typescript-eslint/recommended-requiring-type-checking",
"plugin:@typescript-eslint/strict",
"plugin:import/recommended",
"plugin:promise/recommended"
],
"rules": {
"@typescript-eslint/array-type":"off",
"@typescript-eslint/ban-types":"off",
"@typescript-eslint/consistent-indexed-object-style":"off",
"@typescript-eslint/consistent-type-definitions":"off",
"@typescript-eslint/no-empty-interface":"off",
"@typescript-eslint/no-explicit-any":"off",
"@typescript-eslint/no-invalid-void-type":"off",
"@typescript-eslint/no-unnecessary-type-arguments":"off",
"@typescript-eslint/no-unnecessary-type-constraint":"off",
"comma-dangle":"off",
"indent":"off",
"lines-between-class-members":"off",
"max-classes-per-file":"off",
"max-len":"off",
"no-multiple-empty-lines":"off",
"no-shadow":"off",
"no-use-before-define":"off",
"quotes":"off",
"semi":"off"
}
},
{
"files": ["**/*.js"],
"parserOptions": { "sourceType": "module", "ecmaVersion": "latest" },
"plugins": [],
"env": {
"browser": true,
"commonjs": true,
"node": true,
"es2021": true
},
"extends": [
"airbnb-base",
"eslint:recommended",
"plugin:node/recommended",
"plugin:promise/recommended"
],
"rules": {
"dot-notation":"off",
"import/extensions": ["error", "always"],
"import/no-extraneous-dependencies":"off",
"max-len": [1, 275, 3],
"no-await-in-loop":"off",
"no-bitwise":"off",
"no-continue":"off",
"no-mixed-operators":"off",
"no-param-reassign":"off",
"no-plusplus":"off",
"no-regex-spaces":"off",
"no-restricted-syntax":"off",
"no-return-assign":"off",
"node/no-unsupported-features/es-syntax":"off",
"object-curly-newline":"off",
"prefer-destructuring":"off",
"prefer-template":"off",
"radix":"off"
}
},
{
"files": ["**/*.json"],
"parserOptions": { "ecmaVersion": "latest" },
"plugins": ["json"],
"env": {
"browser": false,
"commonjs": false,
"node": false,
"es2021": false
},
"extends": []
},
{
"files": ["**/*.html"],
"parserOptions": { "sourceType": "module", "ecmaVersion": "latest" },
"parser": "@html-eslint/parser",
"plugins": ["html", "@html-eslint"],
"env": {
"browser": true,
"commonjs": false,
"node": false,
"es2021": false
},
"extends": ["plugin:@html-eslint/recommended"],
"rules": {
"@html-eslint/element-newline":"off",
"@html-eslint/attrs-newline":"off",
"@html-eslint/indent": ["error", 2]
}
},
{
"files": ["**/*.md"],
"plugins": ["markdown"],
"processor": "markdown/markdown",
"rules": {
"no-undef":"off"
}
},
{
"files": ["**/*.md/*.js"],
"rules": {
"@typescript-eslint/no-unused-vars":"off",
"@typescript-eslint/triple-slash-reference":"off",
"import/newline-after-import":"off",
"import/no-unresolved":"off",
"no-console":"off",
"no-global-assign":"off",
"no-multi-spaces":"off",
"no-restricted-globals":"off",
"no-undef":"off",
"no-unused-vars":"off",
"node/no-missing-import":"off",
"node/no-missing-require":"off",
"promise/catch-or-return":"off"
}
}
],
"ignorePatterns": [
"node_modules",
"assets",
"dist",
"demo/helpers/*.js",
"demo/typescript/*.js",
"demo/faceid/*.js",
"demo/tracker/*.js",
"typedoc"
]
}
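The config handles each file class through per-extension `overrides`. A minimal sketch of running it through the ESLint Node API, assuming `eslint` v8 (this is the legacy eslintrc format) and the lint locations from `.build.json` above:

```js
// hypothetical lint driver: picks up .eslintrc.json and lints the locations named in .build.json
const { ESLint } = require('eslint');

async function lint() {
  const eslint = new ESLint({ useEslintrc: true }); // resolves .eslintrc.json automatically
  const results = await eslint.lintFiles(['src/**/*.ts', 'test/**/*.js', 'demo/**/*.js']);
  const formatter = await eslint.loadFormatter('stylish');
  console.log(await formatter.format(results));
  const errors = results.reduce((sum, r) => sum + r.errorCount, 0);
  if (errors > 0) process.exit(1); // fail the build on any lint error
}
lint();
```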

.github/FUNDING.yml (new file, +11 lines)

@@ -0,0 +1,11 @@
github: [vladmandic]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']

Issue template: bug report (modified)

@@ -13,16 +13,23 @@ assignees: vladmandic
**Expected Behavior**
**Environment
**Environment**
- Module version?
- Human library version?
- Built-in demo or custom code?
- Type of module used (e.g. `js`, `esm`, `esm-nobundle`)?
- Browser or NodeJS and version (e.g. NodeJS 14.15 or Chrome 89)?
- OS and Hardware platform (e.g. Windows 10, Ubuntu Linux on x64, Android 10)?
- Packager (if any) (e.g. webpack, rollup, parcel, esbuild, etc.)?
- TensorFlow/JS version (if not using bundled module)?
- Browser or NodeJS and version (e.g. *NodeJS 14.15* or *Chrome 89*)?
- OS and Hardware platform (e.g. *Windows 10*, *Ubuntu Linux on x64*, *Android 10*)?
- Packager (if any) (e.g. *webpack*, *rollup*, *parcel*, *esbuild*, etc.)?
- Framework (if any) (e.g. *React*, *NextJS*, etc.)?
**Diagnostics**
- Check out any applicable [diagnostic steps](https://github.com/vladmandic/human/wiki/Diag)
**Additional**
- For installation or startup issues include your `package.json`
- For usage issues, it is recommended to post your code as [gist](https://gist.github.com/)
- For general questions, create a [discussion topic](https://github.com/vladmandic/human/discussions)

.gitignore (modified, 10 lines changed)

@@ -1,3 +1,9 @@
node_modules
private
node_modules/
types/lib
pnpm-lock.yaml
package-lock.json
*.swp
samples/**/*.mp4
samples/**/*.webm
temp
tmp

.hintrc (modified)

@@ -3,8 +3,11 @@
"web-recommended"
],
"browserslist": [
"last 1 versions",
"not ie < 20"
"chrome >= 90",
"edge >= 90",
"firefox >= 100",
"android >= 90",
"safari >= 15"
],
"hints": {
"no-inline-styles": "off",

.markdownlint.json (modified)

@@ -1,6 +1,7 @@
{
"MD012": false,
"MD013": false,
"MD029": false,
"MD033": false,
"MD036": false,
"MD041": false

.npmignore (modified)

@@ -1,3 +1,7 @@
node_modules
private
pnpm-lock.yaml
samples
typedoc
test
wiki
types/lib

.npmrc (new file, +5 lines)

@@ -0,0 +1,5 @@
force=true
omit=dev
legacy-peer-deps=true
strict-peer-dependencies=false
node-options='--no-deprecation'

.vscode/settings.json (new file, +10 lines)

@@ -0,0 +1,10 @@
{
"search.exclude": {
"dist/*": true,
"node_modules/*": true,
"types": true,
"typedoc": true,
},
"search.useGlobalIgnoreFiles": true,
"search.useParentIgnoreFiles": true
}

CHANGELOG.md (modified)

@@ -1,16 +1,658 @@
# @vladmandic/human
Version: **1.4.0**
Description: **Human: AI-powered 3D Face Detection, Face Description & Recognition, Body Pose Tracking, Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction & Gesture Recognition**
Author: **Vladimir Mandic <mandic00@live.com>**
License: **MIT**
Repository: **<git+https://github.com/vladmandic/human.git>**
Version: **3.3.5**
Description: **Human: AI-powered 3D Face Detection & Rotation Tracking, Face Description & Recognition, Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction, Gesture Recognition**
Author: **Vladimir Mandic <mandic00@live.com>**
License: **MIT**
Repository: **<https://github.com/vladmandic/human>**
## Changelog
### **3.3.5** 2025/02/05 mandic00@live.com
### **HEAD -> main** 2021/04/08 mandic00@live.com
### **origin/main** 2024/10/24 mandic00@live.com
- add human.draw.tensor method
### **3.3.4** 2024/10/24 mandic00@live.com
### **3.3.3** 2024/10/14 mandic00@live.com
- add loaded property to model stats and mark models not loaded correctly.
- release build
### **3.3.2** 2024/09/11 mandic00@live.com
- full rebuild
### **3.3.1** 2024/09/11 mandic00@live.com
- add config.face.detector.square option
- human 3.3 alpha test run
- human 3.3 alpha with new build environment
- release rebuild
- fix flazeface tensor scale and update build platform
### **3.2.2** 2024/04/17 mandic00@live.com
### **release: 3.2.1** 2024/02/15 mandic00@live.com
### **3.2.1** 2024/02/15 mandic00@live.com
### **3.2.0** 2023/12/06 mandic00@live.com
- set browser false when navigator object is empty
- https://github.com/vladmandic/human/issues/402
### **release: 3.1.2** 2023/09/18 mandic00@live.com
- full rebuild
### **3.1.2** 2023/09/18 mandic00@live.com
- major toolkit upgrade
- full rebuild
- major toolkit upgrade
### **3.1.1** 2023/08/05 mandic00@live.com
- fixes plus tfjs upgrade for new release
### **3.0.7** 2023/06/12 mandic00@live.com
- full rebuild
- fix memory leak in histogramequalization
- initial work on tracker
### **3.0.6** 2023/03/21 mandic00@live.com
- add optional crop to multiple models
- fix movenet-multipose
- add electron detection
- fix gender-ssrnet-imdb
- add movenet-multipose workaround
- rebuild and publish
- add face.detector.minsize configurable setting
- add affectnet
### **3.0.5** 2023/02/02 mandic00@live.com
- add gear-e models
- detect react-native
- redo blazeface annotations
### **3.0.4** 2023/01/29 mandic00@live.com
- make naviator calls safe
- fix facedetector-only configs
### **3.0.3** 2023/01/07 mandic00@live.com
- full rebuild
### **3.0.2** 2023/01/06 mandic00@live.com
- default face.rotation disabled
### **release: 3.0.1** 2022/11/22 mandic00@live.com
### **3.0.1** 2022/11/22 mandic00@live.com
- support dynamic loads
- polish demos
- add facedetect demo and fix model async load
- enforce markdown linting
- cleanup git history
- default empty result
- refactor draw and models namespaces
- refactor distance
- add basic anthropometry
- added webcam id specification
- include external typedefs
- prepare external typedefs
- rebuild all
- include project files for types
- architectural improvements
- refresh dependencies
- add named exports
- add draw label templates
- reduce dev dependencies
- tensor rank strong typechecks
- rebuild dependencies
### **2.11.1** 2022/10/09 mandic00@live.com
- add rvm segmentation model
- add human.webcam methods
- create funding.yml
- fix rotation interpolation
### **2.10.3** 2022/09/21 mandic00@live.com
- add human.video method
### **2.10.2** 2022/09/11 mandic00@live.com
- add node.js esm compatibility (#292)
- release
### **2.10.1** 2022/09/07 mandic00@live.com
- release candidate
- add config flags
- test update
- release preview
- optimize startup sequence
- reorder backend init code
- test embedding
- embedding test
- add browser iife tests
- minor bug fixes and increased test coverage
- extend release tests
- add model load exception handling
- add softwarekernels config option
- expand type safety
- full eslint rule rewrite
### **2.9.4** 2022/08/20 mandic00@live.com
- add browser test
- add tensorflow library detection
- fix wasm detection
- enumerate additional models
- release refresh
### **2.9.3** 2022/08/10 mandic00@live.com
- rehault testing framework
- release refresh
- add insightface
### **2.9.2** 2022/08/08 mandic00@live.com
- release rebuild
### **2.9.1** 2022/07/25 mandic00@live.com
- full rebuild
- release cleanup
- tflite experiments
- add load monitor test
- beta for upcoming major release
- swtich to release version of tfjs
- placeholder for face contours
- improve face compare in main demo
- add webview support
- fix(gear): ensure gear.modelpath is used for loadmodel()
- npm default install should be prod only
- fix npm v7 compatibility
- add getmodelstats method
- rebuild
- release build
### **2.8.1** 2022/06/08 mandic00@live.com
- webgpu and wasm optimizations
- add faceboxes prototype
- full rebuild
### **2.7.4** 2022/05/24 mandic00@live.com
### **2.7.3** 2022/05/24 mandic00@live.com
- add face.mesh.keepinvalid config flag
- initial work for new facemesh model
### **2.7.2** 2022/05/12 mandic00@live.com
- fix demo when used with video files
- major release
### **2.7.1** 2022/05/09 mandic00@live.com
- support 4k input
- add attention draw methods
- fix coloring function
- enable precompile as part of warmup
- prepare release beta
- change default face crop
- beta release 2.7
- refactor draw methods
- implement face attention model
- add electronjs demo
- rebuild
### **2.6.5** 2022/04/01 mandic00@live.com
- bundle offscreencanvas types
- prototype precompile pass
- fix changelog generation
- fix indexdb config check
### **2.6.4** 2022/02/27 mandic00@live.com
- fix types typo
- refresh
- add config option wasmplatformfetch
### **2.6.3** 2022/02/10 mandic00@live.com
- rebuild
### **2.6.2** 2022/02/07 mandic00@live.com
- release rebuild
### **2.6.1** 2022/01/20 mandic00@live.com
- implement model caching using indexdb
- prototype global fetch handler
- fix face box and hand tracking when in front of face
### **2.5.8** 2022/01/14 mandic00@live.com
- fix samples
- fix(src): typo
- change on how face box is calculated
### **2.5.7** 2021/12/27 mandic00@live.com
- fix posenet
- release refresh
### **2.5.6** 2021/12/15 mandic00@live.com
- strong type for string enums
- rebuild
- fix node detection in electron environment
### **2.5.5** 2021/12/01 mandic00@live.com
- added human-motion
- add offscreencanvas typedefs
- release preview
- fix face box scaling on detection
- cleanup
### **2.5.4** 2021/11/22 mandic00@live.com
- prototype blazepose detector
- minor fixes
- add body 3d interpolation
- edit blazepose keypoints
- new build process
### **2.5.3** 2021/11/18 mandic00@live.com
- create typedef rollup
- optimize centernet
- cache frequent tf constants
- add extra face rotation prior to mesh
- release 2.5.2
- improve error handling
### **2.5.2** 2021/11/14 mandic00@live.com
- fix mobilefacenet module
- fix gear and ssrnet modules
- fix for face crop when mesh is disabled
- implement optional face masking
- add similarity score range normalization
- add faceid demo
- documentation overhaul
- auto tensor shape and channels handling
- disable use of path2d in node
- add liveness module and facerecognition demo
- initial version of facerecognition demo
- rebuild
- add type defs when working with relative path imports
- disable humangl backend if webgl 1.0 is detected
- add additional hand gestures
### **2.5.1** 2021/11/08 mandic00@live.com
- new human.compare api
- added links to release notes
- new frame change detection algorithm
- add histogram equalization
- implement wasm missing ops
- performance and memory optimizations
- fix react compatibility issues
- improve box rescaling for all modules
- improve precision using wasm backend
- refactor predict with execute
- patch tfjs type defs
- start 2.5 major version
- build and docs cleanup
- fix firefox bug
### **2.4.3** 2021/10/28 mandic00@live.com
- additional human.performance counters
### **2.4.2** 2021/10/27 mandic00@live.com
- add ts demo
- switch from es2018 to es2020 for main build
- switch to custom tfjs for demos
- release 2.4
### **2.4.1** 2021/10/25 mandic00@live.com
- refactoring plus jsdoc comments
- increase face similarity match resolution
- time based caching
- turn on minification
- initial work on skiptime
- added generic types
- enhanced typing exports
- add optional autodetected custom wasm path
### **2.3.6** 2021/10/21 mandic00@live.com
- fix for human.draw labels and typedefs
- refactor human.env to a class type
- add human.custom.esm using custom tfjs build
### **2.3.5** 2021/10/19 mandic00@live.com
- removed direct usage of performance.now
### **2.3.4** 2021/10/19 mandic00@live.com
- minor blazepose optimizations
- compress samples
- remove posenet from default package
- enhanced movenet postprocessing
- use transferrable buffer for worker messages
- add optional anti-spoofing module
- add node-match advanced example using worker thread pool
- package updates
- optimize image preprocessing
- set webgpu optimized flags
- major precision improvements to movenet and handtrack
- image processing fixes
- redesign body and hand caching and interpolation
- demo default config cleanup
- improve gaze and face angle visualizations in draw
- release 2.3.1
### **2.3.1** 2021/10/06 mandic00@live.com
- workaround for chrome offscreencanvas bug
- fix backend conflict in webworker
- add blazepose v2 and add annotations to body results
- fix backend order initialization
- added docker notes
- breaking change: new similarity and match methods
- tweaked default values
- enable handtrack as default model
- redesign face processing
- refactoring
- define app specific types
- implement box caching for movenet
- autodetect number of bodies and hands
- upload new samples
- new samples gallery and major code folder restructure
- new release
### **2.2.3** 2021/09/24 mandic00@live.com
- optimize model loading
- support segmentation for nodejs
- redo segmentation and handtracking
- prototype handtracking
- automated browser tests
- support for dynamic backend switching
- initial automated browser tests
- enhanced automated test coverage
- more automated tests
- added configuration validation
- prevent validation failed on some model combinations
- webgl exception handling
### **2.2.2** 2021/09/17 mandic00@live.com
- experimental webgl status monitoring
- major release
### **2.2.1** 2021/09/16 mandic00@live.com
- add vr model demo
- all tests passing
- redefine draw helpers interface
- add simple webcam and webrtc demo
- added visual results browser to demo
- reorganize tfjs bundle
- experimental custom tfjs bundle - disabled
- add platform and backend capabilities detection
- enhanced automated tests
- enable canvas patching for nodejs
- full ts strict typechecks
- fix multiple memory leaks
- modularize human class and add model validation
- add dynamic kernel op detection
- added human.env diagnostic class
- minor typos
- release candidate
- parametrize face config
- mark all config items as optional
- redefine config and result interfaces
- fix usge of string enums
- start using partial definitions
- implement event emitters
- fix iife loader
- simplify dependencies
- change build process
- add benchmark info
- simplify canvas handling in nodejs
- full rebuild
### **2.1.5** 2021/08/31 mandic00@live.com
- added demo node-canvas
- dynamically generate default wasm path
- implement finger poses in hand detection and gestures
- implemented movenet-multipose model
### **2.1.4** 2021/08/19 mandic00@live.com
- add static type definitions to main class
- fix interpolation overflow
- rebuild full
- improve face box caching
- strict type checks
- add webgu checks
- experimental webgpu support
- add experimental webgu demo
- add backend initialization checks
- complete async work
- list detect cameras
- switch to async data reads
### **2.1.3** 2021/08/12 mandic00@live.com
- fix centernet & update blazeface
- minor update
- replace movenet with lightning-v4
- enable webgl uniform support for faster warmup
### **2.1.2** 2021/07/29 mandic00@live.com
- fix unregistered ops in tfjs
- fix typo
- rebuild new release
### **2.1.1** 2021/07/29 mandic00@live.com
- add note on manually disping tensor
- modularize model loading
### **2.0.3** 2021/06/18 mandic00@live.com
- fix demo paths
- added multithreaded demo
### **2.0.2** 2021/06/14 mandic00@live.com
- reorganize demos
- fix centernet box width & height
- add body segmentation sample
- add release notes
- release 2.0
### **2.0.1** 2021/06/08 mandic00@live.com
- add video drag&drop capability
- modularize build platform
- custom build tfjs from sources
- modularize build platform
- enable body segmentation and background replacement in demo
- minor git corruption
- unified build
- enable body segmentation and background replacement
- work on body segmentation
- added experimental body segmentation module
- add meet and selfie models
- add live hints to demo
- switch worker from module to iife importscripts
- release candidate
- added samples to git
- implemented drag & drop for image processing
- release candidate
- breaking changes to results.face output properties
- breaking changes to results.object output properties
- breaking changes to results.hand output properties
- breaking changes to results.body output properties
- implemented human.next global interpolation method
- finished draw buffering and smoothing and enabled by default
- implemented service worker
- release candidate
- added usage restrictions
- quantize handdetect model
- added experimental movenet-lightning and removed blazepose from default dist
- added experimental face.rotation.gaze
- fix and optimize for mobile platform
- lock typescript to 4.2 due to typedoc incompatibility with 4.3
### **1.9.4** 2021/05/27 mandic00@live.com
- fix demo facecompare
- webhint and lighthouse optimizations
- add camera startup diag messages
- implemented unified result.persons that combines face, body and hands for each person
- added experimental results interpolation for smooth draw operations
### **1.9.3** 2021/05/23 mandic00@live.com
- use green weighted for input diff calculation
- implement experimental drawoptions.bufferedoutput and bufferedfactor
- use explicit tensor interface
- add tfjs types and remove all instances of any
- enhance strong typing
- rebuild all for release
### **1.9.2** 2021/05/22 mandic00@live.com
- add id and boxraw on missing objects
- restructure results strong typing
### **1.9.1** 2021/05/21 mandic00@live.com
- caching improvements
- add experimental mb3-centernet object detection
- individual model skipframes values still max high threshold for caching
- config.videooptimized has been removed and config.cachesensitivity has been added instead
- caching determination is now dynamic based on detection of input change and not based on input types
- human 1.9.0 beta with breaking changes regarding caching
### **1.8.5** 2021/05/18 mandic00@live.com
- add node-video sample
- add node-webcam demo
- fix node build and update model signatures
### **1.8.4** 2021/05/11 mandic00@live.com
### **1.8.3** 2021/05/05 mandic00@live.com
- switch posenet weights
### **1.8.2** 2021/05/04 mandic00@live.com
- release 1.8 with major changes and tfjs 3.6.0
### **1.8.1** 2021/04/30 mandic00@live.com
- blazeface optimizations
- add hand labels in draw
- cleanup demo workflow
- convert blazeface to module
- version 1.8 release candidate
- build nodejs deliverables in non-minified form
- stop building sourcemaps for nodejs deliverables
- remove deallocate, profile, scoped
- replaced maxfaces, maxdetections, maxhands, maxresults with maxdetected
- replaced nmsradius with built-in default
- unified minconfidence and scorethresdold as minconfidence
- add exception handlers to all demos
- remove blazeface-front and add unhandledrejection handler
- major update for 1.8 release candidate
- enable webworker detection
### **1.7.1** 2021/04/25 mandic00@live.com
- remove obsolete binary models
- enable cross origin isolation
- rewrite posenet decoder
- remove efficientpose
- major version rebuild
### **1.6.1** 2021/04/22 mandic00@live.com
- add npmrc
- added filter.flip feature
- added demo load image from http
- mobile demo optimization and iris gestures
- full rebuild
- new look
- added benchmarks
- added node-multiprocess demo
- fix image orientation
- flat app style
- add full nodejs test coverage
### **1.5.2** 2021/04/14 mandic00@live.com
- experimental node-wasm support
### **1.5.1** 2021/04/13 mandic00@live.com
- fix for safari imagebitmap
- refactored human.config and human.draw
### **1.4.3** 2021/04/12 mandic00@live.com
- implement webrtc
### **1.4.2** 2021/04/12 mandic00@live.com
- added support for multiple instances of human
- fix typedoc
- exception handling
### **1.4.1** 2021/04/09 mandic00@live.com
- add modelbasepath option
### **1.3.5** 2021/04/06 mandic00@live.com
@@ -90,7 +732,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
- add experimental nanodet object detection
- full models signature
- cleanup
### **1.1.7** 2021/03/16 mandic00@live.com
@@ -136,7 +777,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
### **1.0.3** 2021/03/10 mandic00@live.com
- strong typing for public classes and hide private classes
- re-added blazeface-front
- enhanced age, gender, emotion detection
- full rebuild
@@ -145,151 +785,73 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
- remove blazeface-front, blazepose-upper, faceboxes
- remove blazeface-front and faceboxes
### **release: 1.0.1** 2021/03/09 mandic00@live.com
### **1.0.1** 2021/03/09 mandic00@live.com
- fix for face detector when mesh is disabled
- optimize for npm
### **0.40.9** 2021/03/08 mandic00@live.com
- 0.40.9
- fix performance issue when running with low confidence
### **0.40.8** 2021/03/08 mandic00@live.com
### **0.40.7** 2021/03/06 mandic00@live.com
- 0.40.8
- 0.40.7
- implemented 3d face angle calculations
### **0.40.6** 2021/03/06 mandic00@live.com
- 0.40.6
- add curve draw output
### **0.40.5** 2021/03/05 mandic00@live.com
- 0.40.5
- fix human.draw
### **0.40.4** 2021/03/05 mandic00@live.com
- cleanup blazepose code
- 0.40.4
- fix demo
### **0.40.3** 2021/03/05 mandic00@live.com
### **0.40.2** 2021/03/05 mandic00@live.com
- 0.40.3
- 0.40.2
- added blazepose-upper
### **0.40.1** 2021/03/04 mandic00@live.com
- 0.40.1
- implement blazepose and update demos
- add todo list
### **0.30.6** 2021/03/03 mandic00@live.com
- 0.30.6
- fine tuning age and face models
### **0.30.5** 2021/03/02 mandic00@live.com
- 0.30.5
- add debug logging flag
### **0.30.4** 2021/03/01 mandic00@live.com
- 0.30.4
- added skipinitial flag
### **0.30.3** 2021/02/28 mandic00@live.com
- 0.30.3
- typo
### **0.30.2** 2021/02/26 mandic00@live.com
- 0.30.2
- rebuild
- fix typo
### **0.30.1** 2021/02/25 mandic00@live.com
### **0.20.11** 2021/02/24 mandic00@live.com
### **0.20.10** 2021/02/22 mandic00@live.com
### **0.20.9** 2021/02/21 mandic00@live.com
- remove extra items
- simmilarity fix
### **0.20.8** 2021/02/21 mandic00@live.com
- embedding fix
### **0.20.7** 2021/02/21 mandic00@live.com
- 0.30.1
- 0.20.11
- 0.20.10
- 0.20.9
- 0.20.8
- 0.20.7
- build fix
### **0.20.6** 2021/02/21 mandic00@live.com
- 0.20.6
- embedding fix
### **0.20.5** 2021/02/21 mandic00@live.com
- 0.20.5
- fix imagefx and add dev builds
### **0.20.4** 2021/02/19 mandic00@live.com
- 0.20.4
### **0.20.3** 2021/02/17 mandic00@live.com
- 0.20.3
- rebuild
### **0.20.2** 2021/02/13 mandic00@live.com
- 0.20.2
- merge branch 'main' of https://github.com/vladmandic/human into main
- create codeql-analysis.yml
- create security.md
- add templates
### **0.20.1** 2021/02/08 mandic00@live.com
- 0.20.1
- menu fixes
- convert to typescript
### **0.11.5** 2021/02/06 mandic00@live.com
- 0.11.5
- added faceboxes alternative model
### **0.11.4** 2021/02/06 mandic00@live.com
### **0.11.3** 2021/02/02 mandic00@live.com
### **0.11.2** 2021/01/30 mandic00@live.com
- 0.11.4
- 0.11.3
- 0.11.2
- added warmup for nodejs
### **update for tfjs 3.0.0** 2021/01/29 mandic00@live.com
### **0.11.1** 2021/01/29 mandic00@live.com
### **0.10.2** 2021/01/22 mandic00@live.com
### **0.10.1** 2021/01/20 mandic00@live.com
- 0.11.1
- 0.10.2
- 0.10.1
### **0.9.26** 2021/01/18 mandic00@live.com
- fix face detection when mesh is disabled
- added minification notes
- version bump
### **0.9.25** 2021/01/13 mandic00@live.com
@@ -351,7 +913,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
- conditional hand rotation
- staggered skipframes
- fix permissions
### **0.9.13** 2020/12/08 mandic00@live.com
@@ -403,9 +964,7 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
### **0.9.3** 2020/11/16 mandic00@live.com
- switched to minified build
### **release: 1.2** 2020/11/15 mandic00@live.com
- web worker fixes
- full rebuild
### **0.9.2** 2020/11/14 mandic00@live.com
@@ -462,7 +1021,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
- optimized model loader
- merge branch 'main' of https://github.com/vladmandic/human into main
- created wiki
- delete bug_report.md
- optimize font resizing
- fix nms sync call
@@ -486,7 +1044,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
- optimized camera and mobile layout
- fixed worker and filter compatibility
- removed test code
### **0.7.2** 2020/11/04 mandic00@live.com
@@ -563,7 +1120,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
### **0.4.8** 2020/10/28 mandic00@live.com
- revert "updated menu handler"
- fix webpack compatibility issue
### **0.4.7** 2020/10/27 mandic00@live.com
@@ -651,7 +1207,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
### **0.2.8** 2020/10/13 mandic00@live.com
- added example image
### **0.2.7** 2020/10/13 mandic00@live.com
@@ -667,7 +1222,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
### **0.2.4** 2020/10/12 mandic00@live.com
- removed extra files
### **0.2.3** 2020/10/12 mandic00@live.com
@@ -675,9 +1229,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
### **0.2.2** 2020/10/12 mandic00@live.com
### **release: 1.0** 2020/10/12 mandic00@live.com
### **0.2.1** 2020/10/12 mandic00@live.com
- added sample image

CODE_OF_CONDUCT.md (modified)

@@ -22,3 +22,12 @@ Any of the following behavior is unacceptable:
If you believe someone is violating the code of conduct, we ask that you report it
Participants asked to stop any harassing behavior are expected to comply immediately
<br>
## Usage Restrictions
`Human` library does not allow usage in the following scenarios:
- Any life-critical decisions
- Any form of surveillance without the user's consent is explicitly out of scope

CONTRIBUTING.md (modified)

@@ -14,7 +14,7 @@ Procedure for contributing:
`npm run lint`
- Test your changes in Browser and NodeJS
`npm run dev` and navigate to https://localhost:10031
`node demo/node.js`
`node test/test-node.js`
- Push changes to your fork
Exclude files in `/dist`, `/types`, `/typedoc` from the commit as they are dynamically generated during build
- Submit a PR (pull request)

README.md (modified, 396 lines changed)

@@ -1,72 +1,144 @@
[![](https://img.shields.io/static/v1?label=Sponsor&message=%E2%9D%A4&logo=GitHub&color=%23fe8e86)](https://github.com/sponsors/vladmandic)
![Git Version](https://img.shields.io/github/package-json/v/vladmandic/human?style=flat-square&svg=true&label=git)
![NPM Version](https://img.shields.io/npm/v/@vladmandic/human.png?style=flat-square)
![Last Commit](https://img.shields.io/github/last-commit/vladmandic/human?style=flat-square&svg=true)
![License](https://img.shields.io/github/license/vladmandic/human?style=flat-square&svg=true)
![GitHub Status Checks](https://img.shields.io/github/checks-status/vladmandic/human/main?style=flat-square&svg=true)
![Vulnerabilities](https://img.shields.io/snyk/vulnerabilities/github/vladmandic/human?style=flat-square&svg=true)
# Human Library
**3D Face Detection & Rotation Tracking, Face Description & Recognition,**
**Body Pose Tracking, 3D Hand & Finger Tracking,**
**Iris Analysis, Age & Gender & Emotion Prediction,**
**Gesture Recognition**
**AI-powered 3D Face Detection & Rotation Tracking, Face Description & Recognition,**
**Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis,**
**Age & Gender & Emotion Prediction, Gaze Tracking, Gesture Recognition, Body Segmentation**
<br>
JavaScript module using TensorFlow/JS Machine Learning library
## Highlights
- **Browser**:
Compatible with both desktop and mobile platforms
Compatible with *CPU*, *WebGL*, *WASM* backends
Compatible with *WebWorker* execution
- **NodeJS**:
Compatible with both software *tfjs-node* and
GPU accelerated backends *tfjs-node-gpu* using CUDA libraries
- Compatible with most server-side and client-side environments and frameworks
- Combines multiple machine learning models which can be switched on-demand depending on the use-case
- Related models are executed in an attention pipeline to provide details when needed
- Optimized input pre-processing that can enhance image quality of any type of inputs
- Detection of frame changes to trigger only required models for improved performance
- Intelligent temporal interpolation to provide smooth results regardless of processing performance
- Simple unified API
- Built-in Image, Video and WebCam handling
Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) for processing of live WebCam video or static images
[*Jump to Quick Start*](#quick-start)
<br>
## Compatibility
**Browser**:
- Compatible with both desktop and mobile platforms
- Compatible with *WebGPU*, *WebGL*, *WASM*, *CPU* backends
- Compatible with *WebWorker* execution
- Compatible with *WebView*
- Primary platform: *Chromium*-based browsers
- Secondary platform: *Firefox*, *Safari*
**NodeJS**:
- Compatible with *WASM* backend for execution on architectures where *tensorflow* binaries are not available
- Compatible with *tfjs-node* using software execution via *tensorflow* shared libraries
- Compatible with *tfjs-node* using GPU-accelerated execution via *tensorflow* shared libraries and nVidia CUDA
- Supported versions are from **14.x** to **22.x**
- NodeJS version **23.x** is not supported due to breaking changes and issues with `@tensorflow/tfjs`
<br>
## Releases
- [Release Notes](https://github.com/vladmandic/human/releases)
- [NPM Link](https://www.npmjs.com/package/@vladmandic/human)
## Demos
- [**Demo Application**](https://vladmandic.github.io/human/demo/index.html)
- [**Face Extraction, Description, Identification and Matching**](https://vladmandic.github.io/human/demo/facematch.html)
- [**Face Extraction and 3D Rendering**](https://vladmandic.github.io/human/demo/face3d.html)
*Check out the fully annotated [**Simple Live Demo**](https://vladmandic.github.io/human/demo/typescript/index.html) app as a good starting point ([html](https://github.com/vladmandic/human/blob/main/demo/typescript/index.html))([code](https://github.com/vladmandic/human/blob/main/demo/typescript/index.ts))*
*Check out [**Main Live Demo**](https://vladmandic.github.io/human/demo/index.html) app for advanced processing of webcam, video stream or static images with all possible tunable options*
- To start video detection, simply press *Play*
- To process images, simply drag & drop in your Browser window
- Note: For optimal performance, select only models you'd like to use
- Note: If you have a modern GPU, *WebGL* (default) backend is preferred, otherwise select *WASM* backend
<br>
- [**List of all Demo applications**](https://github.com/vladmandic/human/wiki/Demos)
- [**Live Examples gallery**](https://vladmandic.github.io/human/samples/index.html)
### Browser Demos
*All browser demos are self-contained without any external dependencies*
- **Full** [[*Live*]](https://vladmandic.github.io/human/demo/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo): Main browser demo app that showcases all Human capabilities
- **Simple** [[*Live*]](https://vladmandic.github.io/human/demo/typescript/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/typescript): Simple WebCam processing demo in TypeScript
- **Embedded** [[*Live*]](https://vladmandic.github.io/human/demo/video/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/video/index.html): Even simpler demo with tiny code embedded in HTML file
- **Face Detect** [[*Live*]](https://vladmandic.github.io/human/demo/facedetect/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facedetect): Extract faces from images and processes details
- **Face Match** [[*Live*]](https://vladmandic.github.io/human/demo/facematch/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Extract faces from images, calculates face descriptors and similarities and matches them to known database
- **Face ID** [[*Live*]](https://vladmandic.github.io/human/demo/faceid/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/faceid): Runs multiple checks to validate webcam input before performing face match to faces in IndexedDB
- **Multi-thread** [[*Live*]](https://vladmandic.github.io/human/demo/multithread/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread): Runs each Human module in a separate web worker for highest possible performance
- **NextJS** [[*Live*]](https://vladmandic.github.io/human-next/out/index.html) [[*Details*]](https://github.com/vladmandic/human-next): Use Human with TypeScript, NextJS and ReactJS
- **ElectronJS** [[*Details*]](https://github.com/vladmandic/human-electron): Use Human with TypeScript and ElectronJS to create standalone cross-platform apps
- **3D Analysis with BabylonJS** [[*Live*]](https://vladmandic.github.io/human-motion/src/index.html) [[*Details*]](https://github.com/vladmandic/human-motion): 3D tracking and visualization of head, face, eye, body and hand
- **VRM Virtual Model Tracking with Three.JS** [[*Live*]](https://vladmandic.github.io/human-three-vrm/src/human-vrm.html) [[*Details*]](https://github.com/vladmandic/human-three-vrm): VR model with head, face, eye, body and hand tracking
- **VRM Virtual Model Tracking with BabylonJS** [[*Live*]](https://vladmandic.github.io/human-bjs-vrm/src/index.html) [[*Details*]](https://github.com/vladmandic/human-bjs-vrm): VR model with head, face, eye, body and hand tracking
### NodeJS Demos
*NodeJS demos may require extra dependencies which are used to decode inputs*
*See header of each demo to see its dependencies as they are not automatically installed with `Human`*
- **Main** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node.js): Process images from files, folders or URLs using native methods
- **Canvas** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-canvas.js): Process image from file or URL and draw results to a new image file using `node-canvas`
- **Video** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-video.js): Processing of video input using `ffmpeg`
- **WebCam** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-webcam.js): Processing of webcam screenshots using `fswebcam`
- **Events** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-event.js): Showcases usage of `Human` eventing to get notifications on processing
- **Similarity** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-similarity.js): Compares two input images for similarity of detected faces
- **Face Match** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch/node-match.js): Parallel processing of face **match** in multiple child worker threads
- **Multiple Workers** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread/node-multiprocess.js): Runs multiple `human` instances in parallel by dispatching them to a pool of pre-created worker processes
- **Dynamic Load** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Loads Human dynamically with multiple different desired backends
## Project pages
- [**Code Repository**](https://github.com/vladmandic/human)
- [**NPM Package**](https://www.npmjs.com/package/@vladmandic/human)
- [**Issues Tracker**](https://github.com/vladmandic/human/issues)
- [**API Specification**](https://vladmandic.github.io/human/typedoc/classes/human.html)
- [**TypeDoc API Specification - Main class**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
- [**TypeDoc API Specification - Full**](https://vladmandic.github.io/human/typedoc/)
- [**Change Log**](https://github.com/vladmandic/human/blob/main/CHANGELOG.md)
<br>
- [**Current To-do List**](https://github.com/vladmandic/human/blob/main/TODO.md)
## Wiki pages
- [**Home**](https://github.com/vladmandic/human/wiki)
- [**Demos**](https://github.com/vladmandic/human/wiki/Demos)
- [**Installation**](https://github.com/vladmandic/human/wiki/Install)
- [**Usage & Functions**](https://github.com/vladmandic/human/wiki/Usage)
- [**Configuration Details**](https://github.com/vladmandic/human/wiki/Configuration)
- [**Output Details**](https://github.com/vladmandic/human/wiki/Outputs)
- [**Configuration Details**](https://github.com/vladmandic/human/wiki/Config)
- [**Result Details**](https://github.com/vladmandic/human/wiki/Result)
- [**Customizing Draw Methods**](https://github.com/vladmandic/human/wiki/Draw)
- [**Caching & Smoothing**](https://github.com/vladmandic/human/wiki/Caching)
- [**Input Processing**](https://github.com/vladmandic/human/wiki/Image)
- [**Face Recognition & Face Description**](https://github.com/vladmandic/human/wiki/Embedding)
- [**Gesture Recognition**](https://github.com/vladmandic/human/wiki/Gesture)
- [**Common Issues**](https://github.com/vladmandic/human/wiki/Issues)
<br>
- [**Background and Benchmarks**](https://github.com/vladmandic/human/wiki/Background)
## Additional notes
- [**Notes on Backends**](https://github.com/vladmandic/human/wiki/Backends)
- [**Comparing Backends**](https://github.com/vladmandic/human/wiki/Backends)
- [**Development Server**](https://github.com/vladmandic/human/wiki/Development-Server)
- [**Build Process**](https://github.com/vladmandic/human/wiki/Build-Process)
- [**Adding Custom Modules**](https://github.com/vladmandic/human/wiki/Module)
- [**Performance Notes**](https://github.com/vladmandic/human/wiki/Performance)
- [**Performance Profiling**](https://github.com/vladmandic/human/wiki/Profiling)
- [**Platform Support**](https://github.com/vladmandic/human/wiki/Platforms)
- [**Diagnostic and Performance trace information**](https://github.com/vladmandic/human/wiki/Diag)
- [**Dockerize Human applications**](https://github.com/vladmandic/human/wiki/Docker)
- [**List of Models & Credits**](https://github.com/vladmandic/human/wiki/Models)
- [**Models Download Repository**](https://github.com/vladmandic/human-models)
- [**Security & Privacy Policy**](https://github.com/vladmandic/human/blob/main/SECURITY.md)
- [**License & Usage Restrictions**](https://github.com/vladmandic/human/blob/main/LICENSE)
<br>
@@ -74,78 +146,111 @@ Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) fo
*Suggestions are welcome!*
<br><hr><br>
<hr><br>
## App Examples
Visit [Examples gallery](https://vladmandic.github.io/human/samples/index.html) for more examples
[<img src="assets/samples.jpg" width="640"/>](assets/samples.jpg)
<br>
## Options
As presented in the demo application...
![Options visible in demo](assets/screenshot-menu.png)
All options as presented in the demo application...
[demo/index.html](demo/index.html)
[<img src="assets/screenshot-menu.png"/>](assets/screenshot-menu.png)
<br>
## Examples
**Results Browser:**
[ *Demo -> Display -> Show Results* ]<br>
[<img src="assets/screenshot-results.png"/>](assets/screenshot-results.png)
<br>
**Training image:**
## Advanced Examples
![Example Training Image](assets/screenshot-sample.png)
1. **Face Similarity Matching:**
Extracts all faces from provided input images,
sorts them by similarity to selected face
and optionally matches detected face with database of known people to guess their names
> [demo/facematch](demo/facematch/index.html)
**Using static images:**
[<img src="assets/screenshot-facematch.jpg" width="640"/>](assets/screenshot-facematch.jpg)
![Example Using Image](assets/screenshot-images.jpg)
2. **Face Detect:**
Extracts all detected faces from loaded images on-demand and highlights face details on a selected face
> [demo/facedetect](demo/facedetect/index.html)
**Live WebCam view:**
[<img src="assets/screenshot-facedetect.jpg" width="640"/>](assets/screenshot-facedetect.jpg)
![Example Using WebCam](assets/screenshot-webcam.jpg)
3. **Face ID:**
Performs a validation check on webcam input to detect a real face and matches it to known faces stored in the database
> [demo/faceid](demo/faceid/index.html)
**Face Similarity Matching:**
[<img src="assets/screenshot-faceid.jpg" width="640"/>](assets/screenshot-faceid.jpg)
![Face Matching](assets/screenshot-facematch.jpg)
<br>
**Face3D OpenGL Rendering:**
4. **3D Rendering:**
> [human-motion](https://github.com/vladmandic/human-motion)
![Face Matching](assets/screenshot-face3d.jpg)
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-face.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-face.jpg)
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-body.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-body.jpg)
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-hand.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-hand.jpg)
<br>
5. **VR Model Tracking:**
> [human-three-vrm](https://github.com/vladmandic/human-three-vrm)
> [human-bjs-vrm](https://github.com/vladmandic/human-bjs-vrm)
[<img src="https://github.com/vladmandic/human-three-vrm/raw/main/assets/human-vrm-screenshot.jpg" width="640"/>](https://github.com/vladmandic/human-three-vrm/raw/main/assets/human-vrm-screenshot.jpg)
6. **Human as OS native application:**
> [human-electron](https://github.com/vladmandic/human-electron)
<br>
**468-Point Face Mesh Details:**
(view in full resolution to see keypoints)
![FaceMesh](assets/facemesh.png)
[<img src="assets/facemesh.png" width="400"/>](assets/facemesh.png)
<br><hr><br>
## Quick Start
Simply load `Human` directly from a CDN in your HTML file:
Simply load `Human` (*IIFE version*) directly from a cloud CDN in your HTML file:
(pick one: `jsdelivr`, `unpkg` or `cdnjs`)
```html
<!DOCTYPE HTML>
<script src="https://cdn.jsdelivr.net/npm/@vladmandic/human/dist/human.js"></script>
```
or
```html
<script src="https://unpkg.dev/@vladmandic/human/dist/human.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/human/3.0.0/human.js"></script>
```
For details, including how to use `Browser ESM` version or `NodeJS` version of `Human`, see [**Installation**](https://github.com/vladmandic/human/wiki/Install)
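For the `Browser ESM` variant, a minimal sketch, assuming `dist/human.esm.js` (built by the `human/browser/esm/bundle` target in the build config above) is served next to your page and models are reachable via `modelBasePath`:

```js
// hypothetical ESM usage; the relative paths depend on how dist/ and models/ are hosted
import { Human } from '../dist/human.esm.js';

const human = new Human({ modelBasePath: '../models' }); // modelBasePath option appears in the changelog above
const result = await human.detect(document.getElementById('image-id'));
console.log(result.face, result.body, result.hand, result.gesture);
```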
<br>
## Example
## Code Examples
Example simple app that uses Human to process video input and
Simple app that uses Human to process video input and
draw output on screen using internal draw helper functions
```js
// create instance of human with simple configuration using default values
const config = { backend: 'webgl' };
const human = new Human(config);
const human = new Human.Human(config);
// select input HTMLVideoElement and output HTMLCanvasElement from page
const inputVideo = document.getElementById('video-id');
const outputCanvas = document.getElementById('canvas-id');
function detectVideo() {
// select input HTMLVideoElement and output HTMLCanvasElement from page
const inputVideo = document.getElementById('video-id');
const outputCanvas = document.getElementById('canvas-id');
// perform processing using default configuration
human.detect(inputVideo).then((result) => {
// result object will contain detected details
@@ -157,45 +262,204 @@ function detectVideo() {
human.draw.body(outputCanvas, result.body);
human.draw.hand(outputCanvas, result.hand);
human.draw.gesture(outputCanvas, result.gesture);
// and loop immediate to next frame
// and loop immediately to the next frame
requestAnimationFrame(detectVideo);
return result;
});
}
detectVideo();
```
or using `async/await`:
```js
// create instance of human with simple configuration using default values
const config = { backend: 'webgl' };
const human = new Human(config); // create instance of Human
const inputVideo = document.getElementById('video-id');
const outputCanvas = document.getElementById('canvas-id');
async function detectVideo() {
const result = await human.detect(inputVideo); // run detection
human.draw.all(outputCanvas, result); // draw all results
requestAnimationFrame(detectVideo); // run loop
}
detectVideo(); // start loop
```
or using `Events`:
```js
// create instance of human with simple configuration using default values
const config = { backend: 'webgl' };
const human = new Human(config); // create instance of Human
const inputVideo = document.getElementById('video-id');
const outputCanvas = document.getElementById('canvas-id');
human.events.addEventListener('detect', () => { // event gets triggered when detect is complete
human.draw.all(outputCanvas, human.result); // draw all results
});
function detectVideo() {
human.detect(inputVideo) // run detection
.then(() => requestAnimationFrame(detectVideo)); // upon detect complete start processing of the next frame
}
detectVideo(); // start loop
```
or using interpolated results for smooth video processing by separating detection and drawing loops:
```js
const human = new Human(); // create instance of Human
const inputVideo = document.getElementById('video-id');
const outputCanvas = document.getElementById('canvas-id');
let result;
async function detectVideo() {
result = await human.detect(inputVideo); // run detection
requestAnimationFrame(detectVideo); // run detect loop
}
async function drawVideo() {
if (result) { // check if result is available
const interpolated = human.next(result); // get smoothed result using last-known results
human.draw.all(outputCanvas, interpolated); // draw the frame
}
requestAnimationFrame(drawVideo); // run draw loop
}
detectVideo(); // start detection loop
drawVideo(); // start draw loop
```
or the same, but using built-in full video processing instead of running a manual frame-by-frame loop:
```js
const human = new Human(); // create instance of Human
const inputVideo = document.getElementById('video-id');
const outputCanvas = document.getElementById('canvas-id');
async function drawResults() {
const interpolated = human.next(); // get smoothed result using last-known results
human.draw.all(outputCanvas, interpolated); // draw the frame
requestAnimationFrame(drawResults); // run draw loop
}
human.video(inputVideo); // start detection loop which continuously updates results
drawResults(); // start draw loop
```
or using built-in webcam helper methods that take care of video handling completely:
```js
const human = new Human(); // create instance of Human
const outputCanvas = document.getElementById('canvas-id');
async function drawResults() {
const interpolated = human.next(); // get smoothed result using last-known results
human.draw.canvas(human.webcam.element, outputCanvas); // draw current webcam frame
human.draw.all(outputCanvas, interpolated); // draw the frame detection results
requestAnimationFrame(drawResults); // run draw loop
}
await human.webcam.start({ crop: true });
human.video(human.webcam.element); // start detection loop which continuously updates results
drawResults(); // start draw loop
```
And for even better results, you can run detection in a separate web worker thread
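For illustration, a minimal sketch of that worker setup; the file name, message shape and backend choice are assumptions:
```js
// human-worker.js (hypothetical file name): runs detection off the main thread
importScripts('../dist/human.js'); // IIFE bundle exposes the global Human namespace
const human = new Human.Human({ backend: 'wasm' }); // assumption: wasm backend, as webgl is typically unavailable in workers
onmessage = async (msg) => {
  const result = await human.detect(msg.data.image); // ImageData is a supported input type
  postMessage({ faces: result.face.length }); // post back only serializable parts of the result
};
```
and on the main thread, frames are copied to `ImageData` and transferred to the worker:
```js
// main thread: copy current video frame to ImageData and transfer it to the worker
const worker = new Worker('human-worker.js');
const video = document.getElementById('video-id');
const ctx = document.createElement('canvas').getContext('2d');
function sendFrame() {
  ctx.canvas.width = video.videoWidth;
  ctx.canvas.height = video.videoHeight;
  ctx.drawImage(video, 0, 0);
  const image = ctx.getImageData(0, 0, ctx.canvas.width, ctx.canvas.height);
  worker.postMessage({ image }, [image.data.buffer]); // transfer the pixel buffer instead of copying it
}
worker.onmessage = () => requestAnimationFrame(sendFrame); // schedule next frame when the worker replies
sendFrame();
```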
<br><hr><br>
## Inputs
`Human` library can process all known input types, as shown in the sketch after this list:
- `Image`, `ImageData`, `ImageBitmap`, `Canvas`, `OffscreenCanvas`, `Tensor`,
- `HTMLImageElement`, `HTMLCanvasElement`, `HTMLVideoElement`, `HTMLMediaElement`
Additionally, `HTMLVideoElement`, `HTMLMediaElement` can be a standard `<video>` tag that links to:
- WebCam on user's system
- Any supported video type
e.g. `.mp4`, `.avi`, etc.
- Additional video types supported via *HTML5 Media Source Extensions*
e.g.: **HLS** (*HTTP Live Streaming*) using `hls.js` or **DASH** (*Dynamic Adaptive Streaming over HTTP*) using `dash.js`
- **WebRTC** media track using built-in support
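A short sketch of the same `detect` call across several of these input types (element ids are hypothetical):
```js
// the same detect() call accepts any supported input type (element ids are hypothetical)
const human = new Human();
async function run() {
  const image = document.getElementById('image-id'); // HTMLImageElement
  const video = document.getElementById('video-id'); // HTMLVideoElement: webcam, mp4 file, HLS/DASH or WebRTC stream
  const bitmap = await createImageBitmap(image); // ImageBitmap created from any drawable source
  for (const input of [image, video, bitmap]) {
    const result = await human.detect(input);
    console.log(`faces detected: ${result.face.length}`);
  }
}
run();
```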
<br><hr><br>
## Detailed Usage
- [**Wiki Home**](https://github.com/vladmandic/human/wiki)
- [**List of all available methods, properties and namespaces**](https://github.com/vladmandic/human/wiki/Usage)
- [**TypeDoc API Specification - Main class**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
- [**TypeDoc API Specification - Full**](https://vladmandic.github.io/human/typedoc/)
![typedoc](assets/screenshot-typedoc.png)
<br><hr><br>
## TypeDefs
`Human` is written using TypeScript strong typing and ships with full **TypeDefs** for all classes defined by the library bundled in `types/human.d.ts` and enabled by default
*Note*: This does not include embedded `tfjs`
If you want to use embedded `tfjs` inside `Human` (`human.tf` namespace) and still have full **typedefs**, add this code:
> import type * as tfjs from '@vladmandic/human/dist/tfjs.esm';
> const tf = human.tf as typeof tfjs;
This is not enabled by default as `Human` does not ship with full **TFJS TypeDefs** due to size considerations
Enabling `tfjs` TypeDefs as above creates additional project dependencies (dev-only, as only types are required) as defined in `@vladmandic/human/dist/tfjs.esm.d.ts`:
> @tensorflow/tfjs-core, @tensorflow/tfjs-converter, @tensorflow/tfjs-backend-wasm, @tensorflow/tfjs-backend-webgl
<br><hr><br>
## Default models
Default models in Human library are:
- **Face Detection**: *MediaPipe BlazeFace Back variation*
- **Face Mesh**: *MediaPipe FaceMesh*
- **Face Iris Analysis**: *MediaPipe Iris*
- **Face Description**: *HSE FaceRes*
- **Emotion Detection**: *Oarriaga Emotion*
- **Body Analysis**: *MoveNet Lightning variation*
- **Hand Analysis**: *HandTrack & MediaPipe HandLandmarks*
- **Body Segmentation**: *Google Selfie*
- **Object Detection**: *CenterNet with MobileNet v3*
Note that alternative models are provided and can be enabled via configuration
For example, body pose detection by default uses *MoveNet Lightning*, but can be switched to *MultiNet Thunder* for higher precision or *Multinet MultiPose* for multi-person detection or even *PoseNet*, *BlazePose* or *EfficientPose* depending on the use case
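As a sketch, switching models is just a configuration change; the model file name and hosted path below are assumptions based on the `human-models` repository layout:
```js
// sketch: enable MoveNet-MultiPose instead of the default MoveNet-Lightning
const human = new Human({
  modelBasePath: 'https://vladmandic.github.io/human-models/models/', // assumption: models hosted in human-models repo
  body: { enabled: true, modelPath: 'movenet-multipose.json', maxDetected: 5 },
});
```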
For more info, see [**Configuration Details**](https://github.com/vladmandic/human/wiki/Configuration) and [**List of Models**](https://github.com/vladmandic/human/wiki/Models)
<br><hr><br>
## Diagnostics
- [How to get diagnostic information or performance trace information](https://github.com/vladmandic/human/wiki/Diag)
<br><hr><br>
`Human` library is written in [TypeScript](https://www.typescriptlang.org/docs/handbook/intro.html) **5.1** using [TensorFlow/JS](https://www.tensorflow.org/js/) **4.10** and conforming to latest `JavaScript` [ECMAScript version 2022](https://262.ecma-international.org/) standard
Build target for distributables is `JavaScript` [ECMAScript version 2018](https://262.ecma-international.org/9.0/)
<br>
For details see [**Wiki Pages**](https://github.com/vladmandic/human/wiki)
and [**API Specification**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
<br>
[![](https://img.shields.io/static/v1?label=Sponsor&message=%E2%9D%A4&logo=GitHub&color=%23fe8e86)](https://github.com/sponsors/vladmandic)
![Stars](https://img.shields.io/github/stars/vladmandic/human?style=flat-square&svg=true)
![Forks](https://badgen.net/github/forks/vladmandic/human)
![Code Size](https://img.shields.io/github/languages/code-size/vladmandic/human?style=flat-square&svg=true)

View File

@ -1,5 +1,32 @@
# Security & Privacy Policy
## Issues
All issues are tracked publicly on GitHub: <https://github.com/vladmandic/human/issues>
<br>
## Vulnerabilities
`Human` library code base and included dependencies are automatically scanned against known security vulnerabilities
Any code commit is validated before merge
- [Dependencies](https://github.com/vladmandic/human/security/dependabot)
- [Scanning Alerts](https://github.com/vladmandic/human/security/code-scanning)
<br>
## Privacy
`Human` library and included demo apps:
- Are fully self-contained and do not send or share data of any kind with external targets
- Do not store any user or system data, tracking information, user-provided inputs (images, video) or detection results
- Do not utilize any analytic services (such as Google Analytics)
`Human` library can establish external connections *only* for the following purposes and *only* when explicitly configured by the user (see the example below):
- Load models from externally hosted site (e.g. CDN)
- Load inputs for detection from *http & https* sources
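For example, loading models from an externally hosted location happens only if `modelBasePath` is explicitly pointed there (the URL below is illustrative):
```js
// no external connection is made unless explicitly configured, e.g.:
const human = new Human({ modelBasePath: 'https://vladmandic.github.io/human-models/models/' });
```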

44
TODO.md
View File

@ -1,20 +1,38 @@
# To-Do list for Human library
## Work-in-Progress
- Strong(er) typing
- Automated testing framework
- TypeDoc comments
<hr><br>
## Known Issues & Limitations
### Face with Attention
`FaceMesh-Attention` is not supported when using `WASM` backend due to missing kernel op in **TFJS**
No issues with default model `FaceMesh`
### Object Detection
`NanoDet` model is not supported when using `WASM` backend due to missing kernel op in **TFJS**
No issues with default model `MB3-CenterNet`
### Body Detection using MoveNet-MultiPose
Model does not return valid detection scores (all other functionality is not impacted)
### Firefox
Running in **web workers** requires `OffscreenCanvas` which is still disabled by default in **Firefox**
Enable via `about:config` -> `gfx.offscreencanvas.enabled`
[Details](https://developer.mozilla.org/en-US/docs/Web/API/OffscreenCanvas#browser_compatibility)
### Safari
No support for running in **web workers** as Safari still does not support `OffscreenCanvas`
[Details](https://developer.mozilla.org/en-US/docs/Web/API/OffscreenCanvas#browser_compatibility)
## React-Native
`Human` support for **React-Native** is best-effort, but not part of the main development focus
<hr><br>

BIN assets/samples.jpg Normal file (added, 261 KiB; binary file not shown)
BIN assets/screenshot-vrm.jpg Normal file (added, 42 KiB; binary file not shown)
(multiple other binary image assets added, removed or resized; binary files not shown)

153
build.js Normal file
View File

@ -0,0 +1,153 @@
const fs = require('fs');
const path = require('path');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const Build = require('@vladmandic/build').Build; // eslint-disable-line node/no-unpublished-require
const APIExtractor = require('@microsoft/api-extractor'); // eslint-disable-line node/no-unpublished-require
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
const packageJSON = require('./package.json');
const logFile = 'test/build.log';
const modelsOut = 'models/models.json';
const modelsFolders = [
'./models',
'../human-models/models',
'../blazepose/model/',
'../anti-spoofing/model',
'../efficientpose/models',
'../insightface/models',
'../movenet/models',
'../nanodet/models',
];
const apiExtractorIgnoreList = [ // eslint-disable-line no-unused-vars
'ae-missing-release-tag',
'tsdoc-param-tag-missing-hyphen',
'tsdoc-escape-right-brace',
'tsdoc-undefined-tag',
'tsdoc-escape-greater-than',
'ae-unresolved-link',
'ae-forgotten-export',
'tsdoc-malformed-inline-tag',
'tsdoc-unnecessary-backslash',
];
const regEx = [
{ search: 'types="@webgpu/types/dist"', replace: 'path="../src/types/webgpu.d.ts"' },
{ search: 'types="offscreencanvas"', replace: 'path="../src/types/offscreencanvas.d.ts"' },
];
function copyFile(src, dst) {
if (!fs.existsSync(src)) {
log.warn('Copy:', { input: src, output: dst });
return;
}
log.state('Copy:', { input: src, output: dst });
const buffer = fs.readFileSync(src);
fs.writeFileSync(dst, buffer);
}
function writeFile(str, dst) {
log.state('Write:', { output: dst });
fs.writeFileSync(dst, str);
}
function regExFile(src, entries) {
if (!fs.existsSync(src)) {
log.warn('Filter:', { src });
return;
}
log.state('Filter:', { input: src });
for (const entry of entries) {
const buffer = fs.readFileSync(src, 'UTF-8');
const lines = buffer.split(/\r?\n/);
const out = [];
for (const line of lines) {
if (line.includes(entry.search)) out.push(line.replace(entry.search, entry.replace));
else out.push(line);
}
fs.writeFileSync(src, out.join('\n'));
}
}
async function analyzeModels() {
log.info('Analyze models:', { folders: modelsFolders.length, result: modelsOut });
let totalSize = 0;
const models = {};
const allModels = [];
for (const folder of modelsFolders) {
try {
if (!fs.existsSync(folder)) continue;
const stat = fs.statSync(folder);
if (!stat.isDirectory()) continue;
const dir = fs.readdirSync(folder);
const found = dir.map((f) => `file://${folder}/${f}`).filter((f) => f.endsWith('json'));
log.state('Models', { folder, models: found.length });
allModels.push(...found);
} catch {
// log.warn('Cannot enumerate:', modelFolder);
}
}
for (const url of allModels) {
// if (!f.endsWith('.json')) continue;
// const url = `file://${modelsDir}/${f}`;
const model = new tf.GraphModel(url); // create model prototype and decide if load from cache or from original modelurl
model.findIOHandler();
const artifacts = await model.handler.load();
const size = artifacts?.weightData?.byteLength || 0;
totalSize += size;
const name = path.basename(url).replace('.json', '');
if (!models[name]) models[name] = size;
}
const json = JSON.stringify(models, null, 2);
fs.writeFileSync(modelsOut, json);
log.state('Models:', { count: Object.keys(models).length, totalSize });
}
async function main() {
log.logFile(logFile);
log.data('Build', { name: packageJSON.name, version: packageJSON.version });
// run production build
const build = new Build();
await build.run('production');
// patch tfjs typedefs
copyFile('node_modules/@vladmandic/tfjs/types/tfjs-core.d.ts', 'types/tfjs-core.d.ts');
copyFile('node_modules/@vladmandic/tfjs/types/tfjs.d.ts', 'types/tfjs.esm.d.ts');
copyFile('src/types/tsconfig.json', 'types/tsconfig.json');
copyFile('src/types/eslint.json', 'types/.eslintrc.json');
copyFile('src/types/tfjs.esm.d.ts', 'dist/tfjs.esm.d.ts');
regExFile('types/tfjs-core.d.ts', regEx);
// run api-extractor to create typedef rollup
const extractorConfig = APIExtractor.ExtractorConfig.loadFileAndPrepare('.api-extractor.json');
try {
const extractorResult = APIExtractor.Extractor.invoke(extractorConfig, {
localBuild: true,
showVerboseMessages: false,
messageCallback: (msg) => {
msg.handled = true;
if (msg.logLevel === 'none' || msg.logLevel === 'verbose' || msg.logLevel === 'info') return;
if (msg.sourceFilePath?.includes('/node_modules/')) return;
// if (apiExtractorIgnoreList.reduce((prev, curr) => prev || msg.messageId.includes(curr), false)) return; // those are external issues outside of human control
log.data('API', { level: msg.logLevel, category: msg.category, id: msg.messageId, file: msg.sourceFilePath, line: msg.sourceFileLine, text: msg.text });
},
});
log.state('API-Extractor:', { succeeded: extractorResult.succeeded, errors: extractorResult.errorCount, warnings: extractorResult.warningCount });
} catch (err) {
log.error('API-Extractor:', err);
}
regExFile('types/human.d.ts', regEx);
writeFile('export * from \'../types/human\';', 'dist/human.esm-nobundle.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.esm.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.node-gpu.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.node.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.node-wasm.d.ts');
// generate model signature
await analyzeModels();
log.info('Human Build complete...', { logFile });
}
main();

View File

@ -1,5 +1,67 @@
# Human Library: Demos
For details on other demos see Wiki: [**Demos**](https://github.com/vladmandic/human/wiki/Demos)
## Main Demo
`index.html`: Full demo using `Human` ESM module running in Browsers
Includes:
- Selectable inputs:
- Sample images
- Image via drag & drop
- Image via URL param
- WebCam input
- Video stream
- WebRTC stream
- Selectable active `Human` modules
- With interactive module params
- Interactive `Human` image filters
- Selectable interactive `results` browser
- Selectable `backend`
- Multiple execution methods:
- Sync vs Async
- in main thread or web worker
- live on GitHub Pages, on a user-hosted web server, or via the included [**micro http2 server**](https://github.com/vladmandic/human/wiki/Development-Server)
### Demo Options
- General `Human` library options
in `index.js:userConfig`
- General `Human` `draw` options
in `index.js:drawOptions`
- Demo PWA options
in `index.js:pwa`
- Demo specific options
in `index.js:ui`
```js
const ui = {
console: true, // log messages to browser console
useWorker: true, // use web workers for processing
buffered: true, // should output be buffered between frames
interpolated: true, // should output be interpolated for smoothness between frames
results: false, // show results tree
useWebRTC: false, // use webrtc as camera source instead of local webcam
};
```
The demo implements several ways to use the `Human` library.
### URL Params
Demo app can use URL parameters to override configuration values
For example:
- Force using `WASM` as backend: <https://vladmandic.github.io/human/demo/index.html?backend=wasm>
- Enable `WebWorkers`: <https://vladmandic.github.io/human/demo/index.html?worker=true>
- Skip pre-loading and warming up: <https://vladmandic.github.io/human/demo/index.html?preload=false&warmup=false>
### WebRTC
Note that WebRTC connection requires a WebRTC server that provides a compatible media track such as H.264 video track
For such a WebRTC server implementation see <https://github.com/vladmandic/stream-rtsp> project
that implements a connection to IP Security camera using RTSP protocol and transcodes it to WebRTC
ready to be consumed by a client such as `Human`
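As a sketch, once a WebRTC track arrives it is attached to a `<video>` element and processed like any other video input (names below are hypothetical):
```js
// pc is an already-negotiated RTCPeerConnection to such a server; element id is hypothetical
pc.ontrack = (evt) => {
  const video = document.getElementById('video-id');
  video.srcObject = evt.streams[0]; // attach the remote media stream
  video.onloadeddata = () => human.video(video); // process the stream like any other video input
};
```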

View File

@ -1,173 +0,0 @@
import { DoubleSide, Mesh, MeshBasicMaterial, OrthographicCamera, Scene, sRGBEncoding, VideoTexture, WebGLRenderer, BufferGeometry, BufferAttribute } from './helpers/three.js';
import { OrbitControls } from './helpers/three-orbitControls.js';
import Human from '../dist/human.esm.js'; // equivalent of @vladmandic/human
const userConfig = {
backend: 'wasm',
async: false,
profile: false,
warmup: 'full',
videoOptimized: false,
filter: { enabled: false },
face: { enabled: true,
detector: { rotation: false, maxFaces: 1 },
mesh: { enabled: true },
iris: { enabled: true },
description: { enabled: false },
emotion: { enabled: false },
},
hand: { enabled: false },
gesture: { enabled: false },
body: { enabled: false },
object: { enabled: false },
};
const human = new Human(userConfig);
const wireframe = true; // enable wireframe overlay
const canvas = document.getElementById('canvas');
let width = 0;
let height = 0;
const renderer = new WebGLRenderer({ antialias: true, alpha: true, canvas });
renderer.setClearColor(0x000000);
renderer.outputEncoding = sRGBEncoding;
const camera = new OrthographicCamera();
const controls = new OrbitControls(camera, renderer.domElement); // pan&zoom controls
controls.enabled = true;
const materialWireFrame = new MeshBasicMaterial({ // create wireframe material
color: 0xffaaaa,
wireframe: true,
});
const materialFace = new MeshBasicMaterial({ // create material for mask
color: 0xffffff,
map: null, // will be created when the video is ready.
side: DoubleSide,
});
class FaceGeometry extends BufferGeometry {
constructor(triangulation) {
super();
this.positions = new Float32Array(478 * 3);
this.uvs = new Float32Array(478 * 2);
this.setAttribute('position', new BufferAttribute(this.positions, 3));
this.setAttribute('uv', new BufferAttribute(this.uvs, 2));
this.setIndex(triangulation);
}
update(face) {
let ptr = 0;
for (const p of face.mesh) {
this.positions[ptr + 0] = -p[0] + width / 2;
this.positions[ptr + 1] = height - p[1] - height / 2;
this.positions[ptr + 2] = -p[2];
ptr += 3;
}
ptr = 0;
for (const p of face.meshRaw) {
this.uvs[ptr + 0] = 0 + p[0];
this.uvs[ptr + 1] = 1 - p[1];
ptr += 2;
}
materialFace.map.update(); // update textures from video
this.attributes.position.needsUpdate = true; // vertices
this.attributes.uv.needsUpdate = true; // textures
this.computeVertexNormals();
}
}
const scene = new Scene();
const faceGeometry = new FaceGeometry(human.faceTriangulation); // create a new geometry helper
const mesh = new Mesh(faceGeometry, materialFace); // create mask mesh
scene.add(mesh);
function resize(input) {
width = input.videoWidth;
height = input.videoHeight;
camera.left = -width / 2;
camera.right = width / 2;
camera.top = height / 2;
camera.bottom = -height / 2;
camera.near = -100;
camera.far = 100;
camera.zoom = 2;
camera.updateProjectionMatrix();
renderer.setSize(width, height);
}
const isLive = (input) => input.srcObject && (input.srcObject.getVideoTracks()[0].readyState === 'live') && (input.readyState > 2) && (!input.paused);
async function render(input) {
if (isLive(input)) {
if (width !== input.videoWidth || height !== input.videoHeight) resize(input); // resize orthographic camera to video dimensions if necessary
const res = await human.detect(input);
if (res?.face?.length > 0) {
faceGeometry.update(res.face[0]);
// render the mask
mesh.material = materialFace;
renderer.autoClear = true;
renderer.render(scene, camera);
if (wireframe) { // overlay wireframe
mesh.material = materialWireFrame;
renderer.autoClear = false;
renderer.render(scene, camera);
}
}
}
requestAnimationFrame(() => render(input));
}
// setup webcam
async function setupCamera() {
if (!navigator.mediaDevices) return null;
const video = document.getElementById('video');
canvas.addEventListener('click', () => {
if (isLive(video)) video.pause();
else video.play();
});
const constraints = {
audio: false,
video: { facingMode: 'user', resizeMode: 'crop-and-scale' },
};
if (window.innerWidth > window.innerHeight) constraints.video.width = { ideal: window.innerWidth };
else constraints.video.height = { ideal: window.innerHeight };
const stream = await navigator.mediaDevices.getUserMedia(constraints);
if (stream) video.srcObject = stream;
else return null;
// get information data
const track = stream.getVideoTracks()[0];
const settings = track.getSettings();
// log('camera constraints:', constraints, 'window:', { width: window.innerWidth, height: window.innerHeight }, 'settings:', settings, 'track:', track);
const engineData = human.tf.engine();
const gpuData = (engineData.backendInstance && engineData.backendInstance.numBytesInGPU > 0) ? `gpu: ${(engineData.backendInstance.numBytesInGPU ? engineData.backendInstance.numBytesInGPU : 0).toLocaleString()} bytes` : '';
const cameraData = { name: track.label?.toLowerCase(), width: settings.width, height: settings.height, facing: settings.facingMode === 'user' ? 'front' : 'back' };
const memoryData = `system: ${engineData.state.numBytes.toLocaleString()} bytes ${gpuData} | tensors: ${engineData.state.numTensors.toLocaleString()}`;
document.getElementById('log').innerHTML = `
video: ${cameraData.name} | facing: ${cameraData.facing} | screen: ${window.innerWidth} x ${window.innerHeight} camera: ${cameraData.width} x ${cameraData.height}<br>
backend: ${human.tf.getBackend()} | ${memoryData}<br>
`;
// return when camera is ready
return new Promise((resolve) => {
video.onloadeddata = async () => {
video.width = video.videoWidth;
video.height = video.videoHeight;
canvas.width = video.width;
canvas.height = video.height;
video.play();
resolve(video);
};
});
}
async function main() {
await human.load();
const video = await setupCamera();
if (video) {
const videoTexture = new VideoTexture(video); // now load textures from video
videoTexture.encoding = sRGBEncoding;
materialFace.map = videoTexture;
render(video);
}
}
window.onload = main;

View File

@ -0,0 +1,160 @@
/**
* Human demo for browsers
*
* Demo for face detection
*/
/** @type {Human} */
import { Human } from '../../dist/human.esm.js';
let loader;
const humanConfig = { // user configuration for human, used to fine-tune behavior
cacheSensitivity: 0,
debug: true,
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
filter: { enabled: true, equalization: false, flip: false },
face: {
enabled: true,
detector: { rotation: false, maxDetected: 100, minConfidence: 0.2, return: true, square: false },
iris: { enabled: true },
description: { enabled: true },
emotion: { enabled: true },
antispoof: { enabled: true },
liveness: { enabled: true },
},
body: { enabled: false },
hand: { enabled: false },
object: { enabled: false },
gesture: { enabled: false },
segmentation: { enabled: false },
};
const human = new Human(humanConfig); // new instance of human
export const showLoader = (msg) => { loader.setAttribute('msg', msg); loader.style.display = 'block'; };
export const hideLoader = () => loader.style.display = 'none';
class ComponentLoader extends HTMLElement { // watch for attributes
message = document.createElement('div');
static get observedAttributes() { return ['msg']; }
attributeChangedCallback(_name, _prevVal, currVal) {
this.message.innerHTML = currVal;
}
connectedCallback() { // triggered on insert
this.attachShadow({ mode: 'open' });
const css = document.createElement('style');
css.innerHTML = `
.loader-container { top: 450px; justify-content: center; position: fixed; width: 100%; }
.loader-message { font-size: 1.5rem; padding: 1rem; }
.loader { width: 300px; height: 300px; border: 3px solid transparent; border-radius: 50%; border-top: 4px solid #f15e41; animation: spin 4s linear infinite; position: relative; }
.loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: 4px solid transparent; }
.loader::before { border-top-color: #bad375; animation: 3s spin linear infinite; }
.loader::after { border-top-color: #26a9e0; animation: spin 1.5s linear infinite; }
@keyframes spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } }
`;
const container = document.createElement('div');
container.id = 'loader-container';
container.className = 'loader-container';
loader = document.createElement('div');
loader.id = 'loader';
loader.className = 'loader';
this.message.id = 'loader-message';
this.message.className = 'loader-message';
this.message.innerHTML = '';
container.appendChild(this.message);
container.appendChild(loader);
this.shadowRoot?.append(css, container);
loader = this; // eslint-disable-line @typescript-eslint/no-this-alias
}
}
customElements.define('component-loader', ComponentLoader);
function addFace(face, source) {
const deg = (rad) => Math.round((rad || 0) * 180 / Math.PI);
const canvas = document.createElement('canvas');
const emotion = face.emotion?.map((e) => `${Math.round(100 * e.score)}% ${e.emotion}`) || [];
const rotation = `pitch ${deg(face.rotation?.angle.pitch)}° | roll ${deg(face.rotation?.angle.roll)}° | yaw ${deg(face.rotation?.angle.yaw)}°`;
const gaze = `direction ${deg(face.rotation?.gaze.bearing)}° strength ${Math.round(100 * (face.rotation.gaze.strength || 0))}%`;
canvas.title = `
source: ${source}
score: ${Math.round(100 * face.boxScore)}% detection ${Math.round(100 * face.faceScore)}% analysis
age: ${face.age} years | gender: ${face.gender} score ${Math.round(100 * face.genderScore)}%
emotion: ${emotion.join(' | ')}
head rotation: ${rotation}
eyes gaze: ${gaze}
camera distance: ${face.distance}m | ${Math.round(100 * face.distance / 2.54)}in
check: ${Math.round(100 * face.real)}% real ${Math.round(100 * face.live)}% live
`.replace(/ /g, ' ');
canvas.onclick = (e) => {
e.preventDefault();
document.getElementById('description').innerHTML = canvas.title;
};
human.draw.tensor(face.tensor, canvas);
human.tf.dispose(face.tensor);
return canvas;
}
async function addFaces(imgEl) {
showLoader('human: busy');
const faceEl = document.getElementById('faces');
faceEl.innerHTML = '';
const res = await human.detect(imgEl);
console.log(res); // eslint-disable-line no-console
document.getElementById('description').innerHTML = `detected ${res.face.length} faces`;
for (const face of res.face) {
const canvas = addFace(face, imgEl.src.substring(0, 64));
faceEl.appendChild(canvas);
}
hideLoader();
}
function addImage(imageUri) {
const imgEl = new Image(256, 256);
imgEl.onload = () => {
const images = document.getElementById('images');
images.appendChild(imgEl); // add image if loaded ok
images.scroll(images?.offsetWidth, 0);
};
imgEl.onerror = () => console.error('addImage', { imageUri }); // eslint-disable-line no-console
imgEl.onclick = () => addFaces(imgEl);
imgEl.title = imageUri.substring(0, 64);
imgEl.src = encodeURI(imageUri);
}
async function initDragAndDrop() {
const reader = new FileReader();
reader.onload = async (e) => {
if (e.target.result.startsWith('data:image')) await addImage(e.target.result);
};
document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
document.body.addEventListener('dragover', (evt) => evt.preventDefault());
document.body.addEventListener('drop', async (evt) => {
evt.preventDefault();
evt.dataTransfer.dropEffect = 'copy';
for (const f of evt.dataTransfer.files) reader.readAsDataURL(f);
});
document.body.onclick = (e) => {
if (e.target.localName !== 'canvas') document.getElementById('description').innerHTML = '';
};
}
async function main() {
showLoader('loading models');
await human.load();
showLoader('compiling models');
await human.warmup();
showLoader('loading images');
const images = ['group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg', 'solvay1927.jpg', 'stock-group-1.jpg', 'stock-group-2.jpg', 'stock-models-6.jpg', 'stock-models-7.jpg'];
const imageUris = images.map((a) => `../../samples/in/${a}`);
for (let i = 0; i < imageUris.length; i++) addImage(imageUris[i]);
initDragAndDrop();
hideLoader();
}
window.onload = main;

View File

@ -0,0 +1,43 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<!-- <meta http-equiv="content-type" content="text/html; charset=utf-8"> -->
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./facedetect.js" type="module"></script>
<style>
img { object-fit: contain; }
img:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 24px; font-variant: small-caps; }
body { margin: 24px; background: black; color: white; overflow: hidden; text-align: -webkit-center; width: 100vw; height: 100vh; }
::-webkit-scrollbar { height: 8px; border: 0; border-radius: 0; }
::-webkit-scrollbar-thumb { background: grey }
::-webkit-scrollbar-track { margin: 3px; }
canvas { width: 192px; height: 192px; margin: 2px; padding: 2px; cursor: grab; transform: scale(1.00); transition : all 0.3s ease; }
canvas:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
</style>
</head>
<body>
<component-loader></component-loader>
<div style="display: flex">
<div>
<div style="margin: 24px">select image to show detected faces<br>drag & drop to add your images</div>
<div id="images" style="display: flex; width: 98vw; overflow-x: auto; overflow-y: hidden; scroll-behavior: smooth"></div>
</div>
</div>
<div id="list" style="height: 10px"></div>
<div style="margin: 24px">hover or click on face to show details</div>
<div id="faces" style="overflow-y: auto"></div>
<div id="description" style="white-space: pre;"></div>
</body>
</html>

42
demo/faceid/README.md Normal file
View File

@ -0,0 +1,42 @@
# Human Face Recognition: FaceID
`faceid` runs multiple checks to validate webcam input before performing face match
Detected face image and descriptor are stored in client-side IndexDB
## Workflow
- Starts webcam
- Waits until input video contains validated face or timeout is reached
- Number of people
- Face size
- Face and gaze direction
- Detection scores
- Blink detection (including temporal check for blink speed) to verify live input
- Runs `antispoofing` optional module
- Runs `liveness` optional module
- Runs match against database of registered faces and presents best match with scores
## Notes
Both `antispoof` and `liveness` models are tiny and
designed to serve as a quick check when used together with other indicators:
- size below 1MB
- very quick inference times as they are very simple (11 ops for antispoof and 23 ops for liveness)
- trained on low-resolution inputs
### Anti-spoofing Module
- Checks if input is realistic (e.g. computer generated faces)
- Configuration: `human.config.face.antispoof.enabled`
- Result: `human.result.face[0].real` as score
### Liveness Module
- Checks if input has obvious artifacts due to recording (e.g. playing back phone recording of a face)
- Configuration: `human.config.face.liveness.enabled`
- Result: `human.result.face[0].live` as score
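Taken together, a minimal sketch of gating on both scores (the threshold is an arbitrary assumption):
```js
// inside an async function; both scores are in the 0..1 range, 0.6 is an arbitrary example threshold
const res = await human.detect(video);
if (res.face.length !== 1) return; // expect exactly one face
const { real, live } = res.face[0];
if (real > 0.6 && live > 0.6) console.log('input passes antispoof and liveness checks');
```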
### Models
**FaceID** is compatible with
- `faceres.json` (default) performs combined age/gender/descriptor analysis
- `faceres-deep.json` higher resolution variation of `faceres`
- `insightface` alternative model for face descriptor analysis
- `mobilefacenet` alternative model for face descriptor analysis
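For the final match step, a sketch using `human.match.find` against stored descriptors; the `face` variable is hypothetical and the database shape follows the demo's `indexdb` records:
```js
// compare the current face descriptor against all stored descriptors
const db = await indexDb.load(); // demo's indexdb module
const descriptors = db.map((rec) => rec.descriptor);
const best = human.match.find(face.embedding, descriptors); // returns { index, distance, similarity }
if (best.index >= 0) console.log(`matched ${db[best.index].name} at ${Math.round(100 * best.similarity)}%`);
```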

49
demo/faceid/index.html Normal file
View File

@ -0,0 +1,49 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human: Face Recognition</title>
<meta name="viewport" content="width=device-width" id="viewport">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./index.js" type="module"></script>
<style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; padding: 16px; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
body::-webkit-scrollbar { display: none; }
.button { padding: 2px; cursor: pointer; box-shadow: 2px 2px black; width: 64px; text-align: center; place-content: center; margin-left: 16px; height: 16px; display: none }
.ok { position: absolute; top: 64px; right: 20px; width: 150px; background-color: grey; padding: 4px; color: black; font-size: 14px }
</style>
</head>
<body>
<div style="padding: 8px">
<h1 style="margin: 0">faceid demo using human library</h1>
look directly at camera and make sure that detection passes all of the required tests noted on the right hand side of the screen<br>
if input does not satisfy tests within the specified timeout, no image will be selected<br>
once face image is approved, it will be compared with existing face database<br>
you can also store face descriptor with label in a browser's indexdb for future usage<br>
<br>
<i>note: this is not equivalent to full faceid methods as used by modern mobile phones or windows hello<br>
as they rely on additional infrared sensors and depth-sensing and not just camera image for additional levels of security</i>
</div>
<canvas id="canvas" style="padding: 8px"></canvas>
<canvas id="source" style="padding: 8px"></canvas>
<video id="video" playsinline style="display: none"></video>
<pre id="log" style="padding: 8px"></pre>
<div id="match" style="display: none; padding: 8px">
<label for="name">name:</label>
<input id="name" type="text" value="" style="height: 16px; border: none; padding: 2px; margin-left: 8px">
<span id="save" class="button" style="background-color: royalblue">save</span>
<span id="delete" class="button" style="background-color: lightcoral">delete</span>
</div>
<div id="retry" class="button" style="background-color: darkslategray; width: 93%; margin-top: 32px; padding: 12px">retry</div>
<div id="ok"></div>
</body>
</html>

9
demo/faceid/index.js Normal file

File diff suppressed because one or more lines are too long

7
demo/faceid/index.js.map Normal file

File diff suppressed because one or more lines are too long

318
demo/faceid/index.ts Normal file
View File

@ -0,0 +1,318 @@
/**
* Human demo for browsers
* @default Human Library
* @summary <https://github.com/vladmandic/human>
* @author <https://github.com/vladmandic>
* @copyright <https://github.com/vladmandic>
* @license MIT
*/
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
import * as indexDb from './indexdb'; // methods to deal with indexdb
const humanConfig = { // user configuration for human, used to fine-tune behavior
cacheSensitivity: 0.01,
modelBasePath: '../../models',
filter: { enabled: true, equalization: true }, // let's run with histogram equalizer
debug: true,
face: {
enabled: true,
detector: { rotation: true, return: true, mask: false }, // return tensor is used to get detected face image
description: { enabled: true }, // default model for face descriptor extraction is faceres
// mobilefacenet: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/mobilefacenet.json' }, // alternative model
// insightface: { enabled: true, modelPath: 'https://vladmandic.github.io/insightface/models/insightface-mobilenet-swish.json' }, // alternative model
iris: { enabled: true }, // needed to determine gaze direction
emotion: { enabled: false }, // not needed
antispoof: { enabled: true }, // enable optional antispoof module
liveness: { enabled: true }, // enable optional liveness module
},
body: { enabled: false },
hand: { enabled: false },
object: { enabled: false },
gesture: { enabled: true }, // parses face and iris gestures
};
// const matchOptions = { order: 2, multiplier: 1000, min: 0.0, max: 1.0 }; // for embedding model
const matchOptions = { order: 2, multiplier: 25, min: 0.2, max: 0.8 }; // for faceres model
const options = {
minConfidence: 0.6, // overall face confidence for box, face, gender, real, live
minSize: 224, // min input to face descriptor model before degradation
maxTime: 30000, // max time before giving up
blinkMin: 10, // minimum duration of a valid blink
blinkMax: 800, // maximum duration of a valid blink
threshold: 0.5, // minimum similarity
distanceMin: 0.4, // closest the face is allowed to be to the camera, in meters
distanceMax: 1.0, // farthest the face is allowed to be from the camera, in meters
mask: humanConfig.face.detector.mask,
rotation: humanConfig.face.detector.rotation,
...matchOptions,
};
const ok: Record<string, { status: boolean | undefined, val: number }> = { // must meet all rules
faceCount: { status: false, val: 0 },
faceConfidence: { status: false, val: 0 },
facingCenter: { status: false, val: 0 },
lookingCenter: { status: false, val: 0 },
blinkDetected: { status: false, val: 0 },
faceSize: { status: false, val: 0 },
antispoofCheck: { status: false, val: 0 },
livenessCheck: { status: false, val: 0 },
distance: { status: false, val: 0 },
age: { status: false, val: 0 },
gender: { status: false, val: 0 },
timeout: { status: true, val: 0 },
descriptor: { status: false, val: 0 },
elapsedMs: { status: undefined, val: 0 }, // total time while waiting for valid face
detectFPS: { status: undefined, val: 0 }, // mark detection fps performance
drawFPS: { status: undefined, val: 0 }, // mark redraw fps performance
};
const allOk = () => ok.faceCount.status
&& ok.faceSize.status
&& ok.blinkDetected.status
&& ok.facingCenter.status
&& ok.lookingCenter.status
&& ok.faceConfidence.status
&& ok.antispoofCheck.status
&& ok.livenessCheck.status
&& ok.distance.status
&& ok.descriptor.status
&& ok.age.status
&& ok.gender.status;
const current: { face: H.FaceResult | null, record: indexDb.FaceRecord | null } = { face: null, record: null }; // current face record and matched database record
const blink = { // internal timers for blink start/end/duration
start: 0,
end: 0,
time: 0,
};
// let db: Array<{ name: string, source: string, embedding: number[] }> = []; // holds loaded face descriptor database
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
human.env.perfadd = false; // is performance data showing instant or total values
human.draw.options.font = 'small-caps 18px "Lato"'; // set font used to draw labels when using draw methods
human.draw.options.lineHeight = 20;
const dom = { // grab instances of dom objects so we dont have to look them up later
video: document.getElementById('video') as HTMLVideoElement,
canvas: document.getElementById('canvas') as HTMLCanvasElement,
log: document.getElementById('log') as HTMLPreElement,
fps: document.getElementById('fps') as HTMLPreElement,
match: document.getElementById('match') as HTMLDivElement,
name: document.getElementById('name') as HTMLInputElement,
save: document.getElementById('save') as HTMLSpanElement,
delete: document.getElementById('delete') as HTMLSpanElement,
retry: document.getElementById('retry') as HTMLDivElement,
source: document.getElementById('source') as HTMLCanvasElement,
ok: document.getElementById('ok') as HTMLDivElement,
};
const timestamp = { detect: 0, draw: 0 }; // holds information used to calculate performance and possible memory leaks
let startTime = 0;
const log = (...msg) => { // helper method to output messages
dom.log.innerText += msg.join(' ') + '\n';
console.log(...msg); // eslint-disable-line no-console
};
async function webCam() { // initialize webcam
// @ts-ignore resizeMode is not yet defined in tslib
const cameraOptions: MediaStreamConstraints = { audio: false, video: { facingMode: 'user', resizeMode: 'none', width: { ideal: document.body.clientWidth } } };
const stream: MediaStream = await navigator.mediaDevices.getUserMedia(cameraOptions);
const ready = new Promise((resolve) => { dom.video.onloadeddata = () => resolve(true); });
dom.video.srcObject = stream;
void dom.video.play();
await ready;
dom.canvas.width = dom.video.videoWidth;
dom.canvas.height = dom.video.videoHeight;
dom.canvas.style.width = '50%';
dom.canvas.style.height = '50%';
if (human.env.initial) log('video:', dom.video.videoWidth, dom.video.videoHeight, '|', stream.getVideoTracks()[0].label);
dom.canvas.onclick = () => { // pause when clicked on screen and resume on next click
if (dom.video.paused) void dom.video.play();
else dom.video.pause();
};
}
async function detectionLoop() { // main detection loop
if (!dom.video.paused) {
if (current.face?.tensor) human.tf.dispose(current.face.tensor); // dispose previous tensor
await human.detect(dom.video); // actual detection; we're not capturing output in a local variable as it can also be reached via human.result
const now = human.now();
ok.detectFPS.val = Math.round(10000 / (now - timestamp.detect)) / 10;
timestamp.detect = now;
requestAnimationFrame(detectionLoop); // start new frame immediately
}
}
function drawValidationTests() {
let y = 32;
for (const [key, val] of Object.entries(ok)) {
let el = document.getElementById(`ok-${key}`);
if (!el) {
el = document.createElement('div');
el.id = `ok-${key}`;
el.innerText = key;
el.className = 'ok';
el.style.top = `${y}px`;
dom.ok.appendChild(el);
}
if (typeof val.status === 'boolean') el.style.backgroundColor = val.status ? 'lightgreen' : 'lightcoral';
const status = val.status ? 'ok' : 'fail';
el.innerText = `${key}: ${val.val === 0 ? status : val.val}`;
y += 28;
}
}
async function validationLoop(): Promise<H.FaceResult> { // main screen refresh loop
const interpolated = human.next(human.result); // smooth result using last-known results
human.draw.canvas(dom.video, dom.canvas); // draw canvas to screen
await human.draw.all(dom.canvas, interpolated); // draw labels, boxes, lines, etc.
const now = human.now();
ok.drawFPS.val = Math.round(10000 / (now - timestamp.draw)) / 10;
timestamp.draw = now;
ok.faceCount.val = human.result.face.length;
ok.faceCount.status = ok.faceCount.val === 1; // must be exactly one detected face
if (ok.faceCount.status) { // skip the rest if no face
const gestures: string[] = Object.values(human.result.gesture).map((gesture: H.GestureResult) => gesture.gesture); // flatten all gestures
if (gestures.includes('blink left eye') || gestures.includes('blink right eye')) blink.start = human.now(); // blink starts when eyes get closed
if (blink.start > 0 && !gestures.includes('blink left eye') && !gestures.includes('blink right eye')) blink.end = human.now(); // if blink started how long until eyes are back open
ok.blinkDetected.status = ok.blinkDetected.status || (Math.abs(blink.end - blink.start) > options.blinkMin && Math.abs(blink.end - blink.start) < options.blinkMax);
if (ok.blinkDetected.status && blink.time === 0) blink.time = Math.trunc(blink.end - blink.start);
ok.facingCenter.status = gestures.includes('facing center');
ok.lookingCenter.status = gestures.includes('looking center'); // must face camera and look at camera
ok.faceConfidence.val = human.result.face[0].faceScore || human.result.face[0].boxScore || 0;
ok.faceConfidence.status = ok.faceConfidence.val >= options.minConfidence;
ok.antispoofCheck.val = human.result.face[0].real || 0;
ok.antispoofCheck.status = ok.antispoofCheck.val >= options.minConfidence;
ok.livenessCheck.val = human.result.face[0].live || 0;
ok.livenessCheck.status = ok.livenessCheck.val >= options.minConfidence;
ok.faceSize.val = Math.min(human.result.face[0].box[2], human.result.face[0].box[3]);
ok.faceSize.status = ok.faceSize.val >= options.minSize;
ok.distance.val = human.result.face[0].distance || 0;
ok.distance.status = (ok.distance.val >= options.distanceMin) && (ok.distance.val <= options.distanceMax);
ok.descriptor.val = human.result.face[0].embedding?.length || 0;
ok.descriptor.status = ok.descriptor.val > 0;
ok.age.val = human.result.face[0].age || 0;
ok.age.status = ok.age.val > 0;
ok.gender.val = human.result.face[0].genderScore || 0;
ok.gender.status = ok.gender.val >= options.minConfidence;
}
// run again
ok.timeout.status = ok.elapsedMs.val <= options.maxTime;
drawValidationTests();
if (allOk() || !ok.timeout.status) { // all criteria met
dom.video.pause();
return human.result.face[0];
}
ok.elapsedMs.val = Math.trunc(human.now() - startTime);
return new Promise((resolve) => {
setTimeout(async () => {
await validationLoop(); // run validation loop until conditions are met
resolve(human.result.face[0]); // recursive promise resolve
}, 30); // use to slow down refresh from max refresh rate to target of 30 fps
});
}
async function saveRecords() {
if (dom.name.value.length > 0) {
const image = dom.canvas.getContext('2d')?.getImageData(0, 0, dom.canvas.width, dom.canvas.height) as ImageData;
const rec = { id: 0, name: dom.name.value, descriptor: current.face?.embedding as number[], image };
await indexDb.save(rec);
log('saved face record:', rec.name, 'descriptor length:', current.face?.embedding?.length);
log('known face records:', await indexDb.count());
} else {
log('invalid name');
}
}
async function deleteRecord() {
if (current.record && current.record.id > 0) {
await indexDb.remove(current.record);
}
}
async function detectFace() {
dom.canvas.style.height = '';
dom.canvas.getContext('2d')?.clearRect(0, 0, options.minSize, options.minSize);
if (!current?.face?.tensor || !current?.face?.embedding) return false;
console.log('face record:', current.face); // eslint-disable-line no-console
log(`detected face: ${current.face.gender} ${current.face.age || 0}y distance ${100 * (current.face.distance || 0)}cm/${Math.round(100 * (current.face.distance || 0) / 2.54)}in`);
await human.draw.tensor(current.face.tensor, dom.canvas);
if (await indexDb.count() === 0) {
log('face database is empty: nothing to compare face with');
document.body.style.background = 'black';
dom.delete.style.display = 'none';
return false;
}
const db = await indexDb.load();
const descriptors = db.map((rec) => rec.descriptor).filter((desc) => desc.length > 0);
const res = human.match.find(current.face.embedding, descriptors, matchOptions);
current.record = db[res.index] || null;
if (current.record) {
log(`best match: ${current.record.name} | id: ${current.record.id} | similarity: ${Math.round(1000 * res.similarity) / 10}%`);
dom.name.value = current.record.name;
dom.source.style.display = '';
dom.source.getContext('2d')?.putImageData(current.record.image, 0, 0);
}
document.body.style.background = res.similarity > options.threshold ? 'darkgreen' : 'maroon';
return res.similarity > options.threshold;
}
async function main() { // main entry point
ok.faceCount.status = false;
ok.faceConfidence.status = false;
ok.facingCenter.status = false;
ok.blinkDetected.status = false;
ok.faceSize.status = false;
ok.antispoofCheck.status = false;
ok.livenessCheck.status = false;
ok.age.status = false;
ok.gender.status = false;
ok.elapsedMs.val = 0;
dom.match.style.display = 'none';
dom.retry.style.display = 'none';
dom.source.style.display = 'none';
dom.canvas.style.height = '50%';
document.body.style.background = 'black';
await webCam();
await detectionLoop(); // start detection loop
startTime = human.now();
current.face = await validationLoop(); // start validation loop
dom.canvas.width = current.face?.tensor?.shape[1] || options.minSize;
dom.canvas.height = current.face?.tensor?.shape[0] || options.minSize;
dom.source.width = dom.canvas.width;
dom.source.height = dom.canvas.height;
dom.canvas.style.width = '';
dom.match.style.display = 'flex';
dom.save.style.display = 'flex';
dom.delete.style.display = 'flex';
dom.retry.style.display = 'block';
if (!allOk()) { // is all criteria met?
log('did not find valid face');
return false;
}
return detectFace();
}
async function init() {
log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
log('options:', JSON.stringify(options).replace(/{|}|"|\[|\]/g, '').replace(/,/g, ' '));
log('initializing webcam...');
await webCam(); // start webcam
log('loading human models...');
await human.load(); // preload all models
log('initializing human...');
log('face embedding model:', humanConfig.face.description.enabled ? 'faceres' : '', humanConfig.face['mobilefacenet']?.enabled ? 'mobilefacenet' : '', humanConfig.face['insightface']?.enabled ? 'insightface' : '');
log('loading face database...');
log('known face records:', await indexDb.count());
dom.retry.addEventListener('click', main);
dom.save.addEventListener('click', saveRecords);
dom.delete.addEventListener('click', deleteRecord);
await human.warmup(); // warmup function to initialize backend for future faster detection
await main();
}
window.onload = init;

65
demo/faceid/indexdb.ts Normal file
View File

@ -0,0 +1,65 @@
let db: IDBDatabase; // instance of indexdb
const database = 'human';
const table = 'person';
export interface FaceRecord { id: number, name: string, descriptor: number[], image: ImageData }
const log = (...msg) => console.log('indexdb', ...msg); // eslint-disable-line no-console
export async function open() {
if (db) return true;
return new Promise((resolve) => {
const request: IDBOpenDBRequest = indexedDB.open(database, 1);
request.onerror = (evt) => log('error:', evt);
request.onupgradeneeded = (evt: IDBVersionChangeEvent) => { // create if it doesn't exist
log('create:', evt.target);
db = (evt.target as IDBOpenDBRequest).result;
db.createObjectStore(table, { keyPath: 'id', autoIncrement: true });
};
request.onsuccess = (evt) => { // open
db = (evt.target as IDBOpenDBRequest).result;
log('open:', db);
resolve(true);
};
});
}
export async function load(): Promise<FaceRecord[]> {
const faceDB: FaceRecord[] = [];
if (!db) await open(); // open or create if not already done
return new Promise((resolve) => {
const cursor: IDBRequest = db.transaction([table], 'readwrite').objectStore(table).openCursor(null, 'next');
cursor.onerror = (evt) => log('load error:', evt);
cursor.onsuccess = (evt) => {
if ((evt.target as IDBRequest).result) {
faceDB.push((evt.target as IDBRequest).result.value);
(evt.target as IDBRequest).result.continue();
} else {
resolve(faceDB);
}
};
});
}
export async function count(): Promise<number> {
if (!db) await open(); // open or create if not already done
return new Promise((resolve) => {
const store: IDBRequest = db.transaction([table], 'readwrite').objectStore(table).count();
store.onerror = (evt) => log('count error:', evt);
store.onsuccess = () => resolve(store.result);
});
}
export async function save(faceRecord: FaceRecord) {
if (!db) await open(); // open or create if not already done
const newRecord = { name: faceRecord.name, descriptor: faceRecord.descriptor, image: faceRecord.image }; // omit id as it is auto-increment
db.transaction([table], 'readwrite').objectStore(table).put(newRecord);
log('save:', newRecord);
}
export async function remove(faceRecord: FaceRecord) {
if (!db) await open(); // open or create if not already done
db.transaction([table], 'readwrite').objectStore(table).delete(faceRecord.id); // delete based on id
log('delete:', faceRecord);
}

File diff suppressed because one or more lines are too long

View File

@ -1,42 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<title>Human</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<link rel="manifest" href="./manifest.webmanifest">
<link rel="shortcut icon" href="../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../assets/icon.png">
<script src="./facematch.js" type="module"></script>
<style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 24px; font-variant: small-caps; }
body { margin: 0; background: black; color: white; overflow-x: hidden; }
img { object-fit: contain; }
.face { width: 128px; height: 128px; }
</style>
</head>
<body>
<div style="display: block">
<div style="display: flex">
<div>
Selected Face<br>
<canvas id="orig" style="width: 200px; height: 200px; padding: 20px"></canvas>
</div>
<div style="width: 20px"></div>
<div>
Sample Images<br>
<div id="images" style="display: flex; flex-wrap: wrap; width: 85vw"></div>
</div>
<span id="desc" style="visibility: hidden; font-size: 0.4rem;"></span><br>
</div>
<div style="height: 10px"></div>
Extracted Faces - click on a face to sort by similarity and get a known face match:<br>
<div id="faces"></div>
</div>
</body>
</html>

View File

@ -1,228 +0,0 @@
import Human from '../dist/human.esm.js';
const userConfig = {
backend: 'wasm',
async: false,
warmup: 'none',
debug: true,
videoOptimized: false,
face: {
enabled: true,
detector: { rotation: true, return: true },
mesh: { enabled: true },
embedding: { enabled: true },
iris: { enabled: false },
age: { enabled: false },
gender: { enabled: false },
emotion: { enabled: false },
description: { enabled: true },
},
hand: { enabled: false },
gesture: { enabled: false },
body: { enabled: false },
filter: {
enabled: false,
},
};
const human = new Human(userConfig); // new instance of human
const all = []; // array that will hold all detected faces
let db = []; // array that holds all known faces
const minScore = 0.6;
const minConfidence = 0.8;
function log(...msg) {
const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
// eslint-disable-next-line no-console
console.log(ts, ...msg);
}
async function getFaceDB() {
// download db with known faces
try {
let res = await fetch('/demo/facematch-faces.json');
if (!res || !res.ok) res = await fetch('/human/demo/facematch-faces.json');
db = (res && res.ok) ? await res.json() : [];
for (const rec of db) {
rec.embedding = rec.embedding.map((a) => parseFloat(a.toFixed(4)));
}
} catch (err) {
log('Could not load faces database', err);
}
}
async function analyze(face) {
// refresh faces database
await getFaceDB();
// if we have face image tensor, enhance it and display it
if (face.tensor) {
const enhanced = human.enhance(face);
const desc = document.getElementById('desc');
desc.innerText = `{"name":"unknown", "source":"${face.fileName}", "embedding":[${face.embedding}]},`;
const embedding = face.embedding.map((a) => parseFloat(a.toFixed(4)));
navigator.clipboard.writeText(`{"name":"unknown", "source":"${face.fileName}", "embedding":[${embedding}]},`);
if (enhanced) {
const c = document.getElementById('orig');
const squeeze = enhanced.squeeze().div(255);
await human.tf.browser.toPixels(squeeze, c);
enhanced.dispose();
squeeze.dispose();
const ctx = c.getContext('2d');
ctx.font = 'small-caps 0.4rem "Lato"';
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
}
}
// loop through all canvases that contain faces
const canvases = document.getElementsByClassName('face');
for (const canvas of canvases) {
// calculate similarity from selected face to current one in the loop
const current = all[canvas.tag.sample][canvas.tag.face];
const similarity = human.similarity(face.embedding, current.embedding, 3);
// get best match
// draw the canvas
canvas.title = similarity;
await human.tf.browser.toPixels(current.tensor, canvas);
const ctx = canvas.getContext('2d');
ctx.font = 'small-caps 1rem "Lato"';
ctx.fillStyle = 'rgba(0, 0, 0, 1)';
ctx.fillText(`${(100 * similarity).toFixed(1)}%`, 3, 23);
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
ctx.fillText(`${(100 * similarity).toFixed(1)}%`, 4, 24);
ctx.font = 'small-caps 0.8rem "Lato"';
ctx.fillText(`${current.age}y ${(100 * (current.genderConfidence || 0)).toFixed(1)}% ${current.gender}`, 4, canvas.height - 6);
// identify person
ctx.font = 'small-caps 1rem "Lato"';
const person = await human.match(current.embedding, db);
if (person.similarity && person.similarity > minScore && current.confidence > minConfidence) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30);
}
// sort all faces by similarity
const sorted = document.getElementById('faces');
[...sorted.children]
.sort((a, b) => parseFloat(b.title) - parseFloat(a.title))
.forEach((canvas) => sorted.appendChild(canvas));
}
async function faces(index, res, fileName) {
all[index] = res.face;
for (const i in res.face) {
all[index][i].fileName = fileName;
const canvas = document.createElement('canvas');
canvas.tag = { sample: index, face: i };
canvas.width = 200;
canvas.height = 200;
canvas.className = 'face';
// mouse click on any face canvas triggers analysis
canvas.addEventListener('click', (evt) => {
log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, all[evt.target.tag.sample][evt.target.tag.face]);
analyze(all[evt.target.tag.sample][evt.target.tag.face]);
});
// if we actually got face image tensor, draw canvas with that face
if (res.face[i].tensor) {
await human.tf.browser.toPixels(res.face[i].tensor, canvas);
document.getElementById('faces').appendChild(canvas);
const ctx = canvas.getContext('2d');
ctx.font = 'small-caps 0.8rem "Lato"';
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
ctx.fillText(`${res.face[i].age}y ${(100 * (res.face[i].genderConfidence || 0)).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6);
const person = await human.match(res.face[i].embedding, db);
ctx.font = 'small-caps 1rem "Lato"';
if (person.similarity && person.similarity > minScore && res.face[i].confidence > minConfidence) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30);
}
}
}
async function process(index, image) {
return new Promise((resolve) => {
const img = new Image(128, 128);
img.onload = () => { // must wait until image is loaded
human.detect(img).then(async (res) => {
await faces(index, res, image); // then wait until image is analyzed
log('Add image:', index + 1, image, 'faces:', res.face.length);
document.getElementById('images').appendChild(img); // and finally we can add it
resolve(true);
});
};
img.onerror = () => {
log('Add image error:', index + 1, image);
resolve(false);
};
img.title = image;
img.src = encodeURI(image);
});
}
async function createDB() {
log('Creating Faces DB...');
for (const image of all) {
for (const face of image) db.push({ name: 'unknown', source: face.fileName, embedding: face.embedding });
}
log(db);
}
async function main() {
// pre-load human models
await human.load();
let res;
let images = [];
let dir = [];
// load face descriptor database
await getFaceDB();
// enumerate all sample images in /assets
res = await fetch('/assets');
dir = (res && res.ok) ? await res.json() : [];
images = images.concat(dir.filter((img) => (img.endsWith('.jpg') && img.includes('sample'))));
// enumerate additional private test images in /private, not included in git repository
res = await fetch('/private/me');
dir = (res && res.ok) ? await res.json() : [];
images = images.concat(dir.filter((img) => (img.endsWith('.jpg'))));
// enumerate additional error images, not included in git repository
res = await fetch('/private/err');
dir = (res && res.ok) ? await res.json() : [];
images = images.concat(dir.filter((img) => (img.endsWith('.jpg'))));
log('Enumerated:', images.length, 'images');
// could not dynamically enumerate images so using static list
if (images.length === 0) {
images = [
'sample1.jpg',
'sample2.jpg',
'sample3.jpg',
'sample4.jpg',
'sample5.jpg',
'sample6.jpg',
'sample-me.jpg',
'human-sample-face.jpg',
'human-sample-upper.jpg',
'human-sample-body.jpg',
];
// add prefix for gitpages
images = images.map((a) => `/human/assets/${a}`);
log('Adding static image list:', images.length, 'images');
}
// download and analyze all images
for (let i = 0; i < images.length; i++) await process(i, images[i]);
// print stats
const num = all.reduce((prev, cur) => prev += cur.length, 0);
log('Extracted faces:', num, 'from images:', all.length);
log(human.tf.engine().memory());
// if we didn't download db, generate it from current faces
if (!db || db.length === 0) await createDB();
else log('Loaded Faces DB:', db.length);
log('Ready');
}
window.onload = main;

84
demo/facematch/README.md Normal file
View File

@ -0,0 +1,84 @@
# Human Face Recognition & Matching
- **Browser** demo: `index.html` & `facematch.js`:
Loads sample images, extracts faces and runs match and similarity analysis
- **NodeJS** demo: `node-match.js` & `node-match-worker.js`
Advanced multithreading demo that runs a pool of worker threads to process a high number of matches
- Sample face database: `faces.json`
<br>
## Browser Face Recognition Demo
- `demo/facematch`: browser demo that uses all face description and embedding features to
detect, extract and identify all faces, plus calculates similarity between them (a short API sketch follows this list)
It highlights functionality such as:
- Loading images
- Extracting faces from images
- Calculating face embedding descriptors
- Finding face similarity and sorting them by similarity
- Finding best face match based on a known list of faces and printing matches
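For orientation, the core `Human` API calls behind these steps (a sketch; `img`, `faceA`, `faceB` are illustrative, and the complete demo `facematch.js` follows below):

```js
import { Human } from '../../dist/human.esm.js';

const human = new Human(userConfig); // userConfig as defined in facematch.js
const res = await human.detect(img); // detect all faces in an input image element
const similarity = human.match.similarity(faceA.embedding, faceB.embedding); // pairwise similarity, 0..1
const best = await human.match.find(faceA.embedding, db.map((rec) => rec.embedding)); // { index, similarity } of best db match
```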
<br>
## NodeJS Multi-Threading Match Solution
### Methods and Properties in `node-match`
- `createBuffer`: create the shared buffer array
a single copy of the data regardless of the number of workers
fixed size based on `options.dbMax`
- `appendRecord`: add an additional batch of descriptors to the buffer
a batch of records can be appended to the buffer at any time
workers are informed of the new content after the append has completed
- `workersStart`: start or expand a pool of `threadPoolSize` workers
each worker runs `node-match-worker` and listens for messages from the main thread
can shut down workers or create additional worker threads on-the-fly
safe against workers that exit
- `workersClose`: close workers in a pool
first requests workers to exit, then terminates them after a timeout
- `match`: dispatch a match job to a worker
returns the first match that satisfies `minThreshold`
jobs are assigned to workers round-robin since timing for each job is near-fixed and predictable
- `getDescriptor`: get the descriptor array for a given id from the buffer
- `fuzDescriptor`: slightly randomize descriptor content for a harder match
- `getLabel`: fetch the label for a resolved descriptor index
- `loadDB`: load the face database from a JSON file `dbFile`
extracts descriptors and adds them to the buffer
extracts labels and maintains them in the main thread
for test purposes, loads the same database `dbFact` times to create a very large database
`node-match` runs in a loop listening for messages from workers until `maxJobs` have been reached
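For orientation, a minimal sketch of the intended call order (method names as listed above and implemented in `node-match.js` below; loop bounds are illustrative):

```js
await createBuffer(); // allocate the shared descriptor buffer once
await loadDB(testOptions.dbFact); // append descriptors and labels from dbFile
await workersStart(options.threadPoolSize); // start or expand the worker pool
for (let i = 0; i < testOptions.maxJobs; i++) {
  data.requestID++; // round-robin counter
  match(getDescriptor(i)); // dispatch; results arrive as worker messages
}
// workersClose() is called from the message handler once maxJobs results are in
```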
### Performance
Performance decreases linearly with the number of records in the database
Performance improves with the number of worker threads, but sub-linearly due to communication overhead (e.g., with 10k records, 6 threads yield a ~2.4x speedup over a single thread, not 6x)
- Face database with 10k records:
> threadPoolSize: 1 => ~60 ms / match job
> threadPoolSize: 6 => ~25 ms / match job
- Face database with 50k records:
> threadPoolSize: 1 => ~300 ms / match job
> threadPoolSize: 6 => ~100 ms / match job
- Face database with 100k records:
> threadPoolSize: 1 => ~600 ms / match job
> threadPoolSize: 6 => ~200 ms / match job
### Example
> node node-match
<!-- eslint-skip -->
```js
INFO: options: { dbFile: './faces.json', dbMax: 10000, threadPoolSize: 6, workerSrc: './node-match-worker.js', debug: false, minThreshold: 0.9, descLength: 1024 }
DATA: created shared buffer: { maxDescriptors: 10000, totalBytes: 40960000, totalElements: 10240000 }
DATA: db loaded: { existingRecords: 0, newRecords: 5700 }
INFO: starting worker thread pool: { totalWorkers: 6, alreadyActive: 0 }
STATE: submitted: { matchJobs: 100, poolSize: 6, activeWorkers: 6 }
STATE: { matchJobsFinished: 100, totalTimeMs: 1769, averageTimeMs: 17.69 }
INFO: closing workers: { poolSize: 6, activeWorkers: 6 }
```

257
demo/facematch/facematch.js Normal file
View File

@ -0,0 +1,257 @@
/**
* Human demo for browsers
*
* Demo for face descriptor analysis and face similarity analysis
*/
/** @type {Human} */
import { Human } from '../../dist/human.esm.js';
const userConfig = {
backend: 'humangl',
async: true,
warmup: 'none',
cacheSensitivity: 0.01,
debug: true,
modelBasePath: '../../models/',
deallocate: true,
filter: {
enabled: true,
equalization: true,
width: 0,
},
face: {
enabled: true,
detector: { return: true, rotation: true, maxDetected: 50, iouThreshold: 0.01, minConfidence: 0.2 },
mesh: { enabled: true },
iris: { enabled: false },
emotion: { enabled: true },
description: { enabled: true },
},
hand: { enabled: false },
gesture: { enabled: false },
body: { enabled: false },
segmentation: { enabled: false },
};
const human = new Human(userConfig); // new instance of human
const all = []; // array that will hold all detected faces
let db = []; // array that holds all known faces
const minScore = 0.4;
function log(...msg) {
const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
console.log(ts, ...msg); // eslint-disable-line no-console
}
function title(msg) {
document.getElementById('title').innerHTML = msg;
}
async function loadFaceMatchDB() {
// download db with known faces
try {
let res = await fetch('/demo/facematch/faces.json');
if (!res || !res.ok) res = await fetch('/human/demo/facematch/faces.json');
db = (res && res.ok) ? await res.json() : [];
log('Loaded Faces DB:', db);
} catch (err) {
log('Could not load faces database', err);
}
}
async function selectFaceCanvas(face) {
// if we have face image tensor, enhance it and display it
let embedding;
document.getElementById('orig').style.filter = 'blur(16px)';
if (face.tensor) {
title('Sorting Faces by Similarity');
const c = document.getElementById('orig');
await human.draw.tensor(face.tensor, c);
const arr = db.map((rec) => rec.embedding);
const res = await human.match.find(face.embedding, arr);
log('Match:', db[res.index].name);
const emotion = face.emotion[0] ? `${Math.round(100 * face.emotion[0].score)}% ${face.emotion[0].emotion}` : 'N/A';
document.getElementById('desc').innerHTML = `
source: ${face.fileName}<br>
match: ${Math.round(1000 * res.similarity) / 10}% ${db[res.index].name}<br>
score: ${Math.round(100 * face.boxScore)}% detection ${Math.round(100 * face.faceScore)}% analysis<br>
age: ${face.age} years<br>
gender: ${Math.round(100 * face.genderScore)}% ${face.gender}<br>
emotion: ${emotion}<br>
`;
embedding = face.embedding.map((a) => parseFloat(a.toFixed(4)));
navigator.clipboard.writeText(`{"name":"unknown", "source":"${face.fileName}", "embedding":[${embedding}]},`);
}
// loop through all canvases that contain faces
const canvases = document.getElementsByClassName('face');
let time = 0;
for (const canvas of canvases) {
// calculate similarity from selected face to current one in the loop
const current = all[canvas.tag.sample][canvas.tag.face];
const similarity = human.match.similarity(face.embedding, current.embedding);
canvas.tag.similarity = similarity;
// get best match
// draw the canvas
await human.draw.tensor(current.tensor, canvas);
const ctx = canvas.getContext('2d');
ctx.font = 'small-caps 1rem "Lato"';
ctx.fillStyle = 'rgba(0, 0, 0, 1)';
ctx.fillText(`${(100 * similarity).toFixed(1)}%`, 3, 23);
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
ctx.fillText(`${(100 * similarity).toFixed(1)}%`, 4, 24);
ctx.font = 'small-caps 0.8rem "Lato"';
ctx.fillText(`${current.age}y ${(100 * (current.genderScore || 0)).toFixed(1)}% ${current.gender}`, 4, canvas.height - 6);
// identify person
ctx.font = 'small-caps 1rem "Lato"';
const start = human.now();
const arr = db.map((rec) => rec.embedding);
const res = await human.match.find(current.embedding, arr);
time += (human.now() - start);
if (res.similarity > minScore) ctx.fillText(`DB: ${(100 * res.similarity).toFixed(1)}% ${db[res.index].name}`, 4, canvas.height - 30);
}
log('Analyzed:', 'Face:', canvases.length, 'DB:', db.length, 'Time:', time);
// sort all faces by similarity
const sorted = document.getElementById('faces');
[...sorted.children]
.sort((a, b) => parseFloat(b.tag.similarity) - parseFloat(a.tag.similarity))
.forEach((canvas) => sorted.appendChild(canvas));
document.getElementById('orig').style.filter = 'blur(0)';
title('Selected Face');
}
async function addFaceCanvas(index, res, fileName) {
all[index] = res.face;
for (const i in res.face) {
if (!res.face[i].tensor) continue; // did not get valid results
if ((res.face[i].faceScore || 0) < human.config.face.detector.minConfidence) continue; // face analysis score too low
all[index][i].fileName = fileName;
const canvas = document.createElement('canvas');
canvas.tag = { sample: index, face: i, source: fileName };
canvas.width = 200;
canvas.height = 200;
canvas.className = 'face';
const emotion = res.face[i].emotion[0] ? `${Math.round(100 * res.face[i].emotion[0].score)}% ${res.face[i].emotion[0].emotion}` : 'N/A';
canvas.title = `
source: ${res.face[i].fileName}
score: ${Math.round(100 * res.face[i].boxScore)}% detection ${Math.round(100 * res.face[i].faceScore)}% analysis
age: ${res.face[i].age} years
gender: ${Math.round(100 * res.face[i].genderScore)}% ${res.face[i].gender}
emotion: ${emotion}
`.replace(/ {2}/g, ' ');
await human.draw.tensor(res.face[i].tensor, canvas);
const ctx = canvas.getContext('2d');
if (!ctx) return;
ctx.font = 'small-caps 0.8rem "Lato"';
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
ctx.fillText(`${res.face[i].age}y ${(100 * (res.face[i].genderScore || 0)).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6);
const arr = db.map((rec) => rec.embedding);
const result = await human.match.find(res.face[i].embedding, arr);
ctx.font = 'small-caps 1rem "Lato"';
if (result.similarity && result.similarity > minScore) ctx.fillText(`${(100 * result.similarity).toFixed(1)}% ${db[result.index].name}`, 4, canvas.height - 30);
document.getElementById('faces').appendChild(canvas);
canvas.addEventListener('click', (evt) => {
log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, 'Source:', evt.target.tag.source, all[evt.target.tag.sample][evt.target.tag.face]);
selectFaceCanvas(all[evt.target.tag.sample][evt.target.tag.face]);
});
}
}
async function addImageElement(index, image, length) {
const faces = all.reduce((prev, curr) => prev += curr.length, 0);
title(`Analyzing Input Images<br> ${Math.round(100 * index / length)}% [${index} / ${length}]<br>Found ${faces} Faces`);
return new Promise((resolve) => {
const img = new Image(128, 128);
img.onload = () => { // must wait until image is loaded
document.getElementById('images').appendChild(img); // and finally we can add it
human.detect(img, userConfig)
.then((res) => { // eslint-disable-line promise/always-return
addFaceCanvas(index, res, image); // then wait until image is analyzed
resolve(true);
})
.catch(() => log('human detect error'));
};
img.onerror = () => {
log('Add image error:', index + 1, image);
resolve(false);
};
img.title = image;
img.src = encodeURI(image);
});
}
function createFaceMatchDB() {
log('Creating Faces DB...');
for (const image of all) {
for (const face of image) db.push({ name: 'unknown', source: face.fileName, embedding: face.embedding });
}
log(db);
}
async function main() {
// pre-load human models
await human.load();
title('Loading Face Match Database');
let images = [];
let dir = [];
// load face descriptor database
await loadFaceMatchDB();
// enumerate all sample images in /assets
title('Enumerating Input Images');
const res = await fetch('/samples/in');
dir = (res && res.ok) ? await res.json() : [];
images = images.concat(dir.filter((img) => (img.endsWith('.jpg') && img.includes('sample'))));
// could not dynamically enumerate images so using static list
if (images.length === 0) {
images = [
'ai-face.jpg', 'ai-upper.jpg', 'ai-body.jpg', 'solvay1927.jpg',
'group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg',
'person-celeste.jpg', 'person-christina.jpg', 'person-lauren.jpg', 'person-lexi.jpg', 'person-linda.jpg', 'person-nicole.jpg', 'person-tasia.jpg', 'person-tetiana.jpg', 'person-vlado.jpg', 'person-vlado1.jpg', 'person-vlado5.jpg',
'stock-group-1.jpg', 'stock-group-2.jpg',
'stock-models-1.jpg', 'stock-models-2.jpg', 'stock-models-3.jpg', 'stock-models-4.jpg', 'stock-models-5.jpg', 'stock-models-6.jpg', 'stock-models-7.jpg', 'stock-models-8.jpg', 'stock-models-9.jpg',
'stock-teen-1.jpg', 'stock-teen-2.jpg', 'stock-teen-3.jpg', 'stock-teen-4.jpg', 'stock-teen-5.jpg', 'stock-teen-6.jpg', 'stock-teen-7.jpg', 'stock-teen-8.jpg',
'stock-models-10.jpg', 'stock-models-11.jpg', 'stock-models-12.jpg', 'stock-models-13.jpg', 'stock-models-14.jpg', 'stock-models-15.jpg', 'stock-models-16.jpg',
'cgi-model-1.jpg', 'cgi-model-2.jpg', 'cgi-model-3.jpg', 'cgi-model-4.jpg', 'cgi-model-5.jpg', 'cgi-model-6.jpg', 'cgi-model-7.jpg', 'cgi-model-8.jpg', 'cgi-model-9.jpg',
'cgi-model-10.jpg', 'cgi-model-11.jpg', 'cgi-model-12.jpg', 'cgi-model-13.jpg', 'cgi-model-14.jpg', 'cgi-model-15.jpg', 'cgi-model-18.jpg', 'cgi-model-19.jpg',
'cgi-model-20.jpg', 'cgi-model-21.jpg', 'cgi-model-22.jpg', 'cgi-model-23.jpg', 'cgi-model-24.jpg', 'cgi-model-25.jpg', 'cgi-model-26.jpg', 'cgi-model-27.jpg', 'cgi-model-28.jpg', 'cgi-model-29.jpg',
'cgi-model-30.jpg', 'cgi-model-31.jpg', 'cgi-model-33.jpg', 'cgi-model-34.jpg',
'cgi-multiangle-1.jpg', 'cgi-multiangle-2.jpg', 'cgi-multiangle-3.jpg', 'cgi-multiangle-4.jpg', 'cgi-multiangle-6.jpg', 'cgi-multiangle-7.jpg', 'cgi-multiangle-8.jpg', 'cgi-multiangle-9.jpg', 'cgi-multiangle-10.jpg', 'cgi-multiangle-11.jpg',
'stock-emotions-a-1.jpg', 'stock-emotions-a-2.jpg', 'stock-emotions-a-3.jpg', 'stock-emotions-a-4.jpg', 'stock-emotions-a-5.jpg', 'stock-emotions-a-6.jpg', 'stock-emotions-a-7.jpg', 'stock-emotions-a-8.jpg',
'stock-emotions-b-1.jpg', 'stock-emotions-b-2.jpg', 'stock-emotions-b-3.jpg', 'stock-emotions-b-4.jpg', 'stock-emotions-b-5.jpg', 'stock-emotions-b-6.jpg', 'stock-emotions-b-7.jpg', 'stock-emotions-b-8.jpg',
];
// add prefix for gitpages
images = images.map((a) => `../../samples/in/${a}`);
log('Adding static image list:', images);
} else {
log('Discovered images:', images);
}
// images = ['/samples/in/person-lexi.jpg', '/samples/in/person-carolina.jpg', '/samples/in/solvay1927.jpg'];
const t0 = human.now();
for (let i = 0; i < images.length; i++) await addImageElement(i, images[i], images.length);
const t1 = human.now();
// print stats
const num = all.reduce((prev, cur) => prev += cur.length, 0);
log('Extracted faces:', num, 'from images:', all.length, 'time:', Math.round(t1 - t0));
log(human.tf.engine().memory());
// if we didn't download db, generate it from current faces
if (!db || db.length === 0) createFaceMatchDB();
title('');
log('Ready');
human.validate(userConfig);
human.match.similarity([], []);
}
window.onload = main;

81
demo/facematch/faces.json Normal file

File diff suppressed because one or more lines are too long

50
demo/facematch/index.html Normal file
View File

@ -0,0 +1,50 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<!-- <meta http-equiv="content-type" content="text/html; charset=utf-8"> -->
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./facematch.js" type="module"></script>
<style>
img { object-fit: contain; }
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 24px; font-variant: small-caps; }
body { margin: 24px; background: black; color: white; overflow: hidden; text-align: -webkit-center; min-height: 100%; max-height: 100%; }
::-webkit-scrollbar { height: 8px; border: 0; border-radius: 0; }
::-webkit-scrollbar-thumb { background: grey }
::-webkit-scrollbar-track { margin: 3px; }
.orig { width: 200px; height: 200px; padding-bottom: 20px; filter: blur(16px); transition : all 0.5s ease; }
.text { margin: 24px; }
.face { width: 128px; height: 128px; margin: 2px; padding: 2px; cursor: grab; transform: scale(1.00); transition : all 0.3s ease; }
.face:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
</style>
</head>
<body>
<div style="display: block">
<div style="display: flex">
<div style="min-width: 400px">
<div class="text" id="title"></div>
<canvas id="orig" class="orig"></canvas>
<div id="desc" style="font-size: 0.8rem; text-align: left;"></div>
</div>
<div style="width: 20px"></div>
<div>
<div class="text">Input Images</div>
<div id="images" style="display: flex; width: 60vw; overflow-x: auto; overflow-y: hidden; scroll-behavior: smooth"></div>
</div>
</div>
<div id="list" style="height: 10px"></div>
<div class="text">Select person to sort by similarity and get a known face match</div>
<div id="faces" style="height: 50vh; overflow-y: auto"></div>
</div>
</body>
</html>

View File

@ -0,0 +1,76 @@
/**
* Runs in a worker thread started by `node-match` demo app
*
*/
const threads = require('worker_threads');
let debug = false;
/** @type SharedArrayBuffer */
let buffer;
/** @type Float32Array */
let view;
let threshold = 0;
let records = 0;
const descLength = 1024; // descriptor length in elements (Float32 values)
function distance(descBuffer, index, options = { order: 2, multiplier: 20 }) {
const descriptor = new Float32Array(descBuffer);
let sum = 0;
for (let i = 0; i < descriptor.length; i++) {
const diff = (options.order === 2) ? (descriptor[i] - view[index * descLength + i]) : (Math.abs(descriptor[i] - view[index * descLength + i]));
sum += (options.order === 2) ? (diff * diff) : (diff ** options.order);
}
return (options.multiplier || 20) * sum;
}
function match(descBuffer, options = { order: 2, multiplier: 20 }) {
let best = Number.MAX_SAFE_INTEGER;
let index = -1;
for (let i = 0; i < records; i++) {
const res = distance(descBuffer, i, { order: options.order, multiplier: options.multiplier });
if (res < best) {
best = res;
index = i;
}
if (best < threshold || best === 0) break; // short circuit
}
best = (options.order === 2) ? Math.sqrt(best) : best ** (1 / options.order);
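// map distance to a 0..1 similarity score: zero distance maps to 1.0, distances of 100 or more map to 0.0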
const similarity = Math.round(100 * Math.max(0, 100 - best) / 100.0) / 100;
return { index, distance: best, similarity };
}
threads.parentPort?.on('message', (msg) => {
if (typeof msg.descriptor !== 'undefined') { // actual work order to find a match
const t0 = performance.now();
const result = match(msg.descriptor);
const t1 = performance.now();
threads.parentPort?.postMessage({ request: msg.request, time: Math.trunc(t1 - t0), ...result });
return; // short circuit
}
if (msg instanceof SharedArrayBuffer) { // called only once to receive reference to shared array buffer
buffer = msg;
view = new Float32Array(buffer); // initialize f32 view into buffer
if (debug) threads.parentPort?.postMessage(`buffer: ${buffer.byteLength}`);
}
if (typeof msg.records !== 'undefined') { // received whenever the number of records changes
records = msg.records;
if (debug) threads.parentPort?.postMessage(`records: ${records}`);
}
if (typeof msg.debug !== 'undefined') { // set verbose logging
debug = msg.debug;
// if (debug) threads.parentPort?.postMessage(`debug: ${debug}`);
}
if (typeof msg.threshold !== 'undefined') { // set minimum similarity threshold
threshold = msg.threshold;
// if (debug) threads.parentPort?.postMessage(`threshold: ${threshold}`);
}
if (typeof msg.shutdown !== 'undefined') { // got message to close worker
if (debug) threads.parentPort?.postMessage('shutting down');
process.exit(0); // eslint-disable-line no-process-exit
}
});
if (debug) threads.parentPort?.postMessage('started');
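For orientation, the main-thread side of this message protocol (a sketch mirroring the handlers above; `node-match.js` below is the real driver, and `sharedBuffer`/`descriptor` are assumed to be prepared by the caller):

```js
const threads = require('worker_threads');
const worker = new threads.Worker('./node-match-worker.js');
// sharedBuffer is a SharedArrayBuffer of descriptors; descriptor is a transferable ArrayBuffer
worker.postMessage(sharedBuffer); // one-time: hand over the shared descriptor buffer
worker.postMessage({ records: 5700, threshold: 0.5, debug: true }); // runtime settings
worker.postMessage({ descriptor, request: 1 }, [descriptor]); // dispatch one match job
worker.on('message', (msg) => console.log(msg.request, msg.index, msg.similarity, msg.time)); // job result
worker.postMessage({ shutdown: true }); // ask the worker to exit
```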

View File

@ -0,0 +1,184 @@
/**
* Human demo app for NodeJS that generates random facial descriptors
* and uses NodeJS multi-threading to start multiple threads for face matching
* uses `node-match-worker.js` to perform actual face matching analysis
*/
const fs = require('fs');
const path = require('path');
const threads = require('worker_threads');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// global options
const options = {
dbFile: 'demo/facematch/faces.json', // sample face db
dbMax: 10000, // maximum number of records to hold in memory
threadPoolSize: 12, // number of worker threads to create in thread pool
workerSrc: './node-match-worker.js', // code that executes in the worker thread
debug: true, // verbose messages
minThreshold: 0.5, // match returns first record that meets the similarity threshold, set to 0 to always scan all records
descLength: 1024, // descriptor length
};
// test options
const testOptions = {
dbFact: 175, // load db n times to fake huge size
maxJobs: 200, // exit after processing this many jobs
fuzDescriptors: true, // randomize descriptor content before match for harder jobs
};
// global data structures
const data = {
/** @type string[] */
labels: [], // array of strings; its length serves as the overall number of records, so it has to be maintained carefully
/** @type SharedArrayBuffer | null */
buffer: null,
/** @type Float32Array | null */
view: null,
/** @type threads.Worker[] */
workers: [], // holds instances of workers; a worker entry can be null if it exited
requestID: 0, // each request should increment this counter as it's used for round-robin assignment
};
let t0 = process.hrtime.bigint(); // used for perf counters
const appendRecords = (labels, descriptors) => {
if (!data.view) return 0;
if (descriptors.length !== labels.length) {
log.error('append error:', { descriptors: descriptors.length, labels: labels.length });
}
// if (options.debug) log.state('appending:', { descriptors: descriptors.length, labels: labels.length });
for (let i = 0; i < descriptors.length; i++) {
for (let j = 0; j < descriptors[i].length; j++) {
data.view[data.labels.length * descriptors[i].length + j] = descriptors[i][j]; // add each descriptors element to buffer
}
data.labels.push(labels[i]); // finally add to labels
}
for (const worker of data.workers) { // inform all workers how many records we have
if (worker) worker.postMessage({ records: data.labels.length });
}
return data.labels.length;
};
const getLabel = (index) => data.labels[index];
const getDescriptor = (index) => {
if (!data.view) return [];
const descriptor = [];
for (let i = 0; i < options.descLength; i++) descriptor.push(data.view[index * options.descLength + i]);
return descriptor;
};
const fuzDescriptor = (descriptor) => {
for (let i = 0; i < descriptor.length; i++) descriptor[i] += Math.random() - 0.5;
return descriptor;
};
const delay = (ms) => new Promise((resolve) => { setTimeout(resolve, ms); });
async function workersClose() {
const current = data.workers.filter((worker) => !!worker).length;
log.info('closing workers:', { poolSize: data.workers.length, activeWorkers: current });
for (const worker of data.workers) {
if (worker) worker.postMessage({ shutdown: true }); // tell worker to exit
}
await delay(250); // wait a little for threads to exit on their own
const remaining = data.workers.filter((worker) => !!worker).length;
if (remaining > 0) {
log.info('terminating remaining workers:', { remaining, pool: data.workers.length });
for (const worker of data.workers) {
if (worker) worker.terminate(); // if worker did not exit cleanly, terminate it
}
}
}
const workerMessage = (index, msg) => {
if (msg.request) {
if (options.debug) log.data('message:', { worker: index, request: msg.request, time: msg.time, label: getLabel(msg.index), similarity: msg.similarity });
if (msg.request >= testOptions.maxJobs) {
const t1 = process.hrtime.bigint();
const elapsed = Math.round(Number(t1 - t0) / 1000 / 1000);
log.state({ matchJobsFinished: testOptions.maxJobs, totalTimeMs: elapsed, averageTimeMs: Math.round(100 * elapsed / testOptions.maxJobs) / 100 });
workersClose();
}
} else {
log.data('message:', { worker: index, msg });
}
};
async function workerClose(id, code) {
const previous = data.workers.filter((worker) => !!worker).length;
delete data.workers[id];
const current = data.workers.filter((worker) => !!worker).length;
if (options.debug) log.state('worker exit:', { id, code, previous, current });
}
async function workersStart(numWorkers) {
const previous = data.workers.filter((worker) => !!worker).length;
log.info('starting worker thread pool:', { totalWorkers: numWorkers, alreadyActive: previous });
for (let i = 0; i < numWorkers; i++) {
if (!data.workers[i]) { // worker does not exist, so create it
const worker = new threads.Worker(path.join(__dirname, options.workerSrc));
worker.on('message', (msg) => workerMessage(i, msg));
worker.on('error', (err) => log.error('worker error:', { err }));
worker.on('exit', (code) => workerClose(i, code));
worker.postMessage(data.buffer); // send buffer to worker
data.workers[i] = worker;
}
data.workers[i]?.postMessage({ records: data.labels.length, threshold: options.minThreshold, debug: options.debug }); // inform worker how many records there are
}
await delay(100); // just wait a bit for everything to settle down
}
const match = (descriptor) => {
// const arr = Float32Array.from(descriptor);
const buffer = new ArrayBuffer(options.descLength * 4);
const view = new Float32Array(buffer);
view.set(descriptor);
const available = data.workers.filter((worker) => !!worker).length; // find number of available workers
if (available > 0) data.workers[data.requestID % available].postMessage({ descriptor: buffer, request: data.requestID }, [buffer]); // round-robin across available workers
else log.error('no available workers');
};
async function loadDB(count) {
const previous = data.labels.length;
if (!fs.existsSync(options.dbFile)) {
log.error('db file does not exist:', options.dbFile);
return;
}
t0 = process.hrtime.bigint();
for (let i = 0; i < count; i++) { // test loop: load entire face db from array of objects n times into buffer
const db = JSON.parse(fs.readFileSync(options.dbFile).toString());
const names = db.map((record) => record.name);
const descriptors = db.map((record) => record.embedding);
appendRecords(names, descriptors);
}
log.data('db loaded:', { existingRecords: previous, newRecords: data.labels.length });
}
async function createBuffer() {
data.buffer = new SharedArrayBuffer(4 * options.dbMax * options.descLength); // preallocate max number of records as sharedarraybuffers cannot grow
data.view = new Float32Array(data.buffer); // create view into buffer
data.labels.length = 0;
log.data('created shared buffer:', { maxDescriptors: (data.view.length || 0) / options.descLength, totalBytes: data.buffer.byteLength, totalElements: data.view.length });
}
async function main() {
log.header();
log.info('options:', options);
await createBuffer(); // create shared buffer array
await loadDB(testOptions.dbFact); // loadDB is a test method that calls the actual appendRecords
await workersStart(options.threadPoolSize); // can be called at anytime to modify worker pool size
for (let i = 0; i < testOptions.maxJobs; i++) {
const idx = Math.trunc(data.labels.length * Math.random()); // grab a random descriptor index that we'll search for
const descriptor = getDescriptor(idx); // grab a descriptor at index
data.requestID++; // increase request id
if (testOptions.fuzDescriptors) match(fuzDescriptor(descriptor)); // fuz descriptor for harder match
else match(descriptor);
if (options.debug) log.debug('submitted job', data.requestID); // we already know what we're searching for so we can compare results
}
log.state('submitted:', { matchJobs: testOptions.maxJobs, poolSize: data.workers.length, activeWorkers: data.workers.filter((worker) => !!worker).length });
}
main();

3
demo/helpers/README.md Normal file
View File

@ -0,0 +1,3 @@
# Helper libraries
Used by the main `Human` demo app

View File

@ -1,10 +1,8 @@
/* eslint-disable max-len */
// based on: https://github.com/munrocket/gl-bench
const UICSS = `
#gl-bench { position: absolute; right: 1rem; bottom: 1rem; z-index:1000; -webkit-user-select: none; -moz-user-select: none; user-select: none; }
#gl-bench div { position: relative; display: block; margin: 4px; padding: 0 2px 0 2px; background: darkslategray; border-radius: 0.1rem; cursor: pointer; opacity: 0.9; }
#gl-bench div { position: relative; display: block; margin: 4px; padding: 0 2px 0 2px; background: #303030; border-radius: 0.1rem; cursor: pointer; opacity: 0.9; }
#gl-bench svg { height: 60px; margin: 0 0px 0px 4px; }
#gl-bench text { font-size: 16px; font-family: 'Lato', 'Segoe UI'; dominant-baseline: middle; text-anchor: middle; }
#gl-bench .gl-mem { font-size: 12px; fill: white; }
@ -38,15 +36,13 @@ const UISVG = `
class GLBench {
/** GLBench constructor
* @param { WebGLRenderingContext | WebGL2RenderingContext } gl context
* @param { WebGLRenderingContext | WebGL2RenderingContext | null } gl context
* @param { Object | undefined } settings additional settings
*/
constructor(gl, settings = {}) {
this.css = UICSS;
this.svg = UISVG;
// eslint-disable-next-line @typescript-eslint/no-empty-function
this.paramLogger = () => {};
// eslint-disable-next-line @typescript-eslint/no-empty-function
this.chartLogger = () => {};
this.chartLen = 20;
this.chartHz = 20;
@ -93,7 +89,6 @@ class GLBench {
const addProfiler = (fn, self, target) => {
const t = self.now();
// eslint-disable-next-line prefer-rest-params
fn.apply(target, arguments);
if (self.trackGPU) self.finished.push(glFinish(t, self.activeAccums.slice(0)));
};
@ -108,13 +103,11 @@ class GLBench {
if (gl[fn]) {
gl[fn] = addProfiler(gl[fn], this, gl);
} else {
// eslint-disable-next-line no-console
console.log('bench: cannot attach to webgl function');
}
/*
gl.getExtension = ((fn, self) => {
// eslint-disable-next-line prefer-rest-params
const ext = fn.apply(gl, arguments);
if (ext) {
['drawElementsInstancedANGLE', 'drawBuffersWEBGL'].forEach((fn2) => {
@ -149,7 +142,6 @@ class GLBench {
return (i, cpu, gpu, mem, fps, totalTime, frameId) => {
nodes['gl-cpu'][i].style.strokeDasharray = (cpu * 0.27).toFixed(0) + ' 100';
nodes['gl-gpu'][i].style.strokeDasharray = (gpu * 0.27).toFixed(0) + ' 100';
// eslint-disable-next-line no-nested-ternary
nodes['gl-mem'][i].innerHTML = names[i] ? names[i] : (mem ? 'mem: ' + mem.toFixed(0) + 'mb' : '');
nodes['gl-fps'][i].innerHTML = 'FPS: ' + fps.toFixed(1);
logger(names[i], cpu, gpu, mem, fps, totalTime, frameId);

157
demo/helpers/jsonview.js Normal file
View File

@ -0,0 +1,157 @@
let callbackFunction = null;
function createElement(type, config) {
const htmlElement = document.createElement(type);
if (config === undefined) return htmlElement;
if (config.className) htmlElement.className = config.className;
if (config.content) htmlElement.textContent = config.content;
if (config.style) htmlElement.style = config.style;
if (config.children) config.children.forEach((el) => !el || htmlElement.appendChild(el));
return htmlElement;
}
function createExpandedElement(node) {
const iElem = createElement('i');
if (node.expanded) { iElem.className = 'fas fa-caret-down'; } else { iElem.className = 'fas fa-caret-right'; }
const caretElem = createElement('div', { style: 'width: 18px; text-align: center; cursor: pointer', children: [iElem] });
const handleClick = node.toggle.bind(node);
caretElem.addEventListener('click', handleClick);
const indexElem = createElement('div', { className: 'json json-index', content: node.key });
indexElem.addEventListener('click', handleClick);
const typeElem = createElement('div', { className: 'json json-type', content: node.type });
const keyElem = createElement('div', { className: 'json json-key', content: node.key });
keyElem.addEventListener('click', handleClick);
const sizeElem = createElement('div', { className: 'json json-size' });
sizeElem.addEventListener('click', handleClick);
if (node.type === 'array') {
sizeElem.innerText = `[${node.children.length} items]`;
} else if (node.type === 'object') {
const size = node.children.find((item) => item.key === 'size');
sizeElem.innerText = size ? `{${size.value.toLocaleString()} bytes}` : `{${node.children.length} properties}`;
}
let lineChildren;
if (node.key === null) lineChildren = [caretElem, typeElem, sizeElem];
else if (node.parent.type === 'array') lineChildren = [caretElem, indexElem, sizeElem];
else lineChildren = [caretElem, keyElem, sizeElem];
const lineElem = createElement('div', { className: 'json-line', children: lineChildren });
if (node.depth > 0) lineElem.style = `margin-left: ${node.depth * 24}px;`;
return lineElem;
}
function createNotExpandedElement(node) {
const caretElem = createElement('div', { style: 'width: 18px' });
const keyElem = createElement('div', { className: 'json json-key', content: node.key });
const separatorElement = createElement('div', { className: 'json-separator', content: ':' });
const valueType = ` json-${typeof node.value}`;
const valueContent = node.value.toLocaleString();
const valueElement = createElement('div', { className: `json json-value${valueType}`, content: valueContent });
const lineElem = createElement('div', { className: 'json-line', children: [caretElem, keyElem, separatorElement, valueElement] });
if (node.depth > 0) lineElem.style = `margin-left: ${node.depth * 24}px;`;
return lineElem;
}
function createNode() {
return {
key: '',
parent: {},
value: null,
expanded: false,
type: '',
children: [],
elem: {},
depth: 0,
hideChildren() {
if (Array.isArray(this.children)) {
this.children.forEach((item) => {
item['elem']['classList'].add('hide');
if (item['expanded']) item.hideChildren();
});
}
},
showChildren() {
if (Array.isArray(this.children)) {
this.children.forEach((item) => {
item['elem']['classList'].remove('hide');
if (item['expanded']) item.showChildren();
});
}
},
toggle() {
if (this.expanded) {
this.hideChildren();
const icon = this.elem?.querySelector('.fas');
icon.classList.replace('fa-caret-down', 'fa-caret-right');
if (callbackFunction !== null) callbackFunction(null);
} else {
this.showChildren();
const icon = this.elem?.querySelector('.fas');
icon.classList.replace('fa-caret-right', 'fa-caret-down');
if (this.type === 'object') {
if (callbackFunction !== null) callbackFunction(`${this.parent?.key}/${this.key}`);
}
}
this.expanded = !this.expanded;
},
};
}
function getType(val) {
let type;
if (Array.isArray(val)) type = 'array';
else if (val === null) type = 'null';
else type = typeof val;
return type;
}
function traverseObject(obj, parent, filter) {
for (const key in obj) {
const child = createNode();
child.parent = parent;
child.key = key;
child.type = getType(obj[key]);
child.depth = parent.depth + 1;
child.expanded = false;
if (Array.isArray(filter)) {
for (const filtered of filter) {
if (key === filtered) return;
}
}
if (typeof obj[key] === 'object') {
child.children = [];
parent.children.push(child);
traverseObject(obj[key], child, filter);
child.elem = createExpandedElement(child);
} else {
child.value = obj[key];
child.elem = createNotExpandedElement(child);
parent.children.push(child);
}
}
}
function createTree(obj, title, filter) {
const tree = createNode();
tree.type = title;
tree.key = title;
tree.children = [];
tree.expanded = true;
traverseObject(obj, tree, filter);
tree.elem = createExpandedElement(tree);
return tree;
}
function traverseTree(node, callback) {
callback(node);
if (node.children !== null) node.children.forEach((item) => traverseTree(item, callback));
}
async function jsonView(json, element, title = '', filter = []) {
const tree = createTree(json, title, filter);
traverseTree(tree, (node) => {
if (!node.expanded) node.hideChildren();
element.appendChild(node.elem);
});
}
export default jsonView;
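A hypothetical usage sketch (the element id and filter values are illustrative; the function signature is as defined above):

```js
import jsonView from './jsonview.js';

const container = document.getElementById('json'); // target element, assumed to exist in the page
// render an object as a collapsible tree titled 'result', hiding any keys named 'tensor'
await jsonView({ face: [], performance: { total: 12 } }, container, 'result', ['tensor']);
```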

View File

@ -1,11 +1,9 @@
// @ts-nocheck
let instance = 0;
let CSScreated = false;
let theme = {
background: 'darkslategray',
hover: 'lightgray',
background: '#303030',
hover: '#505050',
itemBackground: 'black',
itemColor: 'white',
buttonBackground: 'lightblue',
@ -21,14 +19,14 @@ function createCSS() {
if (CSScreated) return;
const css = `
:root { --rounded: 0.1rem; }
.menu { position: absolute; top: 0rem; right: 0; width: max-content; padding: 0 0.2rem 0 0.2rem; line-height: 1.8rem; z-index: 10;
box-shadow: 0 0 8px dimgrey; background: ${theme.background}; border-radius: var(--rounded); border-color: black; border-style: solid; border-width: thin; }
.menu { position: absolute; top: 0rem; right: 0; min-width: 180px; width: max-content; padding: 0.2rem 0.8rem 0 0.8rem; line-height: 1.8rem; z-index: 10; background: ${theme.background}; border: none }
.button { text-shadow: none; }
.menu:hover { box-shadow: 0 0 8px ${theme.hover}; }
.menu-container { display: block; max-height: 100vh; }
.menu-container-fadeout { max-height: 0; overflow: hidden; transition: max-height, 0.5s ease; }
.menu-container-fadein { max-height: 100vh; overflow: hidden; transition: max-height, 0.5s ease; }
.menu-item { display: flex; white-space: nowrap; padding: 0.2rem; cursor: default; width: 100%; }
.menu-item:hover { background: ${theme.hover} }
.menu-title { cursor: pointer; }
.menu-hr { margin: 0.2rem; border: 1px solid rgba(0, 0, 0, 0.5) }
.menu-label { padding: 0; font-weight: 800; }
@ -41,30 +39,30 @@ function createCSS() {
.menu-chart-title { padding: 0; font-size: 0.8rem; font-weight: 800; align-items: center}
.menu-chart-canvas { background: transparent; margin: 0.2rem 0 0.2rem 0.6rem; }
.menu-button { border: 0; background: ${theme.buttonBackground}; width: -webkit-fill-available; padding: 8px; margin: 8px; cursor: pointer; box-shadow: 4px 4px 4px 0 dimgrey;
.menu-button { border: 0; background: ${theme.buttonBackground}; width: -webkit-fill-available; padding: 8px; margin: 8px; cursor: pointer;
border-radius: var(--rounded); justify-content: center; font-family: inherit; font-variant: inherit; font-size: 1rem; font-weight: 800; }
.menu-button:hover { background: ${theme.buttonHover}; box-shadow: 4px 4px 4px 0 black; }
.menu-button:focus { outline: none; }
.menu-checkbox { width: 2.8rem; height: 1rem; background: ${theme.itemBackground}; margin: 0.5rem 0.5rem 0 0; position: relative; border-radius: var(--rounded); }
.menu-checkbox { width: 2.6rem; height: 1rem; background: ${theme.itemBackground}; margin: 0.5rem 1.0rem 0 0; position: relative; border-radius: var(--rounded); }
.menu-checkbox:after { content: 'OFF'; color: ${theme.checkboxOff}; position: absolute; right: 0.2rem; top: -0.4rem; font-weight: 800; font-size: 0.5rem; }
.menu-checkbox:before { content: 'ON'; color: ${theme.checkboxOn}; position: absolute; left: 0.3rem; top: -0.4rem; font-weight: 800; font-size: 0.5rem; }
.menu-checkbox-label { width: 1.3rem; height: 0.8rem; cursor: pointer; position: absolute; top: 0.1rem; left: 0.1rem; z-index: 1; background: ${theme.checkboxOff};
.menu-checkbox-label { width: 1.3rem; height: 1rem; cursor: pointer; position: absolute; top: 0; left: 0rem; z-index: 1; background: ${theme.checkboxOff};
border-radius: var(--rounded); transition: left 0.6s ease; }
input[type=checkbox] { visibility: hidden; }
input[type=checkbox]:checked + label { left: 1.4rem; background: ${theme.checkboxOn}; }
.menu-range { margin: 0.2rem 0.5rem 0 0; width: 3.5rem; background: transparent; color: ${theme.rangeBackground}; }
.menu-range { margin: 0.2rem 1.0rem 0 0; width: 5rem; background: transparent; color: ${theme.rangeBackground}; }
.menu-range:before { color: ${theme.rangeLabel}; margin: 0 0.4rem 0 0; font-weight: 800; font-size: 0.6rem; position: relative; top: 0.3rem; content: attr(value); }
input[type=range] { -webkit-appearance: none; }
input[type=range]::-webkit-slider-runnable-track { width: 100%; height: 1rem; cursor: pointer; background: ${theme.itemBackground}; border-radius: var(--rounded); border: 1px; }
input[type=range]::-moz-range-track { width: 100%; height: 1rem; cursor: pointer; background: ${theme.itemBackground}; border-radius: var(--rounded); border: 1px; }
input[type=range]::-webkit-slider-thumb { border: 1px solid #000000; margin-top: 0.05rem; height: 0.9rem; width: 1rem; border-radius: var(--rounded); background: ${theme.rangeBackground}; cursor: pointer; -webkit-appearance: none; }
input[type=range]::-moz-range-thumb { border: 1px solid #000000; margin-top: 0.05rem; height: 0.9rem; width: 1rem; border-radius: var(--rounded); background: ${theme.rangeBackground}; cursor: pointer; -webkit-appearance: none; }
input[type=range]::-webkit-slider-thumb { border: 1px solid #000000; margin-top: 0; height: 1rem; width: 0.6rem; border-radius: var(--rounded); background: ${theme.rangeBackground}; cursor: pointer; -webkit-appearance: none; }
input[type=range]::-moz-range-thumb { border: 1px solid #000000; margin-top: 0rem; height: 1rem; width: 0.6rem; border-radius: var(--rounded); background: ${theme.rangeBackground}; cursor: pointer; -webkit-appearance: none; }
.svg-background { fill:darkslategrey; cursor:pointer; opacity: 0.6; }
.svg-background { fill:#303030; cursor:pointer; opacity: 0.6; }
.svg-foreground { fill:white; cursor:pointer; opacity: 0.8; }
`;
const el = document.createElement('style');
@ -86,14 +84,15 @@ class Menu {
}
createMenu(parent, title = '', position = { top: null, left: null, bottom: null, right: null }) {
/** @type {HTMLDivElement} */
this.menu = document.createElement('div');
this.menu.id = `menu-${instance}`;
this.menu.className = 'menu';
if (position) {
if (position.top) this.menu.style.top = position.top;
if (position.bottom) this.menu.style.bottom = position.bottom;
if (position.left) this.menu.style.left = position.left;
if (position.right) this.menu.style.right = position.right;
if (position.top) this.menu.style.top = `${position.top}`;
if (position.bottom) this.menu.style.bottom = `${position.bottom}`;
if (position.left) this.menu.style.left = `${position.left}`;
if (position.right) this.menu.style.right = `${position.right}`;
}
this.container = document.createElement('div');
@ -114,7 +113,7 @@ class Menu {
if (this.container && this.menu) {
this.container.classList.toggle('menu-container-fadeout');
this.container.classList.toggle('menu-container-fadein');
this.menu.style.borderStyle = this.container.classList.contains('menu-container-fadeout') ? 'none' : 'solid';
// this.menu.style.borderStyle = this.container.classList.contains('menu-container-fadeout') ? 'none' : 'solid';
}
});
@ -133,11 +132,11 @@ class Menu {
}
get width() {
return this.menu?.offsetWidth || 0;
return this.menu ? this.menu.offsetWidth : 0;
}
get height() {
return this.menu?.offsetHeight || 0;
return this.menu ? this.menu.offsetHeight : 0;
}
hide() {
@ -155,6 +154,7 @@ class Menu {
if (this.container && this.menu) {
this.container.classList.toggle('menu-container-fadeout');
this.container.classList.toggle('menu-container-fadein');
/*
if (this.container.classList.contains('menu-container-fadein') && evt) {
const x = evt.x || (evt.touches && evt.touches[0] ? evt.touches[0].pageX : null);
// const y = evt.y || (evt.touches && evt.touches[0] ? evt.touches[0].pageY : null);
@ -165,10 +165,11 @@ class Menu {
this.menu.style.left = '';
this.menu.style.right = '0';
}
this.menu.style.borderStyle = 'solid';
// this.menu.style.borderStyle = 'solid';
} else {
this.menu.style.borderStyle = 'none';
// this.menu.style.borderStyle = 'none';
}
*/
}
}
@ -203,8 +204,10 @@ class Menu {
el.innerHTML = `<div class="menu-checkbox"><input class="menu-checkbox" type="checkbox" id="${this.newID}" ${object[variable] ? 'checked' : ''}/><label class="menu-checkbox-label" for="${this.ID}"></label></div>${title}`;
if (this.container) this.container.appendChild(el);
el.addEventListener('change', (evt) => {
object[variable] = evt.target?.checked;
if (callback) callback(evt.target?.checked);
if (evt.target) {
object[variable] = evt.target['checked'];
if (callback) callback(evt.target['checked']);
}
});
return el;
}
@ -217,13 +220,13 @@ class Menu {
const def = item === selected ? 'selected' : '';
options += `<option value="${item}" ${def}>${item}</option>`;
}
el.innerHTML = `<div class="menu-list"><select name="${this.ID}" class="menu-list-item">${options}</select><label for="${this.ID}"></label></div>${title}`;
el.innerHTML = `<div class="menu-list"><select name="${this.ID}" title="${title}" class="menu-list-item">${options}</select><label for="${this.ID}"></label></div>${title}`;
el.style.fontFamily = document.body.style.fontFamily;
el.style.fontSize = document.body.style.fontSize;
el.style.fontVariant = document.body.style.fontVariant;
if (this.container) this.container.appendChild(el);
el.addEventListener('change', (evt) => {
if (callback) callback(items[evt.target?.selectedIndex]);
if (callback && evt.target) callback(items[evt.target['selectedIndex']]);
});
return el;
}
@ -231,16 +234,16 @@ class Menu {
addRange(title, object, variable, min, max, step, callback) {
const el = document.createElement('div');
el.className = 'menu-item';
el.innerHTML = `<input class="menu-range" type="range" id="${this.newID}" min="${min}" max="${max}" step="${step}" value="${object[variable]}">${title}`;
el.innerHTML = `<input class="menu-range" type="range" title="${title}" id="${this.newID}" min="${min}" max="${max}" step="${step}" value="${object[variable]}">${title}`;
if (this.container) this.container.appendChild(el);
el.addEventListener('change', (evt) => {
if (evt.target) {
object[variable] = parseInt(evt.target?.value) === parseFloat(evt.target?.value) ? parseInt(evt.target?.value) : parseFloat(evt.target?.value);
evt.target.setAttribute('value', evt.target.value);
if (callback) callback(evt.target?.value);
object[variable] = parseInt(evt.target['value']) === parseFloat(evt.target['value']) ? parseInt(evt.target['value']) : parseFloat(evt.target['value']);
evt.target.setAttribute('value', evt.target['value']);
if (callback) callback(evt.target['value']);
}
});
el.input = el.children[0];
el['input'] = el.children[0];
return el;
}
@ -280,7 +283,6 @@ class Menu {
return el;
}
// eslint-disable-next-line class-methods-use-this
updateValue(title, val, suffix = '') {
const el = document.getElementById(`menu-val-${title}`);
if (el) el.innerText = `${title}: ${val}${suffix}`;
@ -297,12 +299,13 @@ class Menu {
return el;
}
// eslint-disable-next-line class-methods-use-this
async updateChart(id, values) {
if (!values || (values.length === 0)) return;
/** @type {HTMLCanvasElement} */
const canvas = document.getElementById(`menu-canvas-${id}`);
if (!canvas) return;
const ctx = canvas.getContext('2d');
if (!ctx) return;
ctx.fillStyle = theme.background;
ctx.fillRect(0, 0, canvas.width, canvas.height);
const width = canvas.width / values.length;
@ -316,7 +319,7 @@ class Menu {
ctx.fillRect(i * width, 0, width - 4, canvas.height);
ctx.fillStyle = theme.background;
ctx.font = `${width / 1.5}px "Segoe UI"`;
ctx.fillText(Math.round(values[i]), i * width + 1, canvas.height - 1, width - 1);
ctx.fillText(Math.round(values[i]).toString(), i * width + 1, canvas.height - 1, width - 1);
}
}
}

View File

@ -1,870 +0,0 @@
// @ts-nocheck
import { Vector2, Vector3, Spherical, MOUSE, Quaternion, EventDispatcher } from './three.js';
/**
* @author qiao / https://github.com/qiao
* @author mrdoob / http://mrdoob.com
* @author alteredq / http://alteredqualia.com/
* @author WestLangley / http://github.com/WestLangley
* @author erich666 / http://erichaines.com
*/
// This set of controls performs orbiting, dollying (zooming), and panning.
// Unlike TrackballControls, it maintains the "up" direction object.up (+Y by default).
//
// Orbit - left mouse / touch: one-finger move
// Zoom - middle mouse, or mousewheel / touch: two-finger spread or squish
// Pan - right mouse, or left mouse + ctrl/metaKey, or arrow keys / touch: two-finger move
const OrbitControls = function (object, domElement) {
this.object = object;
this.domElement = (domElement !== undefined) ? domElement : document;
// Set to false to disable this control
this.enabled = true;
// "target" sets the location of focus, where the object orbits around
this.target = new Vector3();
// How far you can dolly in and out ( PerspectiveCamera only )
this.minDistance = 0;
this.maxDistance = Infinity;
// How far you can zoom in and out ( OrthographicCamera only )
this.minZoom = 0;
this.maxZoom = Infinity;
// How far you can orbit vertically, upper and lower limits.
// Range is 0 to Math.PI radians.
this.minPolarAngle = 0; // radians
this.maxPolarAngle = Math.PI; // radians
// How far you can orbit horizontally, upper and lower limits.
// If set, must be a sub-interval of the interval [ - Math.PI, Math.PI ].
this.minAzimuthAngle = -Infinity; // radians
this.maxAzimuthAngle = Infinity; // radians
// Set to true to enable damping (inertia)
// If damping is enabled, you must call controls.update() in your animation loop
this.enableDamping = false;
this.dampingFactor = 0.25;
// This option actually enables dollying in and out; left as "zoom" for backwards compatibility.
// Set to false to disable zooming
this.enableZoom = true;
this.zoomSpeed = 1.0;
// Set to false to disable rotating
this.enableRotate = true;
this.rotateSpeed = 1.0;
// Set to false to disable panning
this.enablePan = true;
this.panSpeed = 1.0;
this.screenSpacePanning = false; // if true, pan in screen-space
this.keyPanSpeed = 7.0; // pixels moved per arrow key push
// Set to true to automatically rotate around the target
// If auto-rotate is enabled, you must call controls.update() in your animation loop
this.autoRotate = false;
this.autoRotateSpeed = 2.0; // 30 seconds per round when fps is 60
// Set to false to disable use of the keys
this.enableKeys = true;
// The four arrow keys
this.keys = { LEFT: 37, UP: 38, RIGHT: 39, BOTTOM: 40 };
// Mouse buttons
this.mouseButtons = { LEFT: MOUSE.LEFT, MIDDLE: MOUSE.MIDDLE, RIGHT: MOUSE.RIGHT };
// for reset
this.target0 = this.target.clone();
this.position0 = this.object.position.clone();
this.zoom0 = this.object.zoom;
//
// public methods
//
this.getPolarAngle = function () {
return spherical.phi;
};
this.getAzimuthalAngle = function () {
return spherical.theta;
};
this.saveState = function () {
scope.target0.copy(scope.target);
scope.position0.copy(scope.object.position);
scope.zoom0 = scope.object.zoom;
};
this.reset = function () {
scope.target.copy(scope.target0);
scope.object.position.copy(scope.position0);
scope.object.zoom = scope.zoom0;
scope.object.updateProjectionMatrix();
scope.dispatchEvent(changeEvent);
scope.update();
state = STATE.NONE;
};
// this method is exposed, but perhaps it would be better if we can make it private...
this.update = (function () {
const offset = new Vector3();
// so camera.up is the orbit axis
const quat = new Quaternion().setFromUnitVectors(object.up, new Vector3(0, 1, 0));
const quatInverse = quat.clone().inverse();
const lastPosition = new Vector3();
const lastQuaternion = new Quaternion();
return function update() {
const position = scope.object.position;
offset.copy(position).sub(scope.target);
// rotate offset to "y-axis-is-up" space
offset.applyQuaternion(quat);
// angle from z-axis around y-axis
spherical.setFromVector3(offset);
if (scope.autoRotate && state === STATE.NONE) {
rotateLeft(getAutoRotationAngle());
}
spherical.theta += sphericalDelta.theta;
spherical.phi += sphericalDelta.phi;
// restrict theta to be between desired limits
spherical.theta = Math.max(scope.minAzimuthAngle, Math.min(scope.maxAzimuthAngle, spherical.theta));
// restrict phi to be between desired limits
spherical.phi = Math.max(scope.minPolarAngle, Math.min(scope.maxPolarAngle, spherical.phi));
spherical.makeSafe();
spherical.radius *= scale;
// restrict radius to be between desired limits
spherical.radius = Math.max(scope.minDistance, Math.min(scope.maxDistance, spherical.radius));
// move target to panned location
scope.target.add(panOffset);
offset.setFromSpherical(spherical);
// rotate offset back to "camera-up-vector-is-up" space
offset.applyQuaternion(quatInverse);
position.copy(scope.target).add(offset);
scope.object.lookAt(scope.target);
if (scope.enableDamping === true) {
sphericalDelta.theta *= (1 - scope.dampingFactor);
sphericalDelta.phi *= (1 - scope.dampingFactor);
panOffset.multiplyScalar(1 - scope.dampingFactor);
} else {
sphericalDelta.set(0, 0, 0);
panOffset.set(0, 0, 0);
}
scale = 1;
// update condition is:
// min(camera displacement, camera rotation in radians)^2 > EPS
// using small-angle approximation cos(x/2) = 1 - x^2 / 8
if (zoomChanged
|| lastPosition.distanceToSquared(scope.object.position) > EPS
|| 8 * (1 - lastQuaternion.dot(scope.object.quaternion)) > EPS) {
scope.dispatchEvent(changeEvent);
lastPosition.copy(scope.object.position);
lastQuaternion.copy(scope.object.quaternion);
zoomChanged = false;
return true;
}
return false;
};
}());
this.dispose = function () {
scope.domElement.removeEventListener('contextmenu', onContextMenu, false);
scope.domElement.removeEventListener('mousedown', onMouseDown, false);
scope.domElement.removeEventListener('wheel', onMouseWheel, false);
scope.domElement.removeEventListener('touchstart', onTouchStart, false);
scope.domElement.removeEventListener('touchend', onTouchEnd, false);
scope.domElement.removeEventListener('touchmove', onTouchMove, false);
document.removeEventListener('mousemove', onMouseMove, false);
document.removeEventListener('mouseup', onMouseUp, false);
window.removeEventListener('keydown', onKeyDown, false);
// scope.dispatchEvent( { type: 'dispose' } ); // should this be added here?
};
//
// internals
//
var scope = this;
var changeEvent = { type: 'change' };
const startEvent = { type: 'start' };
const endEvent = { type: 'end' };
var STATE = { NONE: -1, ROTATE: 0, DOLLY: 1, PAN: 2, TOUCH_ROTATE: 3, TOUCH_DOLLY_PAN: 4 };
var state = STATE.NONE;
var EPS = 0.000001;
// current position in spherical coordinates
var spherical = new Spherical();
var sphericalDelta = new Spherical();
var scale = 1;
var panOffset = new Vector3();
var zoomChanged = false;
const rotateStart = new Vector2();
const rotateEnd = new Vector2();
const rotateDelta = new Vector2();
const panStart = new Vector2();
const panEnd = new Vector2();
const panDelta = new Vector2();
const dollyStart = new Vector2();
const dollyEnd = new Vector2();
const dollyDelta = new Vector2();
function getAutoRotationAngle() {
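// returns 2π/60/60 * speed radians per frame: at 60 fps with autoRotateSpeed 2.0, one full orbit takes 30 seconds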
return 2 * Math.PI / 60 / 60 * scope.autoRotateSpeed;
}
function getZoomScale() {
return Math.pow(0.95, scope.zoomSpeed);
}
function rotateLeft(angle) {
sphericalDelta.theta -= angle;
}
function rotateUp(angle) {
sphericalDelta.phi -= angle;
}
const panLeft = (function () {
const v = new Vector3();
return function panLeft(distance, objectMatrix) {
v.setFromMatrixColumn(objectMatrix, 0); // get X column of objectMatrix
v.multiplyScalar(-distance);
panOffset.add(v);
};
}());
const panUp = (function () {
const v = new Vector3();
return function panUp(distance, objectMatrix) {
if (scope.screenSpacePanning === true) {
v.setFromMatrixColumn(objectMatrix, 1);
} else {
v.setFromMatrixColumn(objectMatrix, 0);
v.crossVectors(scope.object.up, v);
}
v.multiplyScalar(distance);
panOffset.add(v);
};
}());
// deltaX and deltaY are in pixels; right and down are positive
const pan = (function () {
const offset = new Vector3();
return function pan(deltaX, deltaY) {
const element = scope.domElement === document ? scope.domElement.body : scope.domElement;
if (scope.object.isPerspectiveCamera) {
// perspective
const position = scope.object.position;
offset.copy(position).sub(scope.target);
let targetDistance = offset.length();
// half of the fov is center to top of screen
targetDistance *= Math.tan((scope.object.fov / 2) * Math.PI / 180.0);
// we use only clientHeight here so aspect ratio does not distort speed
panLeft(2 * deltaX * targetDistance / element.clientHeight, scope.object.matrix);
panUp(2 * deltaY * targetDistance / element.clientHeight, scope.object.matrix);
} else if (scope.object.isOrthographicCamera) {
// orthographic
panLeft(deltaX * (scope.object.right - scope.object.left) / scope.object.zoom / element.clientWidth, scope.object.matrix);
panUp(deltaY * (scope.object.top - scope.object.bottom) / scope.object.zoom / element.clientHeight, scope.object.matrix);
} else {
// camera neither orthographic nor perspective
scope.enablePan = false;
}
};
}());
function dollyIn(dollyScale) {
if (scope.object.isPerspectiveCamera) {
scale /= dollyScale;
} else if (scope.object.isOrthographicCamera) {
scope.object.zoom = Math.max(scope.minZoom, Math.min(scope.maxZoom, scope.object.zoom * dollyScale));
scope.object.updateProjectionMatrix();
zoomChanged = true;
} else {
scope.enableZoom = false;
}
}
function dollyOut(dollyScale) {
if (scope.object.isPerspectiveCamera) {
scale *= dollyScale;
} else if (scope.object.isOrthographicCamera) {
scope.object.zoom = Math.max(scope.minZoom, Math.min(scope.maxZoom, scope.object.zoom / dollyScale));
scope.object.updateProjectionMatrix();
zoomChanged = true;
} else {
scope.enableZoom = false;
}
}
//
// event callbacks - update the object state
//
function handleMouseDownRotate(event) {
// console.log( 'handleMouseDownRotate' );
rotateStart.set(event.clientX, event.clientY);
}
function handleMouseDownDolly(event) {
// console.log( 'handleMouseDownDolly' );
dollyStart.set(event.clientX, event.clientY);
}
function handleMouseDownPan(event) {
// console.log( 'handleMouseDownPan' );
panStart.set(event.clientX, event.clientY);
}
function handleMouseMoveRotate(event) {
// console.log( 'handleMouseMoveRotate' );
rotateEnd.set(event.clientX, event.clientY);
rotateDelta.subVectors(rotateEnd, rotateStart).multiplyScalar(scope.rotateSpeed);
const element = scope.domElement === document ? scope.domElement.body : scope.domElement;
rotateLeft(2 * Math.PI * rotateDelta.x / element.clientHeight); // yes, height
rotateUp(2 * Math.PI * rotateDelta.y / element.clientHeight);
rotateStart.copy(rotateEnd);
scope.update();
}
function handleMouseMoveDolly(event) {
// console.log( 'handleMouseMoveDolly' );
dollyEnd.set(event.clientX, event.clientY);
dollyDelta.subVectors(dollyEnd, dollyStart);
if (dollyDelta.y > 0) {
dollyIn(getZoomScale());
} else if (dollyDelta.y < 0) {
dollyOut(getZoomScale());
}
dollyStart.copy(dollyEnd);
scope.update();
}
function handleMouseMovePan(event) {
// console.log( 'handleMouseMovePan' );
panEnd.set(event.clientX, event.clientY);
panDelta.subVectors(panEnd, panStart).multiplyScalar(scope.panSpeed);
pan(panDelta.x, panDelta.y);
panStart.copy(panEnd);
scope.update();
}
function handleMouseUp(event) {
// console.log( 'handleMouseUp' );
}
function handleMouseWheel(event) {
// console.log( 'handleMouseWheel' );
if (event.deltaY < 0) {
dollyOut(getZoomScale());
} else if (event.deltaY > 0) {
dollyIn(getZoomScale());
}
scope.update();
}
function handleKeyDown(event) {
// console.log( 'handleKeyDown' );
switch (event.keyCode) {
case scope.keys.UP:
pan(0, scope.keyPanSpeed);
scope.update();
break;
case scope.keys.BOTTOM:
pan(0, -scope.keyPanSpeed);
scope.update();
break;
case scope.keys.LEFT:
pan(scope.keyPanSpeed, 0);
scope.update();
break;
case scope.keys.RIGHT:
pan(-scope.keyPanSpeed, 0);
scope.update();
break;
}
}
function handleTouchStartRotate(event) {
// console.log( 'handleTouchStartRotate' );
rotateStart.set(event.touches[0].pageX, event.touches[0].pageY);
}
function handleTouchStartDollyPan(event) {
// console.log( 'handleTouchStartDollyPan' );
if (scope.enableZoom) {
const dx = event.touches[0].pageX - event.touches[1].pageX;
const dy = event.touches[0].pageY - event.touches[1].pageY;
const distance = Math.sqrt(dx * dx + dy * dy);
dollyStart.set(0, distance);
}
if (scope.enablePan) {
const x = 0.5 * (event.touches[0].pageX + event.touches[1].pageX);
const y = 0.5 * (event.touches[0].pageY + event.touches[1].pageY);
panStart.set(x, y);
}
}
function handleTouchMoveRotate(event) {
// console.log( 'handleTouchMoveRotate' );
rotateEnd.set(event.touches[0].pageX, event.touches[0].pageY);
rotateDelta.subVectors(rotateEnd, rotateStart).multiplyScalar(scope.rotateSpeed);
const element = scope.domElement === document ? scope.domElement.body : scope.domElement;
rotateLeft(2 * Math.PI * rotateDelta.x / element.clientHeight); // yes, height
rotateUp(2 * Math.PI * rotateDelta.y / element.clientHeight);
rotateStart.copy(rotateEnd);
scope.update();
}
function handleTouchMoveDollyPan(event) {
// console.log( 'handleTouchMoveDollyPan' );
if (scope.enableZoom) {
const dx = event.touches[0].pageX - event.touches[1].pageX;
const dy = event.touches[0].pageY - event.touches[1].pageY;
const distance = Math.sqrt(dx * dx + dy * dy);
dollyEnd.set(0, distance);
dollyDelta.set(0, Math.pow(dollyEnd.y / dollyStart.y, scope.zoomSpeed));
dollyIn(dollyDelta.y);
dollyStart.copy(dollyEnd);
}
if (scope.enablePan) {
const x = 0.5 * (event.touches[0].pageX + event.touches[1].pageX);
const y = 0.5 * (event.touches[0].pageY + event.touches[1].pageY);
panEnd.set(x, y);
panDelta.subVectors(panEnd, panStart).multiplyScalar(scope.panSpeed);
pan(panDelta.x, panDelta.y);
panStart.copy(panEnd);
}
scope.update();
}
function handleTouchEnd(event) {
// console.log( 'handleTouchEnd' );
}
//
// event handlers - FSM: listen for events and reset state
//
function onMouseDown(event) {
if (scope.enabled === false) return;
event.preventDefault();
switch (event.button) {
case scope.mouseButtons.LEFT:
if (event.ctrlKey || event.metaKey) {
if (scope.enablePan === false) return;
handleMouseDownPan(event);
state = STATE.PAN;
} else {
if (scope.enableRotate === false) return;
handleMouseDownRotate(event);
state = STATE.ROTATE;
}
break;
case scope.mouseButtons.MIDDLE:
if (scope.enableZoom === false) return;
handleMouseDownDolly(event);
state = STATE.DOLLY;
break;
case scope.mouseButtons.RIGHT:
if (scope.enablePan === false) return;
handleMouseDownPan(event);
state = STATE.PAN;
break;
}
if (state !== STATE.NONE) {
document.addEventListener('mousemove', onMouseMove, false);
document.addEventListener('mouseup', onMouseUp, false);
scope.dispatchEvent(startEvent);
}
}
function onMouseMove(event) {
if (scope.enabled === false) return;
event.preventDefault();
switch (state) {
case STATE.ROTATE:
if (scope.enableRotate === false) return;
handleMouseMoveRotate(event);
break;
case STATE.DOLLY:
if (scope.enableZoom === false) return;
handleMouseMoveDolly(event);
break;
case STATE.PAN:
if (scope.enablePan === false) return;
handleMouseMovePan(event);
break;
}
}
function onMouseUp(event) {
if (scope.enabled === false) return;
handleMouseUp(event);
document.removeEventListener('mousemove', onMouseMove, false);
document.removeEventListener('mouseup', onMouseUp, false);
scope.dispatchEvent(endEvent);
state = STATE.NONE;
}
function onMouseWheel(event) {
if (scope.enabled === false || scope.enableZoom === false || (state !== STATE.NONE && state !== STATE.ROTATE)) return;
event.preventDefault();
event.stopPropagation();
scope.dispatchEvent(startEvent);
handleMouseWheel(event);
scope.dispatchEvent(endEvent);
}
function onKeyDown(event) {
if (scope.enabled === false || scope.enableKeys === false || scope.enablePan === false) return;
handleKeyDown(event);
}
function onTouchStart(event) {
if (scope.enabled === false) return;
event.preventDefault();
switch (event.touches.length) {
case 1: // one-fingered touch: rotate
if (scope.enableRotate === false) return;
handleTouchStartRotate(event);
state = STATE.TOUCH_ROTATE;
break;
case 2: // two-fingered touch: dolly-pan
if (scope.enableZoom === false && scope.enablePan === false) return;
handleTouchStartDollyPan(event);
state = STATE.TOUCH_DOLLY_PAN;
break;
default:
state = STATE.NONE;
}
if (state !== STATE.NONE) {
scope.dispatchEvent(startEvent);
}
}
function onTouchMove(event) {
if (scope.enabled === false) return;
event.preventDefault();
event.stopPropagation();
switch (event.touches.length) {
case 1: // one-fingered touch: rotate
if (scope.enableRotate === false) return;
if (state !== STATE.TOUCH_ROTATE) return; // is this needed?
handleTouchMoveRotate(event);
break;
case 2: // two-fingered touch: dolly-pan
if (scope.enableZoom === false && scope.enablePan === false) return;
if (state !== STATE.TOUCH_DOLLY_PAN) return; // is this needed?
handleTouchMoveDollyPan(event);
break;
default:
state = STATE.NONE;
}
}
function onTouchEnd(event) {
if (scope.enabled === false) return;
handleTouchEnd(event);
scope.dispatchEvent(endEvent);
state = STATE.NONE;
}
function onContextMenu(event) {
if (scope.enabled === false) return;
event.preventDefault();
}
//
scope.domElement.addEventListener('contextmenu', onContextMenu, false);
scope.domElement.addEventListener('mousedown', onMouseDown, false);
scope.domElement.addEventListener('wheel', onMouseWheel, false);
scope.domElement.addEventListener('touchstart', onTouchStart, false);
scope.domElement.addEventListener('touchend', onTouchEnd, false);
scope.domElement.addEventListener('touchmove', onTouchMove, false);
window.addEventListener('keydown', onKeyDown, false);
// force an update at start
this.update();
};
OrbitControls.prototype = Object.create(EventDispatcher.prototype);
OrbitControls.prototype.constructor = OrbitControls;
Object.defineProperties(OrbitControls.prototype, {
center: {
get() {
return this.target;
},
},
// backward compatibility
noZoom: {
get() {
return !this.enableZoom;
},
set(value) {
this.enableZoom = !value;
},
},
noRotate: {
get() {
return !this.enableRotate;
},
set(value) {
this.enableRotate = !value;
},
},
noPan: {
get() {
return !this.enablePan;
},
set(value) {
this.enablePan = !value;
},
},
noKeys: {
get() {
return !this.enableKeys;
},
set(value) {
this.enableKeys = !value;
},
},
staticMoving: {
get() {
return !this.enableDamping;
},
set(value) {
this.enableDamping = !value;
},
},
dynamicDampingFactor: {
get() {
return this.dampingFactor;
},
set(value) {
this.dampingFactor = value;
},
},
});
export { OrbitControls };
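A minimal usage sketch for the control above; it assumes the bundled `./three.js` re-exports the standard three.js classes and that this file is saved as `orbitControls.js` (both assumptions):

```js
import { PerspectiveCamera, Scene, WebGLRenderer } from './three.js'; // assumes these re-exports exist
import { OrbitControls } from './orbitControls.js'; // assumed file name

const scene = new Scene();
const camera = new PerspectiveCamera(45, window.innerWidth / window.innerHeight, 0.1, 1000);
camera.position.set(0, 1, 3);
const renderer = new WebGLRenderer();
document.body.appendChild(renderer.domElement);

const controls = new OrbitControls(camera, renderer.domElement);
controls.enableDamping = true; // inertia: requires controls.update() in the animation loop, per the comment above
controls.dampingFactor = 0.1;

function animate() {
  requestAnimationFrame(animate);
  controls.update(); // applies damping and auto-rotate before each render
  renderer.render(scene, camera);
}
animate();
```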

File diff suppressed because one or more lines are too long

85
demo/helpers/webrtc.js Normal file
View File

@ -0,0 +1,85 @@
const debug = true;
async function log(...msg) {
if (debug) {
const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
console.log(ts, 'webrtc', ...msg); // eslint-disable-line no-console
}
}
/**
* helper implementation of webrtc
* performs:
* - discovery
* - handshake
* - connect to webrtc stream
* - assign webrtc stream to video element
*
* for development purposes I'm using a test webrtc server that reads an rtsp stream from a security camera:
* <https://github.com/vladmandic/stream-rtsp>
*
* @param {string} server
* @param {string} streamName
* @param {HTMLVideoElement | string} elementName
* @return {Promise<void>}
*/
async function webRTC(server, streamName, elementName) {
const suuid = streamName;
log('client starting');
log(`server: ${server} stream: ${suuid}`);
const stream = new MediaStream();
const connection = new RTCPeerConnection();
connection.oniceconnectionstatechange = () => log('connection', connection.iceConnectionState);
connection.onnegotiationneeded = async () => {
let offer;
let res; // declared here so it remains in scope after the if block below
if (connection.localDescription) {
offer = await connection.createOffer();
await connection.setLocalDescription(offer);
res = await fetch(`${server}/stream/receiver/${suuid}`, {
method: 'POST',
headers: { 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' },
body: new URLSearchParams({
suuid: `${suuid}`,
data: `${btoa(connection.localDescription.sdp || '')}`,
}),
});
}
const data = res && res.ok ? await res.text() : '';
if (data.length === 0 || !offer) {
log('cannot connect:', server);
} else {
connection.setRemoteDescription(new RTCSessionDescription({
type: 'answer',
sdp: atob(data),
}));
log('negotiation start:', offer);
}
};
connection.ontrack = (event) => {
stream.addTrack(event.track);
const video = (typeof elementName === 'string') ? document.getElementById(elementName) : elementName;
if (video instanceof HTMLVideoElement) video.srcObject = stream;
else log('element is not a video element:', elementName);
// video.onloadeddata = async () => log('resolution:', video.videoWidth, video.videoHeight);
log('received track:', event.track);
};
const res = await fetch(`${server}/stream/codec/${suuid}`);
const streams = res && res.ok ? await res.json() : [];
if (streams.length === 0) log('received no streams');
else log('received streams:', streams);
for (const s of streams) connection.addTransceiver(s.Type, { direction: 'sendrecv' });
const channel = connection.createDataChannel(suuid, { maxRetransmits: 10 });
channel.onmessage = (e) => log('channel message:', channel.label, 'payload', e.data);
channel.onerror = (e) => log('channel error:', channel.label, 'payload', e);
// channel.onbufferedamountlow = (e) => log('channel buffering:', channel.label, 'payload', e);
channel.onclose = () => log('channel close', channel.label);
channel.onopen = () => {
log('channel open', channel.label);
setInterval(() => channel.send('ping'), 1000); // send ping because PION doesn't handle RTCSessionDescription.close()
};
}
export default webRTC;
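A hedged usage sketch for the helper above; the server URL and stream name are placeholders, and the import path assumes the demo folder layout:

```js
import webRTC from './helpers/webrtc.js'; // path relative to the demo page (assumption)

window.onload = async () => {
  // server from <https://github.com/vladmandic/stream-rtsp>; url and stream name are placeholders
  await webRTC('https://localhost:8002', 'camera1', 'video'); // 'video' is the id of a <video> element
};
```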

17
demo/icons.css Normal file

File diff suppressed because one or more lines are too long

133
demo/index-pwa.js Normal file
View File

@ -0,0 +1,133 @@
/**
* PWA Service Worker for Human main demo
*/
/* eslint-disable no-restricted-globals */
/// <reference lib="webworker" />
const skipCaching = false;
const cacheName = 'Human';
const cacheFiles = ['/favicon.ico', 'manifest.webmanifest']; // assets and models are cached on first access
let cacheModels = true; // *.bin; *.json
let cacheWASM = true; // *.wasm
let cacheOther = false; // *
let listening = false;
const stats = { hit: 0, miss: 0 };
const log = (...msg) => {
const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
console.log(ts, 'pwa', ...msg); // eslint-disable-line no-console
};
async function updateCached(req) {
fetch(req)
.then((update) => {
// update cache if request is ok
if (update.ok) {
caches
.open(cacheName)
.then((cache) => cache.put(req, update))
.catch((err) => log('cache update error', err)); // eslint-disable-line promise/no-nesting
}
return true;
})
.catch((err) => {
log('fetch error', err);
return false;
});
}
async function getCached(evt) {
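// cache-first with background revalidation (a stale-while-revalidate pattern)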
// just fetch
if (skipCaching) return fetch(evt.request);
// get from cache or fetch if not in cache
let found = await caches.match(evt.request);
if (found && found.ok) {
stats.hit += 1;
} else {
stats.miss += 1;
found = await fetch(evt.request);
}
// if still don't have it, return offline page
if (!found || !found.ok) {
found = await caches.match('offline.html');
}
// update cache in the background
if (found && found.type === 'basic' && found.ok) {
const uri = new URL(evt.request.url);
if (uri.pathname.endsWith('.bin') || uri.pathname.endsWith('.json')) {
if (cacheModels) updateCached(evt.request);
} else if (uri.pathname.endsWith('.wasm')) {
if (cacheWASM) updateCached(evt.request);
} else if (cacheOther) {
updateCached(evt.request);
}
}
return found;
}
function cacheInit() {
return caches.open(cacheName) // return the promise so install can waitUntil() it
.then((cache) => cache.addAll(cacheFiles)
.then( // eslint-disable-line promise/no-nesting
() => log('cache refresh:', cacheFiles.length, 'files'),
(err) => log('cache error', err),
))
.catch(() => log('cache error'));
}
if (!listening) {
// get messages from main app to update configuration
self.addEventListener('message', (evt) => {
log('event message:', evt.data);
switch (evt.data.key) {
case 'cacheModels': cacheModels = evt.data.val; break;
case 'cacheWASM': cacheWASM = evt.data.val; break;
case 'cacheOther': cacheOther = evt.data.val; break;
default:
}
});
self.addEventListener('install', (evt) => {
log('install');
self.skipWaiting();
evt.waitUntil(cacheInit()); // cacheInit returns the cache-population promise
});
self.addEventListener('activate', (evt) => {
log('activate');
evt.waitUntil(self.clients.claim());
});
self.addEventListener('fetch', (evt) => {
const uri = new URL(evt.request.url);
// if (uri.pathname === '/') { log('cache skip /', evt.request); return; } // skip root access requests
if (evt.request.cache === 'only-if-cached' && evt.request.mode !== 'same-origin') return; // required due to chrome bug
if (uri.origin !== self.location.origin) return; // skip non-local requests
if (evt.request.method !== 'GET') return; // only cache get requests
if (evt.request.url.includes('/api/')) return; // don't cache api requests, failures are handled at the time of call
const response = getCached(evt);
if (response) evt.respondWith(response);
else log('fetch response missing');
});
// only trigger controllerchange once
let refreshed = false;
self.addEventListener('controllerchange', (evt) => {
log(`PWA: ${evt.type}`);
if (refreshed) return;
refreshed = true;
self.location.reload();
});
listening = true;
}
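A sketch of the main-app side that registers this worker and drives the `message` handler above; the script path and scope are assumptions:

```js
async function registerPWA() {
  if (!('serviceWorker' in navigator)) return;
  await navigator.serviceWorker.register('index-pwa.js', { scope: './' }); // path and scope are assumptions
  const ready = await navigator.serviceWorker.ready; // the worker may not be active immediately after register()
  // message shape { key, val } matches the 'message' listener above
  ready.active?.postMessage({ key: 'cacheWASM', val: false });
}
registerPWA();
```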

View File

@ -1,29 +1,37 @@
/**
* Web worker used by main demo app
* Loaded from index.js
*/
/// <reference lib="webworker"/>
// load Human using IIFE script as Chrome Mobile does not support Modules as Workers
self.importScripts('../dist/human.js'); // eslint-disable-line no-restricted-globals
let busy = false;
// eslint-disable-next-line new-cap, no-undef
const human = new Human.default();
function log(...msg) {
const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
// eslint-disable-next-line no-console
if (msg) console.log(ts, 'Human:', ...msg);
}
onmessage = async (msg) => { // receive message from main thread
if (busy) return;
busy = true;
// received from index.js using:
// worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
const image = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
let result = {};
try {
result = await human.detect(image, msg.data.userConfig);
} catch (err) {
result.error = err.message;
log('worker thread error:', err.message);
}
result.tensors = human.tf.engine().state.numTensors; // append to result object so the main thread gets the info
result.backend = human.tf.getBackend(); // append to result object so the main thread gets the info
if (result.canvas) { // convert canvas to imageData and send it by reference
const canvas = new OffscreenCanvas(result.canvas.width, result.canvas.height);
const ctx = canvas.getContext('2d');
if (ctx) ctx.drawImage(result.canvas, 0, 0);
const img = ctx ? ctx.getImageData(0, 0, result.canvas.width, result.canvas.height) : null;
result.canvas = null; // must strip original canvas from return value as it cannot be transferred from worker thread
if (img) postMessage({ result, image: img.data.buffer, width: msg.data.width, height: msg.data.height }, [img.data.buffer]); // send message back to main thread with canvas
else postMessage({ result }); // send message back to main thread without canvas
} else {
postMessage({ result }); // send message back to main thread without canvas
}
busy = false;
};
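The matching main-thread side, sketched from the protocol comments above; the element ids, worker path, and the empty `userConfig` are assumptions:

```js
const worker = new Worker('worker.js'); // path relative to the demo page (assumption)
worker.onmessage = (msg) => {
  // result is always present; the image buffer arrives only when the worker rendered a canvas
  if (msg.data.image) {
    const processed = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
    // draw `processed` onto an output canvas here
  }
  console.log('tensors:', msg.data.result.tensors, 'backend:', msg.data.result.backend);
};

const canvas = document.getElementById('canvas'); // assumed elements
const video = document.getElementById('video');
const ctx = canvas.getContext('2d');
ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
const image = ctx.getImageData(0, 0, canvas.width, canvas.height);
// transfer the pixel buffer by reference instead of copying it
worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, userConfig: {} }, [image.data.buffer]);
```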

View File

@ -1,41 +1,42 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="viewport" content="width=device-width">
<meta name="viewport" content="width=device-width" id="viewport">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="./manifest.webmanifest">
<link rel="shortcut icon" href="../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../assets/icon.png">
<!-- <script src="../node_modules/@tensorflow/tfjs/dist/tf.es2017.js"></script> -->
<link rel="stylesheet" type="text/css" href="./icons.css">
<script src="./index.js" type="module"></script>
<style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../assets/lato-light.woff2') }
@font-face { font-family: 'FA'; font-display: swap; font-style: normal; font-weight: 900; src: local('FA'), url('../assets/fa-solid-900.woff2'); }
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
body::-webkit-scrollbar { display: none; }
hr { width: 100%; }
.play { position: absolute; width: 256px; height: 256px; z-index: 9; bottom: 15%; left: 50%; margin-left: -125px; display: none; filter: grayscale(1); }
.play:hover { filter: grayscale(0); }
.btn-background { fill:grey; cursor: pointer; opacity: 0.6; }
.btn-background:hover { opacity: 1; }
.btn-foreground { fill:white; cursor: pointer; opacity: 0.8; }
.btn-foreground:hover { opacity: 1; }
.status { position: absolute; width: 100vw; bottom: 10%; text-align: center; font-size: 3rem; font-weight: 100; text-shadow: 2px 2px #303030; }
.thumbnail { margin: 8px; box-shadow: 0 0 4px 4px dimgrey; }
.thumbnail:hover { box-shadow: 0 0 8px 8px dimgrey; filter: grayscale(1); }
.log { position: absolute; bottom: 0; margin: 0.4rem 0.4rem 0 0.4rem; font-size: 0.9rem; }
.menubar { width: 100vw; background: #303030; display: flex; justify-content: space-evenly; text-align: center; padding: 8px; cursor: pointer; }
.samples-container { display: flex; flex-wrap: wrap; }
.video { display: none; }
.canvas { margin: 0 auto; }
.bench { position: absolute; right: 0; bottom: 0; }
.compare-image { width: 200px; position: absolute; top: 150px; left: 30px; box-shadow: 0 0 2px 2px black; background: black; display: none; }
.loader { width: 300px; height: 300px; border: 3px solid transparent; border-radius: 50%; border-top: 4px solid #f15e41; animation: spin 4s linear infinite; position: absolute; bottom: 15%; left: 50%; margin-left: -150px; z-index: 15; }
.loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: 4px solid transparent; }
.loader::before { border-top-color: #bad375; animation: 3s spin linear infinite; }
.loader::after { border-top-color: #26a9e0; animation: spin 1.5s linear infinite; }
@ -60,38 +61,58 @@
.button-model::before { content: "\f2c2"; }
.button-start::before { content: "\f144"; }
.button-stop::before { content: "\f28b"; }
.icon { width: 180px; text-align: -webkit-center; text-align: -moz-center; filter: grayscale(1); }
.icon:hover { background: #505050; filter: grayscale(0); }
.hint { opacity: 0; transition-duration: 0.5s; transition-property: opacity; font-style: italic; position: fixed; top: 5rem; padding: 8px; margin: 8px; box-shadow: 0 0 2px 2px #303030; }
.input-file { align-self: center; width: 5rem; }
.results { position: absolute; left: 0; top: 5rem; background: #303030; width: 20rem; height: 90%; font-size: 0.8rem; overflow-y: auto; display: none }
.results::-webkit-scrollbar { background-color: #303030; }
.results::-webkit-scrollbar-thumb { background: black; border-radius: 10px; }
.json-line { margin: 4px 0; display: flex; justify-content: flex-start; }
.json { margin-right: 8px; margin-left: 8px; }
.json-type { color: lightyellow; }
.json-key { color: white; }
.json-index { color: lightcoral; }
.json-value { margin-left: 20px; }
.json-number { color: lightgreen; }
.json-boolean { color: lightyellow; }
.json-string { color: lightblue; }
.json-size { color: gray; }
.hide { display: none; }
.fas { display: inline-block; width: 0; height: 0; border-style: solid; }
.fa-caret-down { border-width: 10px 8px 0 8px; border-color: white transparent }
.fa-caret-right { border-width: 10px 0 8px 10px; border-color: transparent transparent transparent white; }
</style>
</head>
<body>
<div id="play" class="play">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512">
<path d="M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zm115.7 272l-176 101c-15.8 8.8-35.7-2.5-35.7-21V152c0-18.4 19.8-29.8 35.7-21l176 107c16.4 9.2 16.4 32.9 0 42z" class="btn-background"/>
<path d="M371.7 280l-176 101c-15.8 8.8-35.7-2.5-35.7-21V152c0-18.4 19.8-29.8 35.7-21l176 107c16.4 9.2 16.4 32.9 0 42z" class="btn-foreground"/>
</svg>
</div>
<div id="play" class="play icon-play"></div>
<div id="background">
<div class="wave one"></div>
<div class="wave two"></div>
<div class="wave three"></div>
</div>
<div id="loader" class="loader"></div>
<div id="status" class="status"></div>
<div id="menubar" class="menubar">
<span class="button button-display" id="btnDisplay">Display<br>Options</span>
<span class="button button-image" id="btnImage">Image<br>Processing</span>
<span class="button button-process" id="btnProcess">Model<br>Processing</span>
<span class="button button-model" id="btnModel">Model<br>Selection</span>
<span class="button button-start" id="btnStart">Start<br>Video</span>
<div id="btnDisplay" class="icon"><div class="icon-binoculars"> </div>display</div>
<div id="btnImage" class="icon"><div class="icon-brush"></div>input</div>
<div id="btnProcess" class="icon"><div class="icon-stats"></div>options</div>
<div id="btnModel" class="icon"><div class="icon-games"></div>models</div>
<div id="btnStart" class="icon"><div class="icon-webcam"></div><span id="btnStartText">start video</span></div>
</div>
<div id="media">
<canvas id="canvas" class="canvas"></canvas>
<video id="video" playsinline class="video"></video>
</div>
<div id="compare-container" style="display: none" class="compare-image">
<div id="compare-container" class="compare-image">
<canvas id="compare-canvas" width="200" height="200"></canvas>
<div id="similarity"></div>
</div>
<div id="samples-container" class="samples-container"></div>
<div id="hint" class="hint"></div>
<div id="log" class="log"></div>
<div id="results" class="results"></div>
</body>
</html>

File diff suppressed because it is too large Load Diff

View File

@ -1,10 +1,10 @@
{
"name": "Human Library",
"short_name": "Human",
"icons": [{ "src": "../assets/icon.png", "sizes": "500x484", "type": "image/png", "purpose": "any maskable" }],
"icons": [{ "src": "../assets/icon.png", "sizes": "512x512", "type": "image/png", "purpose": "any maskable" }],
"start_url": "./index.html",
"scope": "/",
"display": "standalone",
"background_color": "#000000",
"theme_color": "#000000"
}

View File

@ -0,0 +1,71 @@
# Human Multithreading Demos
- **Browser** demo `multithread` & `worker`
  Runs each `human` module in a separate web worker for highest possible performance
- **NodeJS** demo `node-multiprocess` & `node-multiprocess-worker`
  Runs multiple parallel `human` instances by dispatching work to a pool of pre-created worker processes
<br><hr><br>
## NodeJS Multi-process Demo
`nodejs/node-multiprocess.js` and `nodejs/node-multiprocess-worker.js`: Demo using NodeJS with CommonJS module
Demo that starts n child worker processes for parallel execution
```shell
node demo/nodejs/node-multiprocess.js
```
<!-- eslint-skip -->
```json
2021-06-01 08:54:19 INFO: @vladmandic/human version 2.0.0
2021-06-01 08:54:19 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0
2021-06-01 08:54:19 INFO: Human multi-process test
2021-06-01 08:54:19 STATE: Enumerated images: ./assets 15
2021-06-01 08:54:19 STATE: Main: started worker: 130362
2021-06-01 08:54:19 STATE: Main: started worker: 130363
2021-06-01 08:54:19 STATE: Main: started worker: 130369
2021-06-01 08:54:19 STATE: Main: started worker: 130370
2021-06-01 08:54:20 STATE: Worker: PID: 130370 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
2021-06-01 08:54:20 STATE: Worker: PID: 130362 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
2021-06-01 08:54:20 STATE: Worker: PID: 130369 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
2021-06-01 08:54:20 STATE: Worker: PID: 130363 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130370
2021-06-01 08:54:21 INFO: Latency: worker initialization: 1348 message round trip: 0
2021-06-01 08:54:21 DATA: Worker received message: 130370 { test: true }
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130362
2021-06-01 08:54:21 DATA: Worker received message: 130362 { image: 'samples/ai-face.jpg' }
2021-06-01 08:54:21 DATA: Worker received message: 130370 { image: 'samples/ai-body.jpg' }
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130369
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130363
2021-06-01 08:54:21 DATA: Worker received message: 130369 { image: 'assets/human-sample-upper.jpg' }
2021-06-01 08:54:21 DATA: Worker received message: 130363 { image: 'assets/sample-me.jpg' }
2021-06-01 08:54:24 DATA: Main: worker finished: 130362 detected faces: 1 bodies: 1 hands: 0 objects: 1
2021-06-01 08:54:24 STATE: Main: dispatching to worker: 130362
2021-06-01 08:54:24 DATA: Worker received message: 130362 { image: 'assets/sample1.jpg' }
2021-06-01 08:54:25 DATA: Main: worker finished: 130369 detected faces: 1 bodies: 1 hands: 0 objects: 1
2021-06-01 08:54:25 STATE: Main: dispatching to worker: 130369
2021-06-01 08:54:25 DATA: Main: worker finished: 130370 detected faces: 1 bodies: 1 hands: 0 objects: 1
2021-06-01 08:54:25 STATE: Main: dispatching to worker: 130370
2021-06-01 08:54:25 DATA: Worker received message: 130369 { image: 'assets/sample2.jpg' }
2021-06-01 08:54:25 DATA: Main: worker finished: 130363 detected faces: 1 bodies: 1 hands: 0 objects: 2
2021-06-01 08:54:25 STATE: Main: dispatching to worker: 130363
2021-06-01 08:54:25 DATA: Worker received message: 130370 { image: 'assets/sample3.jpg' }
2021-06-01 08:54:25 DATA: Worker received message: 130363 { image: 'assets/sample4.jpg' }
2021-06-01 08:54:30 DATA: Main: worker finished: 130362 detected faces: 3 bodies: 1 hands: 0 objects: 7
2021-06-01 08:54:30 STATE: Main: dispatching to worker: 130362
2021-06-01 08:54:30 DATA: Worker received message: 130362 { image: 'assets/sample5.jpg' }
2021-06-01 08:54:31 DATA: Main: worker finished: 130369 detected faces: 3 bodies: 1 hands: 0 objects: 5
2021-06-01 08:54:31 STATE: Main: dispatching to worker: 130369
2021-06-01 08:54:31 DATA: Worker received message: 130369 { image: 'assets/sample6.jpg' }
2021-06-01 08:54:31 DATA: Main: worker finished: 130363 detected faces: 4 bodies: 1 hands: 2 objects: 2
2021-06-01 08:54:31 STATE: Main: dispatching to worker: 130363
2021-06-01 08:54:39 STATE: Main: worker exit: 130370 0
2021-06-01 08:54:39 DATA: Main: worker finished: 130362 detected faces: 1 bodies: 1 hands: 0 objects: 1
2021-06-01 08:54:39 DATA: Main: worker finished: 130369 detected faces: 1 bodies: 1 hands: 1 objects: 3
2021-06-01 08:54:39 STATE: Main: worker exit: 130362 0
2021-06-01 08:54:39 STATE: Main: worker exit: 130369 0
2021-06-01 08:54:41 DATA: Main: worker finished: 130363 detected faces: 9 bodies: 1 hands: 0 objects: 10
2021-06-01 08:54:41 STATE: Main: worker exit: 130363 0
2021-06-01 08:54:41 INFO: Processed: 15 images in total: 22006 ms working: 20658 ms average: 1377 ms
```

View File

@ -1,32 +1,33 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
<meta name="viewport" content="width=device-width" id="viewport">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<link rel="manifest" href="./manifest.webmanifest">
<link rel="shortcut icon" href="../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../assets/icon.png">
<script src="./face3d.js" type="module"></script>
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="../multithread/index.js" type="module"></script>
<style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
body::-webkit-scrollbar { display: none; }
.status { position: absolute; width: 100vw; bottom: 10%; text-align: center; font-size: 3rem; font-weight: 100; text-shadow: 2px 2px #303030; }
.log { position: absolute; bottom: 0; margin: 0.4rem 0.4rem 0 0.4rem; font-size: 0.9rem; }
.video { display: none; }
.canvas { margin: 0 auto; }
</style>
</head>
<body>
<div id="media">
<canvas id="canvas" class="canvas"></canvas>
<video id="video" playsinline class="video"></video>
<div id="log"></div>
</div>
<div id="status" class="status"></div>
<canvas id="canvas" class="canvas"></canvas>
<video id="video" playsinline class="video"></video>
<div id="log" class="log"></div>
</body>
</html>

264
demo/multithread/index.js Normal file
View File

@ -0,0 +1,264 @@
/**
* Human demo for browsers
*
* @description Demo app that enables all Human modules and runs them in separate worker threads
*
*/
import { Human } from '../../dist/human.esm.js'; // equivalent of @vladmandic/human
import GLBench from '../helpers/gl-bench.js';
const workerJS = '../multithread/worker.js';
const config = {
main: { // processes input and runs gesture analysis
warmup: 'none',
backend: 'webgl',
modelBasePath: '../../models/',
async: false,
filter: { enabled: true },
face: { enabled: false },
object: { enabled: false },
gesture: { enabled: true },
hand: { enabled: false },
body: { enabled: false },
segmentation: { enabled: false },
},
face: { // runs all face models
warmup: 'none',
backend: 'webgl',
modelBasePath: '../../models/',
async: false,
filter: { enabled: false },
face: { enabled: true },
object: { enabled: false },
gesture: { enabled: false },
hand: { enabled: false },
body: { enabled: false },
segmentation: { enabled: false },
},
body: { // runs body model
warmup: 'none',
backend: 'webgl',
modelBasePath: '../../models/',
async: false,
filter: { enabled: false },
face: { enabled: false },
object: { enabled: false },
gesture: { enabled: false },
hand: { enabled: false },
body: { enabled: true },
segmentation: { enabled: false },
},
hand: { // runs hands model
warmup: 'none',
backend: 'webgl',
modelBasePath: '../../models/',
async: false,
filter: { enabled: false },
face: { enabled: false },
object: { enabled: false },
gesture: { enabled: false },
hand: { enabled: true },
body: { enabled: false },
segmentation: { enabled: false },
},
object: { // runs object model
warmup: 'none',
backend: 'webgl',
modelBasePath: '../../models/',
async: false,
filter: { enabled: false },
face: { enabled: false },
object: { enabled: true },
gesture: { enabled: false },
hand: { enabled: false },
body: { enabled: false },
segmentation: { enabled: false },
},
};
let human;
let canvas;
let video;
let bench;
const busy = {
face: false,
hand: false,
body: false,
object: false,
};
const workers = {
/** @type {Worker | null} */
face: null,
/** @type {Worker | null} */
body: null,
/** @type {Worker | null} */
hand: null,
/** @type {Worker | null} */
object: null,
};
const time = {
main: 0,
draw: 0,
face: '[warmup]',
body: '[warmup]',
hand: '[warmup]',
object: '[warmup]',
};
const start = {
main: 0,
draw: 0,
face: 0,
body: 0,
hand: 0,
object: 0,
};
const result = { // initialize empty result object which will be partially filled with results from each thread
performance: {},
hand: [],
body: [],
face: [],
object: [],
};
function log(...msg) {
const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
console.log(ts, ...msg); // eslint-disable-line no-console
}
async function drawResults() {
start.draw = human.now();
const interpolated = human.next(result);
await human.draw.all(canvas, interpolated);
time.draw = Math.round(1 + human.now() - start.draw);
const fps = Math.round(10 * 1000 / time.main) / 10;
const draw = Math.round(10 * 1000 / time.draw) / 10;
const div = document.getElementById('log');
if (div) div.innerText = `Human: version ${human.version} | Performance: Main ${time.main}ms Face: ${time.face}ms Body: ${time.body}ms Hand: ${time.hand}ms Object ${time.object}ms | FPS: ${fps} / ${draw}`;
requestAnimationFrame(drawResults);
}
async function receiveMessage(msg) {
result[msg.data.type] = msg.data.result;
busy[msg.data.type] = false;
time[msg.data.type] = Math.round(human.now() - start[msg.data.type]);
}
async function runDetection() {
start.main = human.now();
if (!bench) {
bench = new GLBench(null, { trackGPU: false, chartHz: 20, chartLen: 20 });
bench.begin('human');
}
const ctx = canvas.getContext('2d');
ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
if (!busy.face) {
busy.face = true;
start.face = human.now();
if (workers.face) workers.face.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.face, type: 'face' }, [imageData.data.buffer.slice(0)]);
}
if (!busy.body) {
busy.body = true;
start.body = human.now();
if (workers.body) workers.body.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.body, type: 'body' }, [imageData.data.buffer.slice(0)]);
}
if (!busy.hand) {
busy.hand = true;
start.hand = human.now();
if (workers.hand) workers.hand.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.hand, type: 'hand' }, [imageData.data.buffer.slice(0)]);
}
if (!busy.object) {
busy.object = true;
start.object = human.now();
if (workers.object) workers.object.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.object, type: 'object' }, [imageData.data.buffer.slice(0)]);
}
time.main = Math.round(human.now() - start.main);
bench.nextFrame();
requestAnimationFrame(runDetection);
}
async function setupCamera() {
video = document.getElementById('video');
canvas = document.getElementById('canvas');
const output = document.getElementById('log');
let stream;
const constraints = {
audio: false,
video: {
facingMode: 'user',
resizeMode: 'crop-and-scale',
width: { ideal: document.body.clientWidth },
aspectRatio: document.body.clientWidth / document.body.clientHeight,
},
};
// enumerate devices for diag purposes
navigator.mediaDevices.enumerateDevices()
.then((devices) => log('enumerated devices:', devices))
.catch(() => log('mediaDevices error'));
log('camera constraints', constraints);
try {
stream = await navigator.mediaDevices.getUserMedia(constraints);
} catch (err) {
if (output) output.innerText += `\n${err.name}: ${err.message}`;
log('camera error:', err);
}
if (stream) {
const tracks = stream.getVideoTracks();
log('enumerated viable tracks:', tracks);
const track = stream.getVideoTracks()[0];
const settings = track.getSettings();
log('selected video source:', track, settings);
} else {
log('missing video stream');
}
const promise = !stream || new Promise((resolve) => {
video.onloadeddata = () => {
canvas.style.height = '100vh';
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
video.play();
resolve(true);
};
});
// attach input to video element
if (stream && video) video.srcObject = stream;
return promise;
}
async function startWorkers() {
if (!workers.face) workers.face = new Worker(workerJS);
if (!workers.body) workers.body = new Worker(workerJS);
if (!workers.hand) workers.hand = new Worker(workerJS);
if (!workers.object) workers.object = new Worker(workerJS);
workers.face.onmessage = receiveMessage;
workers.body.onmessage = receiveMessage;
workers.hand.onmessage = receiveMessage;
workers.object.onmessage = receiveMessage;
}
async function main() {
if (typeof Worker === 'undefined' || typeof OffscreenCanvas === 'undefined') {
return;
}
human = new Human(config.main);
const div = document.getElementById('log');
if (div) div.innerText = `Human: version ${human.version}`;
await startWorkers();
await setupCamera();
runDetection();
drawResults();
}
window.onload = main;

View File

@ -0,0 +1,85 @@
/**
* Human demo for NodeJS
*
* Used by node-multiprocess.js as an on-demand started worker process
* Receives messages from parent process and sends results
*/
const fs = require('fs');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// workers actually import tfjs and human modules
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
let human = null;
const myConfig = {
// backend: 'tensorflow',
modelBasePath: 'file://models/',
debug: false,
async: true,
face: {
enabled: true,
detector: { enabled: true, rotation: false },
mesh: { enabled: true },
iris: { enabled: true },
description: { enabled: true },
emotion: { enabled: true },
},
hand: {
enabled: true,
},
// body: { modelPath: 'blazepose.json', enabled: true },
body: { enabled: true },
object: { enabled: true },
};
// read image from a file and create tensor to be used by human
// this way we don't need any monkey patches
// you can add any pre-processing here such as resizing, etc.
async function image(img) {
const buffer = fs.readFileSync(img);
const tensor = human.tf.tidy(() => human.tf.node.decodeImage(buffer).toFloat().expandDims());
return tensor;
}
// actual human detection
async function detect(img) {
const tensor = await image(img);
const result = await human.detect(tensor);
if (process.send) { // check if ipc exists
process.send({ image: img, detected: result }); // send results back to main
process.send({ ready: true }); // send signal back to main that this worker is now idle and ready for next image
}
tf.dispose(tensor);
}
async function main() {
process.on('unhandledRejection', (err) => {
// @ts-ignore // no idea if exception message is complete
log.error(err?.message || err || 'no error message');
});
// on worker start first initialize message handler so we don't miss any messages
process.on('message', (msg) => {
// if main told worker to exit
if (msg.exit && process.exit) process.exit(); // eslint-disable-line no-process-exit
if (msg.test && process.send) process.send({ test: true });
if (msg.image) detect(msg.image); // if main told worker to process image
log.data('Worker received message:', process.pid, msg); // generic log
});
// create instance of human
human = new Human(myConfig);
// wait until tf is ready
await human.tf.ready();
// pre-load models
log.state('Worker: PID:', process.pid, `TensorFlow/JS ${human.tf.version['tfjs-core']} Human ${human.version} Backend: ${human.tf.getBackend()}`);
await human.load();
// now we're ready, so send a message back to main so it knows it can use this worker
if (process.send) process.send({ ready: true });
}
main();
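The parent/worker IPC contract implemented above, summarized as a sketch; the message shapes are taken from the handlers in this file and in node-multiprocess.js below:

```js
// main -> worker:
// { test: true }       worker echoes { test: true } so main can measure message round-trip latency
// { image: 'path.jpg' } worker runs detection, replies { image, detected } followed by { ready: true }
// { exit: true }       worker calls process.exit()
// worker -> main:
// { ready: true }      sent once models are loaded, and again after every processed image
```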

View File

@ -0,0 +1,97 @@
/**
* Human demo for NodeJS
*
* Uses NodeJS fork functionality with inter-processing-messaging
* Starts a pool of worker processes and dispatch work items to each worker when they are available
* Uses node-multiprocess-worker.js for actual processing
*/
const fs = require('fs');
const path = require('path');
const childProcess = require('child_process'); // eslint-disable-line camelcase
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// note that main process does not import human or tfjs at all, it's all done from worker process
const workerFile = 'demo/multithread/node-multiprocess-worker.js';
const imgPathRoot = './samples/in'; // modify to include your sample images
const numWorkers = 4; // how many workers will be started
const workers = []; // this holds worker processes
const images = []; // this holds queue of enumerated images
const t = []; // timers
let numImages;
// triggered by main when the worker sends a ready message
// if image pool is empty, signal worker to exit otherwise dispatch image to worker and remove image from queue
async function submitDetect(worker) {
if (!t[2]) t[2] = process.hrtime.bigint(); // on first call take a timestamp so we can measure initial latency
if (images.length === numImages) worker.send({ test: true }); // for first image in queue just measure latency
if (images.length === 0) worker.send({ exit: true }); // nothing left in queue
else {
log.state('Main: dispatching to worker:', worker.pid);
worker.send({ image: images[0] });
images.shift();
}
}
// loop that waits for all workers to complete
function waitCompletion() {
const activeWorkers = workers.reduce((any, worker) => (any += worker.connected ? 1 : 0), 0);
if (activeWorkers > 0) setImmediate(() => waitCompletion());
else {
t[1] = process.hrtime.bigint();
log.info('Processed:', numImages, 'images in', 'total:', Math.trunc(Number(t[1] - t[0]) / 1000000), 'ms', 'working:', Math.trunc(Number(t[1] - t[2]) / 1000000), 'ms', 'average:', Math.trunc(Number(t[1] - t[2]) / numImages / 1000000), 'ms');
}
}
function measureLatency() {
t[3] = process.hrtime.bigint();
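// hrtime.bigint() returns nanoseconds; dividing by 1000 twice converts to milliseconds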
const latencyInitialization = Math.trunc(Number(t[2] - t[0]) / 1000 / 1000);
const latencyRoundTrip = Math.trunc(Number(t[3] - t[2]) / 1000 / 1000);
log.info('Latency: worker initialization:', latencyInitialization, 'message round trip:', latencyRoundTrip);
}
async function main() {
process.on('unhandledRejection', (err) => {
// @ts-ignore // no idea if exception message is complete
log.error(err?.message || err || 'no error message');
});
log.header();
log.info('Human multi-process test');
// enumerate all images into queue
const dir = fs.readdirSync(imgPathRoot);
for (const imgFile of dir) {
if (imgFile.toLocaleLowerCase().endsWith('.jpg')) images.push(path.join(imgPathRoot, imgFile));
}
numImages = images.length;
log.state('Enumerated images:', imgPathRoot, numImages);
t[0] = process.hrtime.bigint();
t[1] = process.hrtime.bigint();
t[2] = process.hrtime.bigint();
// manage worker processes
for (let i = 0; i < numWorkers; i++) {
// create worker process
workers[i] = await childProcess.fork(workerFile, ['special']);
// parse message that worker process sends back to main
// if message is ready, dispatch next image in queue
// if message is processing result, just print how many faces were detected
// otherwise it's an unknown message
workers[i].on('message', (msg) => {
if (msg.ready) submitDetect(workers[i]);
else if (msg.image) log.data('Main: worker finished:', workers[i].pid, 'detected faces:', msg.detected.face?.length, 'bodies:', msg.detected.body?.length, 'hands:', msg.detected.hand?.length, 'objects:', msg.detected.object?.length);
else if (msg.test) measureLatency();
else log.data('Main: worker message:', workers[i].pid, msg);
});
// just log when worker exits
workers[i].on('exit', (msg) => log.state('Main: worker exit:', workers[i].pid, msg));
// just log which worker was started
log.state('Main: started worker:', workers[i].pid);
}
// wait for all workers to complete
waitCompletion();
}
main();

View File

@ -0,0 +1,18 @@
/// <reference lib="webworker" />
// load Human using IIFE script as Chrome Mobile does not support modules as workers
self.importScripts('../../dist/human.js'); // eslint-disable-line no-restricted-globals
let human;
onmessage = async (msg) => {
// received from index.js using:
// worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
// Human is registered as global namespace using IIFE script
if (!human) human = new Human.default(msg.data.config); // eslint-disable-line no-undef, new-cap
const image = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
let result = {};
result = await human.detect(image, msg.data.config);
postMessage({ result: result[msg.data.type], type: msg.data.type });
};
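For reference, a minimal sketch of the main-thread side that feeds this worker (element ids, worker path, and the `type` value are illustrative assumptions; the message shape mirrors the comment above):
// hypothetical main-thread counterpart (sketch only)
const worker = new Worker('../../demo/multithread/worker.js'); // path is illustrative
const canvas = document.getElementById('canvas'); // assumed canvas holding the current frame
const ctx = canvas.getContext('2d');
const image = ctx.getImageData(0, 0, canvas.width, canvas.height);
const config = { face: { enabled: true } }; // any Human configuration
worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config, type: 'face' }, [image.data.buffer]); // transfer buffer ownership to the worker
worker.onmessage = (msg) => console.log('worker result:', msg.data.type, msg.data.result); // eslint-disable-line no-console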

View File

@ -1,121 +0,0 @@
const log = require('@vladmandic/pilogger');
const fs = require('fs');
const process = require('process');
// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
// load specific version of Human library that matches TensorFlow mode
const Human = require('../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
let human = null;
const myConfig = {
backend: 'tensorflow',
modelBasePath: 'file://models/',
debug: true,
videoOptimized: false,
async: false,
face: {
enabled: true,
detector: { enabled: true, rotation: false },
mesh: { enabled: true },
iris: { enabled: true },
description: { enabled: true },
emotion: { enabled: true },
},
hand: {
enabled: true,
},
// body: { modelPath: 'efficientpose.json', enabled: true },
// body: { modelPath: 'blazepose.json', enabled: true },
body: { enabled: true },
object: { enabled: true },
};
async function init() {
// wait until tf is ready
await tf.ready();
// create instance of human
human = new Human(myConfig);
// pre-load models
log.info('Human:', human.version);
log.info('Active Configuration', human.config);
await human.load();
const loaded = Object.keys(human.models).filter((a) => human.models[a]);
log.info('Loaded:', loaded);
log.info('Memory state:', human.tf.engine().memory());
}
async function detect(input) {
// read input image file and create tensor to be used for processing
const buffer = fs.readFileSync(input);
const decoded = human.tf.node.decodeImage(buffer);
const casted = decoded.toFloat();
const image = casted.expandDims(0);
decoded.dispose();
casted.dispose();
// image shape contains image dimensions and depth
log.state('Processing:', image.shape);
// run actual detection
const result = await human.detect(image, myConfig);
// no need to print results as they are printed to console during detection from within the library due to human.config.debug set
// dispose image tensor as we no longer need it
image.dispose();
// print data to console
log.data('Results:');
for (let i = 0; i < result.face.length; i++) {
const face = result.face[i];
const emotion = face.emotion.reduce((prev, curr) => (prev.score > curr.score ? prev : curr));
log.data(`  Face: #${i} boxConfidence:${face.boxConfidence} faceConfidence:${face.faceConfidence} age:${face.age} genderConfidence:${face.genderConfidence} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} iris:${face.iris}`);
}
for (let i = 0; i < result.body.length; i++) {
const body = result.body[i];
log.data(` Body: #${i} score:${body.score}`);
}
for (let i = 0; i < result.hand.length; i++) {
const hand = result.hand[i];
log.data(` Hand: #${i} confidence:${hand.confidence}`);
}
for (let i = 0; i < result.gesture.length; i++) {
const [key, val] = Object.entries(result.gesture[i]);
log.data(` Gesture: ${key[0]}#${key[1]} gesture:${val[1]}`);
}
for (let i = 0; i < result.object.length; i++) {
const object = result.object[i];
log.data(` Object: #${i} score:${object.score} label:${object.label}`);
}
result.face.length = 0;
return result;
}
async function test() {
// test with embedded full body image
let result;
log.state('Processing embedded warmup image: face');
myConfig.warmup = 'face';
result = await human.warmup(myConfig);
log.state('Processing embedded warmup image: full');
myConfig.warmup = 'full';
result = await human.warmup(myConfig);
// no need to print results as they are printed to console during detection from within the library due to human.config.debug set
return result;
}
async function main() {
log.header();
log.info('Current folder:', process.env.PWD);
await init();
if (process.argv.length !== 3) {
log.warn('Parameters: <input image> missing');
await test();
} else if (!fs.existsSync(process.argv[2])) {
log.error(`File not found: ${process.argv[2]}`);
} else {
await detect(process.argv[2]);
}
}
main();

121
demo/nodejs/README.md Normal file
View File

@ -0,0 +1,121 @@
# Human Demos for NodeJS
- `node`: Process images from files, folders or URLs
uses native methods for image loading and decoding without external dependencies
- `node-canvas`: Process image from file or URL and draw results to a new image file using `node-canvas`
uses `node-canvas` library to load and decode images from files, draw detection results and write output to a new image file
- `node-video`: Processing of video input using `ffmpeg`
uses `ffmpeg` to decode video input (can be a file, stream or device such as webcam) and
outputs frames to a pipe that are captured by the demo app and processed by `Human` library
- `node-webcam`: Processing of webcam screenshots using `fswebcam`
uses `fswebcam` to connect to a webcam and take screenshots at regular intervals which are then processed by `Human` library
- `node-event`: Showcases usage of `Human` eventing to get notifications on processing
- `node-similarity`: Compares two input images for similarity of detected faces
- `process-folder`: Processes all images in the input folder and creates output images
internally used to generate the samples gallery
<br>
## Main Demo
`nodejs/node.js`: Demo using NodeJS with CommonJS module
Simple demo that can process any input image
Note that you can run the demo as-is and it will perform detection on the provided sample images,
or you can pass a path to an image to analyze, either on the local filesystem or via URL
```shell
node demo/nodejs/node.js
```
<!-- eslint-skip -->
```js
2021-06-01 08:52:15 INFO: @vladmandic/human version 2.0.0
2021-06-01 08:52:15 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0
2021-06-01 08:52:15 INFO: Current folder: /home/vlado/dev/human
2021-06-01 08:52:15 INFO: Human: 2.0.0
2021-06-01 08:52:15 INFO: Active Configuration {
backend: 'tensorflow',
modelBasePath: 'file://models/',
wasmPath: '../node_modules/@tensorflow/tfjs-backend-wasm/dist/',
debug: true,
async: false,
warmup: 'full',
cacheSensitivity: 0.75,
filter: {
enabled: true,
width: 0,
height: 0,
flip: true,
return: true,
brightness: 0,
contrast: 0,
sharpness: 0,
blur: 0,
saturation: 0,
hue: 0,
negative: false,
sepia: false,
vintage: false,
kodachrome: false,
technicolor: false,
polaroid: false,
pixelate: 0
},
gesture: { enabled: true },
face: {
enabled: true,
detector: { modelPath: 'blazeface.json', rotation: false, maxDetected: 10, skipFrames: 15, minConfidence: 0.2, iouThreshold: 0.1, return: false, enabled: true },
mesh: { enabled: true, modelPath: 'facemesh.json' },
iris: { enabled: true, modelPath: 'iris.json' },
description: { enabled: true, modelPath: 'faceres.json', skipFrames: 16, minConfidence: 0.1 },
emotion: { enabled: true, minConfidence: 0.1, skipFrames: 17, modelPath: 'emotion.json' }
},
body: { enabled: true, modelPath: 'movenet-lightning.json', maxDetected: 1, minConfidence: 0.2 },
hand: {
enabled: true,
rotation: true,
skipFrames: 18,
minConfidence: 0.1,
iouThreshold: 0.1,
maxDetected: 2,
landmarks: true,
detector: { modelPath: 'handdetect.json' },
skeleton: { modelPath: 'handskeleton.json' }
},
object: { enabled: true, modelPath: 'centernet.json', minConfidence: 0.2, iouThreshold: 0.4, maxDetected: 10, skipFrames: 19 }
}
08:52:15.673 Human: version: 2.0.0
08:52:15.674 Human: tfjs version: 3.6.0
08:52:15.674 Human: platform: linux x64
08:52:15.674 Human: agent: NodeJS v16.0.0
08:52:15.674 Human: setting backend: tensorflow
08:52:15.710 Human: load model: file://models/blazeface.json
08:52:15.743 Human: load model: file://models/facemesh.json
08:52:15.744 Human: load model: file://models/iris.json
08:52:15.760 Human: load model: file://models/emotion.json
08:52:15.847 Human: load model: file://models/handdetect.json
08:52:15.847 Human: load model: file://models/handskeleton.json
08:52:15.914 Human: load model: file://models/movenet-lightning.json
08:52:15.957 Human: load model: file://models/centernet.json
08:52:16.015 Human: load model: file://models/faceres.json
08:52:16.015 Human: tf engine state: 50796152 bytes 1318 tensors
2021-06-01 08:52:16 INFO: Loaded: [ 'face', 'movenet', 'handpose', 'emotion', 'centernet', 'faceres', [length]: 6 ]
2021-06-01 08:52:16 INFO: Memory state: { unreliable: true, numTensors: 1318, numDataBuffers: 1318, numBytes: 50796152 }
2021-06-01 08:52:16 INFO: Loading image: private/daz3d/daz3d-kiaria-02.jpg
2021-06-01 08:52:16 STATE: Processing: [ 1, 1300, 1000, 3, [length]: 4 ]
2021-06-01 08:52:17 DATA: Results:
2021-06-01 08:52:17 DATA: Face: #0 boxScore:0.88 faceScore:1 age:16.3 genderScore:0.97 gender:female emotionScore:0.85 emotion:happy iris:61.05
2021-06-01 08:52:17 DATA: Body: #0 score:0.82 keypoints:17
2021-06-01 08:52:17 DATA: Hand: #0 score:0.89
2021-06-01 08:52:17 DATA: Hand: #1 score:0.97
2021-06-01 08:52:17 DATA: Gesture: face#0 gesture:facing left
2021-06-01 08:52:17 DATA: Gesture: body#0 gesture:leaning right
2021-06-01 08:52:17 DATA: Gesture: hand#0 gesture:pinky forward middlefinger up
2021-06-01 08:52:17 DATA: Gesture: hand#1 gesture:pinky forward middlefinger up
2021-06-01 08:52:17 DATA: Gesture: iris#0 gesture:looking left
2021-06-01 08:52:17 DATA: Object: #0 score:0.55 label:person
2021-06-01 08:52:17 DATA: Object: #1 score:0.23 label:bottle
2021-06-01 08:52:17 DATA: Persons:
2021-06-01 08:52:17 DATA: #0: Face:score:1 age:16.3 gender:female iris:61.05 Body:score:0.82 keypoints:17 LeftHand:no RightHand:yes Gestures:4
```

66
demo/nodejs/node-bench.js Normal file
View File

@ -0,0 +1,66 @@
/**
* Human benchmark demo for NodeJS
*/
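// when run without arguments this script forks itself once per build/backend combination (see executeWorker below);
// each child reports load/warmup/detect timings via process.send and the parent simply logs the messages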
const childProcess = require('child_process'); // eslint-disable-line camelcase
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
const config = {
cacheSensitivity: 0.01,
wasmPlatformFetch: true,
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
};
const count = 10;
async function loadImage(input) {
const inputImage = await canvas.loadImage(input);
const inputCanvas = new canvas.Canvas(inputImage.width, inputImage.height);
const inputCtx = inputCanvas.getContext('2d');
inputCtx.drawImage(inputImage, 0, 0);
const imageData = inputCtx.getImageData(0, 0, inputCanvas.width, inputCanvas.height);
process.send({ input, resolution: [inputImage.width, inputImage.height] });
return imageData;
}
async function runHuman(module, backend) {
if (backend === 'wasm') require('@tensorflow/tfjs-backend-wasm'); // eslint-disable-line node/no-unpublished-require, global-require
const Human = require('../../dist/' + module); // eslint-disable-line global-require, import/no-dynamic-require
config.backend = backend;
const human = new Human.Human(config);
human.env.Canvas = canvas.Canvas;
human.env.Image = canvas.Image;
human.env.ImageData = canvas.ImageData;
process.send({ human: human.version, module });
await human.init();
process.send({ desired: human.config.backend, wasm: human.env.wasm, tfjs: human.tf.version.tfjs, tensorflow: human.env.tensorflow });
const imageData = await loadImage('samples/in/ai-body.jpg');
const t0 = human.now();
await human.load();
const t1 = human.now();
await human.warmup();
const t2 = human.now();
for (let i = 0; i < count; i++) await human.detect(imageData);
const t3 = human.now();
process.send({ backend: human.tf.getBackend(), load: Math.round(t1 - t0), warmup: Math.round(t2 - t1), detect: Math.round(t3 - t2), count, memory: human.tf.memory().numBytes });
}
async function executeWorker(args) {
return new Promise((resolve) => {
const worker = childProcess.fork(process.argv[1], args);
worker.on('message', (msg) => log.data(msg));
worker.on('exit', () => resolve(true));
});
}
async function main() {
if (process.argv[2]) {
await runHuman(process.argv[2], process.argv[3]);
} else {
await executeWorker(['human.node.js', 'tensorflow']);
await executeWorker(['human.node-gpu.js', 'tensorflow']);
await executeWorker(['human.node-wasm.js', 'wasm']);
}
}
main();

View File

@ -0,0 +1,82 @@
/**
* Human demo for NodeJS using Canvas library
*
* Requires [canvas](https://www.npmjs.com/package/canvas) to provide Canvas functionality in NodeJS environment
*/
const fs = require('fs');
const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
const config = { // just enable all and leave default settings
debug: false,
face: { enabled: true, detector: { maxDetected: 10 } }, // includes mesh, iris, emotion, descriptor
hand: { enabled: true, maxDetected: 20, minConfidence: 0.5, detector: { modelPath: 'handtrack.json' } }, // use alternative hand model
body: { enabled: true },
object: { enabled: true },
gesture: { enabled: true },
};
async function main() {
log.header();
globalThis.Canvas = canvas.Canvas; // patch global namespace with canvas library
globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library
// human.env.Canvas = canvas.Canvas; // alternatively monkey-patch human to use external canvas library
// human.env.ImageData = canvas.ImageData; // alternatively monkey-patch human to use external canvas library
// init
const human = new Human.Human(config); // create instance of human
log.info('Human:', human.version, 'TF:', tf.version_core);
await human.load(); // pre-load models
log.info('Loaded models:', human.models.loaded());
log.info('Memory state:', human.tf.engine().memory());
// parse cmdline
const input = process.argv[2];
let output = process.argv[3];
if (output && !output.toLowerCase().endsWith('.jpg')) output += '.jpg'; // guard against missing argument before appending extension
if (process.argv.length !== 4) log.error('Parameters: <input-image> <output-image> missing');
else if (!fs.existsSync(input) && !input.startsWith('http')) log.error(`File not found: ${process.argv[2]}`);
else {
// everything seems ok
const inputImage = await canvas.loadImage(input); // load image using canvas library
log.info('Loaded image', input, inputImage.width, inputImage.height);
const inputCanvas = new canvas.Canvas(inputImage.width, inputImage.height); // create canvas
const inputCtx = inputCanvas.getContext('2d');
inputCtx.drawImage(inputImage, 0, 0); // draw input image onto canvas
const imageData = inputCtx.getImageData(0, 0, inputCanvas.width, inputCanvas.height);
// run detection
const result = await human.detect(imageData);
// print results summary
const persons = result.persons; // invoke persons getter, only used to print summary on console
for (let i = 0; i < persons.length; i++) {
const face = persons[i].face;
const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.iris}` : null;
const body = persons[i].body;
const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints.length}` : null;
log.data(`Detected: #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`);
}
// draw detected results onto canvas and save it to a file
const outputCanvas = new canvas.Canvas(inputImage.width, inputImage.height); // create canvas
const outputCtx = outputCanvas.getContext('2d');
outputCtx.drawImage(result.canvas || inputImage, 0, 0); // draw input image onto canvas
human.draw.all(outputCanvas, result); // use human built-in method to draw results as overlays on canvas
const outFile = fs.createWriteStream(output); // write canvas to new image file
outFile.on('finish', () => log.state('Output image:', output, outputCanvas.width, outputCanvas.height));
outFile.on('error', (err) => log.error('Output error:', output, err));
const stream = outputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
stream.pipe(outFile);
}
}
main();

95
demo/nodejs/node-event.js Normal file
View File

@ -0,0 +1,95 @@
/**
* Human demo for NodeJS
*/
const fs = require('fs');
const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
let human = null;
const myConfig = {
modelBasePath: 'file://models/',
debug: false,
async: true,
filter: { enabled: false },
face: {
enabled: true,
detector: { enabled: true },
mesh: { enabled: true },
iris: { enabled: true },
description: { enabled: true },
emotion: { enabled: true },
},
hand: { enabled: true },
body: { enabled: true },
object: { enabled: true },
};
async function detect(input) {
// read input image from file or url into buffer
let buffer;
log.info('Loading image:', input);
if (input.startsWith('http:') || input.startsWith('https:')) {
const res = await fetch(input);
if (res && res.ok) buffer = Buffer.from(await res.arrayBuffer());
else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type'));
} else {
buffer = fs.readFileSync(input);
}
log.data('Image bytes:', buffer?.length, 'buffer:', buffer?.slice(0, 32));
// decode image using tfjs-node so we don't need external dependencies
if (!buffer) return;
const tensor = human.tf.node.decodeImage(buffer, 3);
// run detection
await human.detect(tensor, myConfig);
human.tf.dispose(tensor); // dispose image tensor as we no longer need it
}
async function main() {
log.header();
human = new Human.Human(myConfig);
log.info('Human:', human.version, 'TF:', tf.version_core);
if (human.events) {
human.events.addEventListener('warmup', () => {
log.info('Event Warmup');
});
human.events.addEventListener('load', () => {
log.info('Event Loaded:', human.models.loaded(), human.tf.engine().memory());
});
human.events.addEventListener('image', () => {
log.info('Event Image:', human.process.tensor.shape);
});
human.events.addEventListener('detect', () => {
log.data('Event Detected:');
const persons = human.result.persons;
for (let i = 0; i < persons.length; i++) {
const face = persons[i].face;
const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.distance}` : null;
const body = persons[i].body;
const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints?.length}` : null;
log.data(` #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`);
}
});
}
await human.tf.ready(); // wait until tf is ready
const input = process.argv[2]; // process input
if (input) await detect(input);
else log.error('Missing <input>');
}
main();

30
demo/nodejs/node-fetch.js Normal file
View File

@ -0,0 +1,30 @@
/**
* Human demo for NodeJS using http fetch to get image file
*
* Requires [node-fetch](https://www.npmjs.com/package/node-fetch) to provide `fetch` functionality in NodeJS environment
*/
const fs = require('fs');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
const humanConfig = {
modelBasePath: 'https://vladmandic.github.io/human/models/',
};
async function main(inputFile) {
global.fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-unpublished-import, import/no-unresolved, node/no-missing-import, node/no-extraneous-import
const human = new Human.Human(humanConfig); // create instance of human using default configuration
log.info('Human:', human.version, 'TF:', tf.version_core);
await human.load(); // optional as models would be loaded on-demand first time they are required
await human.warmup(); // optional as model warmup is performed on-demand first time its executed
const buffer = fs.readFileSync(inputFile); // read file data into buffer
const tensor = human.tf.node.decodeImage(buffer); // decode jpg data
const result = await human.detect(tensor); // run detection; will initialize backend and on-demand load models
log.data(result.gesture);
}
main('samples/in/ai-body.jpg');

View File

@ -0,0 +1,64 @@
/**
* Human Person Similarity test for NodeJS
*/
const fs = require('fs');
const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
let human = null;
const myConfig = {
modelBasePath: 'file://models/',
debug: true,
face: { emotion: { enabled: false } },
body: { enabled: false },
hand: { enabled: false },
gesture: { enabled: false },
};
async function init() {
human = new Human.Human(myConfig);
await human.tf.ready();
log.info('Human:', human.version, 'TF:', tf.version_core);
await human.load();
log.info('Loaded:', human.models.loaded());
log.info('Memory state:', human.tf.engine().memory());
}
async function detect(input) {
if (!fs.existsSync(input)) {
throw new Error(`Cannot load image: ${input}`); // Error constructor ignores extra arguments, so interpolate the path
}
const buffer = fs.readFileSync(input);
const tensor = human.tf.node.decodeImage(buffer, 3);
log.state('Loaded image:', input, tensor.shape);
const result = await human.detect(tensor, myConfig);
human.tf.dispose(tensor);
log.state('Detected faces:', result.face.length);
return result;
}
async function main() {
log.configure({ inspect: { breakLength: 265 } });
log.header();
if (process.argv.length !== 4) {
log.error('Parameters: <first image> <second image> missing');
return;
}
await init();
const res1 = await detect(process.argv[2]);
const res2 = await detect(process.argv[3]);
if (!res1 || !res1.face || res1.face.length === 0 || !res2 || !res2.face || res2.face.length === 0) {
throw new Error('Could not detect face descriptors');
}
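// order: 2 compares the face descriptors using second-order (euclidean) distance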
const similarity = human.match.similarity(res1.face[0].embedding, res2.face[0].embedding, { order: 2 });
log.data('Similarity: ', similarity);
}
main();

View File

@ -0,0 +1,32 @@
/**
* Human simple demo for NodeJS
*/
const fs = require('fs');
const process = require('process');
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
const humanConfig = {
// add any custom config here
debug: true,
body: { enabled: false },
};
async function detect(inputFile) {
const human = new Human.Human(humanConfig); // create instance of human using default configuration
console.log('Human:', human.version, 'TF:', tf.version_core); // eslint-disable-line no-console
await human.load(); // optional as models would be loaded on-demand first time they are required
await human.warmup(); // optional as model warmup is performed on-demand first time its executed
const buffer = fs.readFileSync(inputFile); // read file data into buffer
const tensor = human.tf.node.decodeImage(buffer); // decode jpg data
console.log('loaded input file:', inputFile, 'resolution:', tensor.shape); // eslint-disable-line no-console
const result = await human.detect(tensor); // run detection; will initialize backend and on-demand load models
console.log(result); // eslint-disable-line no-console
}
if (process.argv.length === 3) detect(process.argv[2]); // if input file is provided as cmdline parameter use it
else detect('samples/in/ai-body.jpg'); // else use built-in test input file

91
demo/nodejs/node-video.js Normal file
View File

@ -0,0 +1,91 @@
/**
* Human demo for NodeJS
* Unsupported sample using the external utility ffmpeg to decode video input and process it using Human
*
* Uses ffmpeg to process video input and output a stream of motion jpeg images which are then parsed for frame start/end markers by pipe2jpeg
* Each frame triggers an event with a jpeg buffer that can then be decoded and passed to human for processing
* If you want to process at specific intervals, set output fps to some value
* If you want to process an input stream, set the real-time flag and set input as required
*
* Note that [pipe2jpeg](https://www.npmjs.com/package/pipe2jpeg) is not part of Human dependencies and should be installed manually
* Working version of `ffmpeg` must be present on the system
*/
const process = require('process');
const spawn = require('child_process').spawn;
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
// const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Pipe2Jpeg = require('pipe2jpeg'); // eslint-disable-line node/no-missing-require, import/no-unresolved
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
let count = 0; // counter
let busy = false; // busy flag
let inputFile = './test.mp4';
if (process.argv.length === 3) inputFile = process.argv[2];
const humanConfig = {
modelBasePath: 'file://models/',
debug: false,
async: true,
filter: { enabled: false },
face: {
enabled: true,
detector: { enabled: true, rotation: false },
mesh: { enabled: true },
iris: { enabled: true },
description: { enabled: true },
emotion: { enabled: true },
},
hand: { enabled: false },
body: { enabled: false },
object: { enabled: false },
};
const human = new Human.Human(humanConfig);
const pipe2jpeg = new Pipe2Jpeg();
const ffmpegParams = [
'-loglevel', 'quiet',
// input
// '-re', // optional process video in real-time not as fast as possible
'-i', `${inputFile}`, // input file
// output
'-an', // drop audio
'-c:v', 'mjpeg', // use motion jpeg as output encoder
'-pix_fmt', 'yuvj422p', // typical for mp4, may need different settings for some videos
'-f', 'image2pipe', // pipe images as output
// '-vf', 'fps=5,scale=800:600', // optional video filter, do anything here such as process at fixed 5fps or resize to specific resolution
'pipe:1', // output to unix pipe that is then captured by pipe2jpeg
];
async function detect(jpegBuffer) {
if (busy) return; // skip processing if busy
busy = true;
const tensor = human.tf.node.decodeJpeg(jpegBuffer, 3); // decode jpeg buffer to raw tensor
const res = await human.detect(tensor);
human.tf.dispose(tensor); // must dispose tensor
// start custom processing here
log.data('frame', { frame: ++count, size: jpegBuffer.length, shape: tensor.shape, face: res?.face?.length, body: res?.body?.length, hand: res?.hand?.length, gesture: res?.gesture?.length });
if (res?.face?.[0]) log.data('person', { score: [res.face[0].boxScore, res.face[0].faceScore], age: res.face[0].age || 0, gender: [res.face[0].genderScore || 0, res.face[0].gender], emotion: res.face[0].emotion?.[0] });
// at the end of processing, mark the loop as not busy so it can process the next frame
busy = false;
}
async function main() {
log.header();
await human.tf.ready();
// pre-load models
log.info({ human: human.version, tf: human.tf.version_core });
log.info({ input: inputFile });
pipe2jpeg.on('data', (jpegBuffer) => detect(jpegBuffer));
const ffmpeg = spawn('ffmpeg', ffmpegParams, { stdio: ['ignore', 'pipe', 'ignore'] });
ffmpeg.on('error', (error) => log.error('ffmpeg error:', error));
ffmpeg.on('exit', (code, signal) => log.info('ffmpeg exit', code, signal));
ffmpeg.stdout.pipe(pipe2jpeg);
}
main();

View File

@ -0,0 +1,94 @@
/**
* Human demo for NodeJS
* Unsupported sample using the external utility fswebcam to capture screenshots from an attached webcam at regular intervals and process them using Human
*
* Note that [node-webcam](https://www.npmjs.com/package/node-webcam) is not part of Human dependencies and should be installed manually
* Working version of `fswebcam` must be present on the system
*/
let initial = true; // remember if this is the first run to print additional details
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const nodeWebCam = require('node-webcam'); // eslint-disable-line import/no-unresolved, node/no-missing-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
// options for node-webcam
const tempFile = 'webcam-snap'; // node-webcam requires writing a snapshot to a file; recommended to use tmpfs to avoid excessive disk writes
const optionsCamera = {
callbackReturn: 'buffer', // return whatever `fswebcam` writes to disk as a buffer with no additional processing, so it's fastest
saveShots: false, // don't save processed frame to disk; note that a temp file is still created by fswebcam, hence the tmpfs recommendation
};
const camera = nodeWebCam.create(optionsCamera);
// options for human
const optionsHuman = {
modelBasePath: 'file://models/',
};
const human = new Human.Human(optionsHuman);
function buffer2tensor(buffer) {
return human.tf.tidy(() => {
if (!buffer) return null;
const decode = human.tf.node.decodeImage(buffer, 3);
let expand;
if (decode.shape[2] === 4) { // input is in rgba format, need to convert to rgb
const channels = human.tf.split(decode, 4, 2); // tf.split(tensor, 4, 2); // split rgba to channels
const rgb = human.tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb and ignore alpha
expand = human.tf.reshape(rgb, [1, decode.shape[0], decode.shape[1], 3]); // move extra dim from the end of tensor and use it as batch number instead
} else {
expand = human.tf.expandDims(decode, 0); // input is rgb so use as-is
}
const cast = human.tf.cast(expand, 'float32');
return cast;
});
}
async function detect() {
// trigger next frame every 5 sec
// triggered here before actual capture and detection since we assume it will complete in less than 5sec
// so it's as close as possible to real 5sec and not 5sec + detection time
// if there is a chance of race scenario where detection takes longer than loop trigger, then trigger should be at the end of the function instead
setTimeout(() => detect(), 5000);
camera.capture(tempFile, (err, data) => { // gets the (default) jpeg data from the webcam
if (err) {
log.error('error capturing webcam:', err);
} else {
const tensor = buffer2tensor(data); // create tensor from image buffer
if (initial) log.data('input tensor:', tensor.shape);
human.detect(tensor) // eslint-disable-line promise/no-promise-in-callback
.then((result) => {
if (result && result.face && result.face.length > 0) {
for (let i = 0; i < result.face.length; i++) {
const face = result.face[i];
const emotion = face.emotion?.reduce((prev, curr) => (prev.score > curr.score ? prev : curr));
log.data(`detected face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion?.score} emotion:${emotion?.emotion} iris:${face.iris}`);
}
} else {
log.data(' Face: N/A');
}
return result;
})
.catch(() => log.error('human detect error'));
}
initial = false;
});
// alternatively to triggering every 5 sec, simply trigger the next frame as fast as possible
// setImmediate(() => detect());
}
async function main() {
log.info('human:', human.version, 'tf:', tf.version_core);
camera.list((list) => {
log.data('detected camera:', list);
});
await human.load();
detect();
}
log.header();
main();

213
demo/nodejs/node.js Normal file
View File

@ -0,0 +1,213 @@
/**
* Human demo for NodeJS
*/
const fs = require('fs');
const path = require('path');
const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
let human = null;
const myConfig = {
// backend: 'tensorflow',
modelBasePath: 'file://models/',
debug: true,
async: false,
filter: {
enabled: true,
flip: true,
},
face: {
enabled: true,
detector: { enabled: true, rotation: false },
mesh: { enabled: true },
iris: { enabled: true },
description: { enabled: true },
emotion: { enabled: true },
},
hand: {
enabled: true,
},
// body: { modelPath: 'blazepose.json', enabled: true },
body: { enabled: true },
object: { enabled: true },
};
async function init() {
// create instance of human
human = new Human.Human(myConfig);
// wait until tf is ready
await human.tf.ready();
log.info('human:', human.version, 'tf:', tf.version_core);
// pre-load models
log.info('Human:', human.version);
// log.info('Active Configuration', human.config);
await human.load();
log.info('Loaded:', human.models.loaded());
// log.info('Memory state:', human.tf.engine().memory());
log.data(tf.backend().binding ? tf.backend().binding.TF_Version : null);
}
async function detect(input) {
// read input image file and create tensor to be used for processing
let buffer;
log.info('Loading image:', input);
if (input.startsWith('http:') || input.startsWith('https:')) {
const res = await fetch(input);
if (res && res.ok) buffer = Buffer.from(await res.arrayBuffer());
else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type'));
} else {
buffer = fs.readFileSync(input);
}
log.data('Image bytes:', buffer?.length, 'buffer:', buffer?.slice(0, 32));
// decode image using tfjs-node so we don't need external dependencies
// can also be done using canvas.js or some other 3rd party image library
if (!buffer) return {};
const tensor = human.tf.tidy(() => {
const decode = human.tf.node.decodeImage(buffer, 3);
let expand;
if (decode.shape[2] === 4) { // input is in rgba format, need to convert to rgb
const channels = human.tf.split(decode, 4, 2); // tf.split(tensor, 4, 2); // split rgba to channels
const rgb = human.tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb and ignore alpha
expand = human.tf.reshape(rgb, [1, decode.shape[0], decode.shape[1], 3]); // move extra dim from the end of tensor and use it as batch number instead
} else {
expand = human.tf.expandDims(decode, 0);
}
const cast = human.tf.cast(expand, 'float32');
return cast;
});
// image shape contains image dimensions and depth
log.state('Processing:', tensor.shape);
// run actual detection
let result;
try {
result = await human.detect(tensor, myConfig);
} catch (err) {
log.error('caught', err);
}
// dispose image tensor as we no longer need it
human.tf.dispose(tensor);
// print data to console
log.data('Results:');
if (result && result.face && result.face.length > 0) {
for (let i = 0; i < result.face.length; i++) {
const face = result.face[i];
const emotion = face.emotion.reduce((prev, curr) => (prev.score > curr.score ? prev : curr));
log.data(` Face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} distance:${face.distance}`);
}
} else {
log.data(' Face: N/A');
}
if (result && result.body && result.body.length > 0) {
for (let i = 0; i < result.body.length; i++) {
const body = result.body[i];
log.data(` Body: #${i} score:${body.score} keypoints:${body.keypoints?.length}`);
}
} else {
log.data(' Body: N/A');
}
if (result && result.hand && result.hand.length > 0) {
for (let i = 0; i < result.hand.length; i++) {
const hand = result.hand[i];
log.data(` Hand: #${i} score:${hand.score} keypoints:${hand.keypoints?.length}`);
}
} else {
log.data(' Hand: N/A');
}
if (result && result.gesture && result.gesture.length > 0) {
for (let i = 0; i < result.gesture.length; i++) {
const [key, val] = Object.entries(result.gesture[i]);
log.data(` Gesture: ${key[0]}#${key[1]} gesture:${val[1]}`);
}
} else {
log.data(' Gesture: N/A');
}
if (result && result.object && result.object.length > 0) {
for (let i = 0; i < result.object.length; i++) {
const object = result.object[i];
log.data(` Object: #${i} score:${object.score} label:${object.label}`);
}
} else {
log.data(' Object: N/A');
}
// print data to console
if (result) {
// invoke persons getter
const persons = result.persons;
// write result objects to file
// fs.writeFileSync('result.json', JSON.stringify(result, null, 2));
log.data('Persons:');
for (let i = 0; i < persons.length; i++) {
const face = persons[i].face;
const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.iris}` : null;
const body = persons[i].body;
const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints?.length}` : null;
log.data(` #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`);
}
}
return result;
}
async function test() {
process.on('unhandledRejection', (err) => {
// @ts-ignore // no idea if exception message is complete
log.error(err?.message || err || 'no error message');
});
// test with embedded full body image
let result;
log.state('Processing embedded warmup image: face');
myConfig.warmup = 'face';
result = await human.warmup(myConfig);
log.state('Processing embedded warmup image: full');
myConfig.warmup = 'full';
result = await human.warmup(myConfig);
// no need to print results as they are printed to console during detection from within the library due to human.config.debug set
return result;
}
async function main() {
log.configure({ inspect: { breakLength: 265 } });
log.header();
log.info('Current folder:', process.env.PWD);
await init();
const f = process.argv[2];
if (process.argv.length !== 3) {
log.warn('Parameters: <input image | folder> missing');
await test();
} else if (!fs.existsSync(f) && !f.startsWith('http')) {
log.error(`File not found: ${process.argv[2]}`);
} else if (fs.existsSync(f)) {
const stat = fs.statSync(f);
if (stat.isDirectory()) {
const dir = fs.readdirSync(f);
for (const file of dir) {
await detect(path.join(f, file));
}
} else {
await detect(f);
}
} else {
await detect(f);
}
}
main();

View File

@ -0,0 +1,119 @@
/**
* Human demo for NodeJS
*
* Takes input and output folder names parameters and processes all images
* found in input folder and creates annotated images in output folder
*
* Requires [canvas](https://www.npmjs.com/package/canvas) to provide Canvas functionality in NodeJS environment
*/
const fs = require('fs');
const path = require('path');
const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
// for nodejs, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
const tf = require('@tensorflow/tfjs-node-gpu'); // eslint-disable-line node/no-unpublished-require
const Human = require('../../dist/human.node-gpu.js'); // gpu build of human to match tfjs-node-gpu loaded above
const config = { // just enable all and leave default settings
modelBasePath: 'file://models',
debug: true,
softwareKernels: true, // slower but enhanced precision since face rotation can work in software mode in nodejs environments
cacheSensitivity: 0.01,
face: { enabled: true, detector: { maxDetected: 100, minConfidence: 0.1 } },
object: { enabled: true, maxDetected: 100, minConfidence: 0.1 },
gesture: { enabled: true },
hand: { enabled: true, maxDetected: 100, minConfidence: 0.2 },
body: { enabled: true, maxDetected: 100, minConfidence: 0.1, modelPath: 'https://vladmandic.github.io/human-models/models/movenet-multipose.json' },
};
const poolSize = 4;
const human = new Human.Human(config); // create instance of human
async function saveFile(shape, buffer, result, outFile) {
return new Promise(async (resolve, reject) => { // eslint-disable-line no-async-promise-executor
const outputCanvas = new canvas.Canvas(shape[2], shape[1]); // create canvas
const outputCtx = outputCanvas.getContext('2d');
const inputImage = await canvas.loadImage(buffer); // load image using canvas library
outputCtx.drawImage(inputImage, 0, 0); // draw input image onto canvas
human.draw.all(outputCanvas, result); // use human built-in method to draw results as overlays on canvas
const outStream = fs.createWriteStream(outFile); // write canvas to new image file
outStream.on('finish', () => {
log.data('Output image:', outFile, outputCanvas.width, outputCanvas.height);
resolve();
});
outStream.on('error', (err) => {
log.error('Output error:', outFile, err);
reject();
});
const stream = outputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
stream.pipe(outStream);
});
}
async function processFile(image, inFile, outFile) {
const buffer = fs.readFileSync(inFile);
const tensor = tf.tidy(() => {
const decode = tf.node.decodeImage(buffer, 3);
const expand = tf.expandDims(decode, 0);
const cast = tf.cast(expand, 'float32');
return cast;
});
log.state('Loaded image:', inFile, tensor.shape);
const result = await human.detect(tensor);
human.tf.dispose(tensor);
log.data(`Detected: ${image}:`, 'Face:', result.face.length, 'Body:', result.body.length, 'Hand:', result.hand.length, 'Objects:', result.object.length, 'Gestures:', result.gesture.length);
if (outFile) await saveFile(tensor.shape, buffer, result, outFile);
}
async function main() {
log.header();
globalThis.Canvas = canvas.Canvas; // patch global namespace with canvas library
globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library
log.info('Human:', human.version, 'TF:', tf.version_core);
const configErrors = await human.validate();
if (configErrors.length > 0) log.error('Configuration errors:', configErrors);
await human.load(); // pre-load models
log.info('Loaded models:', human.models.loaded());
const inDir = process.argv[2];
const outDir = process.argv[3];
if (!inDir) {
log.error('Parameters: <input-directory> missing');
return;
}
if (inDir && (!fs.existsSync(inDir) || !fs.statSync(inDir).isDirectory())) {
log.error('Invalid input directory:', fs.existsSync(inDir) && fs.statSync(inDir).isDirectory());
return;
}
if (!outDir) {
log.info('Parameters: <output-directory> missing, images will not be saved');
}
if (outDir && (!fs.existsSync(outDir) || !fs.statSync(outDir).isDirectory())) {
log.error('Invalid output directory:', fs.existsSync(outDir) && fs.statSync(outDir).isDirectory());
return;
}
const dir = fs.readdirSync(inDir);
const images = dir.filter((f) => fs.statSync(path.join(inDir, f)).isFile() && (f.toLocaleLowerCase().endsWith('.jpg') || f.toLocaleLowerCase().endsWith('.jpeg')));
log.info(`Processing folder: ${inDir} entries:`, dir.length, 'images', images.length);
const t0 = performance.now();
const promises = [];
for (let i = 0; i < images.length; i++) {
const inFile = path.join(inDir, images[i]);
const outFile = outDir ? path.join(outDir, images[i]) : null;
promises.push(processFile(images[i], inFile, outFile));
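// await in batches: every poolSize-th iteration waits for the accumulated promises, capping concurrent detections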
if (i % poolSize === 0) await Promise.all(promises);
}
await Promise.all(promises);
const t1 = performance.now();
log.info(`Processed ${images.length} images in ${Math.round(t1 - t0)} ms`);
}
main();

36
demo/offline.html Normal file
View File

@ -0,0 +1,36 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Human: Offline</title>
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
<meta name="mobile-web-app-capable" content="yes">
<meta name="application-name" content="Human">
<meta name="keywords" content="Human">
<meta name="description" content="Human; Author: Vladimir Mandic <mandic00@live.com>">
<meta name="msapplication-tooltip" content="Human; Author: Vladimir Mandic <mandic00@live.com>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="manifest.webmanifest">
<link rel="shortcut icon" href="/favicon.ico" type="image/x-icon">
<link rel="icon" sizes="256x256" href="../assets/icon.png">
<link rel="apple-touch-icon" href="../assets/icon.png">
<link rel="apple-touch-startup-image" href="../assets/icon.png">
<style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../assets/lato-light.woff2') }
body { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; background: black; color: #ebebeb; }
h1 { font-size: 2rem; margin-top: 1.2rem; font-weight: bold; }
a { color: white; }
a:link { color: lightblue; text-decoration: none; }
a:hover { color: lightskyblue; text-decoration: none; }
.row { width: 90vw; margin: auto; margin-top: 100px; text-align: center; }
</style>
</head>
<body>
<div class="row text-center">
<h1>
<a href="/">Human: Offline</a><br>
<img alt="icon" src="../assets/icon.png">
</h1>
</div>
</body>
</html>

View File

@ -0,0 +1,61 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Human Demo</title>
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
<meta name="mobile-web-app-capable" content="yes">
<meta name="application-name" content="Human Demo">
<meta name="keywords" content="Human Demo">
<meta name="description" content="Human Demo; Author: Vladimir Mandic <mandic00@live.com>">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../favicon.ico" type="image/x-icon">
<link rel="icon" sizes="256x256" href="../assets/icons/dash-256.png">
<link rel="apple-touch-icon" href="../assets/icons/dash-256.png">
<link rel="apple-touch-startup-image" href="../assets/icons/dash-256.png">
<style>
@font-face { font-family: 'CenturyGothic'; font-display: swap; font-style: normal; font-weight: 400; src: local('CenturyGothic'), url('../assets/century-gothic.ttf') format('truetype'); }
html { font-size: 18px; }
body { font-size: 1rem; font-family: "CenturyGothic", "Segoe UI", sans-serif; font-variant: small-caps; width: -webkit-fill-available; height: 100%; background: black; color: white; overflow: hidden; margin: 0; }
select { font-size: 1rem; font-family: "CenturyGothic", "Segoe UI", sans-serif; font-variant: small-caps; background: gray; color: white; border: none; }
</style>
<script src="../segmentation/index.js" type="module"></script>
</head>
<body>
<noscript><h1>javascript is required</h1></noscript>
<nav>
<div id="nav" class="nav"></div>
</nav>
<header>
<div id="header" class="header" style="position: fixed; top: 0; right: 0; padding: 4px; margin: 16px; background: rgba(0, 0, 0, 0.5); z-index: 10; line-height: 2rem;">
<label for="mode">mode</label>
<select id="mode" name="mode">
<option value="default">remove background</option>
<option value="alpha">draw alpha channel</option>
<option value="foreground">full foreground</option>
<option value="state">recurrent state</option>
</select><br>
<label for="composite">composite</label>
<select id="composite" name="composite"></select><br>
<label for="ratio">downsample ratio</label>
<input type="range" name="ratio" id="ratio" min="0.1" max="1" value="0.5" step="0.05">
<div id="fps" style="margin-top: 8px"></div>
</div>
</header>
<main>
<div id="main" class="main">
<video id="webcam" style="position: fixed; top: 0; left: 0; width: 50vw; height: 50vh"></video>
<img id="background" alt="background" style="position: fixed; top: 0; right: 0; width: 50vw; height: 50vh" controls></img>
<canvas id="output" style="position: fixed; bottom: 0; left: 0; height: 50vh"></canvas>
<canvas id="merge" style="position: fixed; bottom: 0; right: 0; height: 50vh"></canvas>
</div>
</main>
<footer>
<div id="footer" class="footer"></div>
</footer>
<aside>
<div id="aside" class="aside"></div>
</aside>
</body>
</html>

View File

@ -0,0 +1,99 @@
/**
* Human demo for browsers
* @default Human Library
* @summary <https://github.com/vladmandic/human>
* @author <https://github.com/vladmandic>
* @copyright <https://github.com/vladmandic>
* @license MIT
*/
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
const humanConfig = { // user configuration for human, used to fine-tune behavior
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
filter: { enabled: true, equalization: false, flip: false },
face: { enabled: false },
body: { enabled: false },
hand: { enabled: false },
object: { enabled: false },
gesture: { enabled: false },
segmentation: {
enabled: true,
modelPath: 'rvm.json', // can use rvm, selfie or meet
ratio: 0.5,
mode: 'default',
},
};
const backgroundImage = '../../samples/in/background.jpg';
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
const log = (...msg) => console.log(...msg); // eslint-disable-line no-console
async function main() {
// gather dom elements
const dom = {
background: document.getElementById('background'),
webcam: document.getElementById('webcam'),
output: document.getElementById('output'),
merge: document.getElementById('merge'),
mode: document.getElementById('mode'),
composite: document.getElementById('composite'),
ratio: document.getElementById('ratio'),
fps: document.getElementById('fps'),
};
// set defaults
dom.fps.innerText = 'initializing';
dom.ratio.valueAsNumber = human.config.segmentation.ratio;
dom.background.src = backgroundImage;
dom.composite.innerHTML = ['source-atop', 'color', 'color-burn', 'color-dodge', 'copy', 'darken', 'destination-atop', 'destination-in', 'destination-out', 'destination-over', 'difference', 'exclusion', 'hard-light', 'hue', 'lighten', 'lighter', 'luminosity', 'multiply', 'overlay', 'saturation', 'screen', 'soft-light', 'source-in', 'source-out', 'source-over', 'xor'].map((gco) => `<option value="${gco}">${gco}</option>`).join(''); // eslint-disable-line max-len
const ctxMerge = dom.merge.getContext('2d');
log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
log('platform:', human.env.platform, '| agent:', human.env.agent);
await human.load(); // preload all models
log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
log('models stats:', human.models.stats());
log('models loaded:', human.models.loaded());
await human.warmup(); // warmup function to initialize backend for future faster detection
const numTensors = human.tf.engine().state.numTensors;
// initialize webcam
dom.webcam.onplay = () => { // start processing on video play
log('start processing');
dom.output.width = human.webcam.width;
dom.output.height = human.webcam.height;
dom.merge.width = human.webcam.width;
dom.merge.height = human.webcam.height;
loop(); // eslint-disable-line no-use-before-define
};
await human.webcam.start({ element: dom.webcam, crop: true, width: window.innerWidth / 2, height: window.innerHeight / 2 }); // use human webcam helper methods and associate webcam stream with a dom element
if (!human.webcam.track) dom.fps.innerText = 'webcam error';
// processing loop
async function loop() {
if (!human.webcam.element || human.webcam.paused) return; // check if webcam is valid and playing
human.config.segmentation.mode = dom.mode.value; // get segmentation mode from ui
human.config.segmentation.ratio = dom.ratio.valueAsNumber; // get segmentation downsample ratio from ui
const t0 = Date.now();
const rgba = await human.segmentation(human.webcam.element, human.config); // run model and process results
const t1 = Date.now();
if (!rgba) {
dom.fps.innerText = 'error';
return;
}
dom.fps.innerText = `fps: ${Math.round(10000 / (t1 - t0)) / 10}`; // mark performance
human.draw.tensor(rgba, dom.output); // draw raw output
human.tf.dispose(rgba); // dispose tensors
ctxMerge.globalCompositeOperation = 'source-over';
ctxMerge.drawImage(dom.background, 0, 0); // draw original video to first stacked canvas
ctxMerge.globalCompositeOperation = dom.composite.value;
ctxMerge.drawImage(dom.output, 0, 0); // draw processed output to second stacked canvas
if (numTensors !== human.tf.engine().state.numTensors) log({ leak: human.tf.engine().state.numTensors - numTensors }); // check for memory leaks
requestAnimationFrame(loop);
}
}
window.onload = main;

28
demo/tracker/README.md Normal file
View File

@ -0,0 +1,28 @@
## Tracker
### Based on
<https://github.com/opendatacam/node-moving-things-tracker>
### Build
- remove reference to `lodash`:
> `isEqual` in <tracker.js>
- replace external lib:
> curl https://raw.githubusercontent.com/ubilabs/kd-tree-javascript/master/kdTree.js -o lib/kdTree-min.js
- build with `esbuild`:
> node_modules/.bin/esbuild --bundle tracker.js --format=esm --platform=browser --target=esnext --keep-names --tree-shaking=false --analyze --outfile=/home/vlado/dev/human/demo/tracker/tracker.js --banner:js="/* eslint-disable */"
### Usage
- `computeDistance(item1, item2)`
- `disableKeepInMemory()`
- `enableKeepInMemory()`
- `getAllTrackedItems()`
- `getJSONDebugOfTrackedItems(roundInt = true)`
- `getJSONOfAllTrackedItems()`
- `getJSONOfTrackedItems(roundInt = true)`
- `getTrackedItemsInMOTFormat(frameNb)`
- `reset()`
- `setParams(newParams)`
- `updateTrackedItemsWithNewFrame(detectionsOfThisFrame, frameNb)`
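A minimal usage sketch, assuming the `tracker.js` bundle built above exports the functions listed and that detections follow the `node-moving-things-tracker` shape of `{ x, y, w, h, confidence, name }`:
```js
import * as tracker from './tracker.js'; // ESM bundle produced by the esbuild step above

// hypothetical detections for two consecutive frames
const frame0 = [{ x: 100, y: 80, w: 40, h: 90, confidence: 0.90, name: 'face' }];
const frame1 = [{ x: 104, y: 82, w: 40, h: 90, confidence: 0.88, name: 'face' }];

tracker.setParams({ unMatchedFramesTolerance: 60, iouLimit: 0.1 }); // values mirror the demo ui defaults
tracker.updateTrackedItemsWithNewFrame(frame0, 0); // frameNb 0
tracker.updateTrackedItemsWithNewFrame(frame1, 1); // frameNb 1
console.log(tracker.getJSONOfTrackedItems(true)); // tracked items with stable ids, coordinates rounded
```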

65
demo/tracker/index.html Normal file
View File

@ -0,0 +1,65 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<meta name="viewport" content="width=device-width" id="viewport">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./index.js" type="module"></script>
<style>
html { font-family: 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
body::-webkit-scrollbar { display: none; }
input[type="file"] { font-family: 'Segoe UI'; font-size: 14px; font-variant: small-caps; }
::-webkit-file-upload-button { background: #333333; color: white; border: 0; border-radius: 0; padding: 6px 16px; box-shadow: 4px 4px 4px #222222; font-family: 'Segoe UI'; font-size: 14px; font-variant: small-caps; }
</style>
</head>
<body>
<div style="display: flex">
<video id="video" playsinline style="width: 25vw" controls controlslist="nofullscreen nodownload noremoteplayback" disablepictureinpicture loop></video>
<canvas id="canvas" style="width: 75vw"></canvas>
</div>
<div class="uploader" style="padding: 8px">
<input type="file" name="inputvideo" id="inputvideo" accept="video/*"></input>
<input type="checkbox" id="interpolation" name="interpolation"></input>
<label for="tracker">interpolation</label>
</div>
<form id="config" style="padding: 8px; line-height: 1.6rem;">
tracker |
<input type="checkbox" id="tracker" name="tracker" checked></input>
<label for="tracker">enabled</label> |
<input type="checkbox" id="keepInMemory" name="keepInMemory"></input>
<label for="keepInMemory">keepInMemory</label> |
<br>
tracker source |
<input type="radio" id="box-face" name="box" value="face" checked>
<label for="box-face">face</label> |
<input type="radio" id="box-body" name="box" value="body">
<label for="box-face">body</label> |
<input type="radio" id="box-object" name="box" value="object">
<label for="box-face">object</label> |
<br>
tracker config |
<input type="range" id="unMatchedFramesTolerance" name="unMatchedFramesTolerance" min="0" max="300" step="1", value="60"></input>
<label for="unMatchedFramesTolerance">unMatchedFramesTolerance</label> |
<input type="range" id="iouLimit" name="unMatchedFramesTolerance" min="0" max="1" step="0.01", value="0.1"></input>
<label for="iouLimit">iouLimit</label> |
<input type="range" id="distanceLimit" name="unMatchedFramesTolerance" min="0" max="1" step="0.01", value="0.1"></input>
<label for="distanceLimit">distanceLimit</label> |
<input type="radio" id="matchingAlgorithm-kdTree" name="matchingAlgorithm" value="kdTree" checked>
<label for="matchingAlgorithm-kdTree">kdTree</label> |
<input type="radio" id="matchingAlgorithm-munkres" name="matchingAlgorithm" value="munkres">
<label for="matchingAlgorithm-kdTree">munkres</label> |
</form>
<pre id="status" style="position: absolute; top: 12px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
<pre id="log" style="padding: 8px"></pre>
<div id="performance" style="position: absolute; bottom: 0; width: 100%; padding: 8px; font-size: 0.8rem;"></div>
</body>
</html>

10
demo/tracker/index.js Normal file

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

208
demo/tracker/index.ts Normal file
View File

@ -0,0 +1,208 @@
/**
* Human demo for browsers
* @default Human Library
* @summary <https://github.com/vladmandic/human>
* @author <https://github.com/vladmandic>
* @copyright <https://github.com/vladmandic>
* @license MIT
*/
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/human
import tracker from './tracker.js';
const humanConfig: Partial<H.Config> = { // user configuration for human, used to fine-tune behavior
debug: true,
backend: 'webgl',
// cacheSensitivity: 0,
// cacheModels: false,
// warmup: 'none',
modelBasePath: 'https://vladmandic.github.io/human-models/models',
filter: { enabled: true, equalization: false, flip: false },
face: {
enabled: true,
detector: { rotation: false, maxDetected: 10, minConfidence: 0.3 },
mesh: { enabled: true },
attention: { enabled: false },
iris: { enabled: false },
description: { enabled: false },
emotion: { enabled: false },
antispoof: { enabled: false },
liveness: { enabled: false },
},
body: { enabled: false, maxDetected: 6, modelPath: 'movenet-multipose.json' },
hand: { enabled: false },
object: { enabled: false, maxDetected: 10 },
segmentation: { enabled: false },
gesture: { enabled: false },
};
interface TrackerConfig {
unMatchedFramesTolerance: number, // number of frames an object can remain unmatched before it is considered gone; ignored if fastDelete is set
iouLimit: number, // exclude detections from being matched if their IOU is below this limit; 1 means total overlap, 0 means no overlap
fastDelete: boolean, // remove new objects immediately if they could not be matched in the next frames; if set, unMatchedFramesTolerance is ignored
distanceLimit: number, // distance limit for matching; to exclude values from matching, set their distance above this limit
matchingAlgorithm: 'kdTree' | 'munkres', // algorithm used to match tracks with new detections
}
interface TrackerResult {
id: number,
confidence: number,
bearing: number,
isZombie: boolean,
name: string,
x: number,
y: number,
w: number,
h: number,
}
const trackerConfig: TrackerConfig = {
unMatchedFramesTolerance: 100,
iouLimit: 0.05,
fastDelete: false,
distanceLimit: 1e4,
matchingAlgorithm: 'kdTree',
};
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
const dom = { // grab instances of dom objects so we don't have to look them up later
video: document.getElementById('video') as HTMLVideoElement,
canvas: document.getElementById('canvas') as HTMLCanvasElement,
log: document.getElementById('log') as HTMLPreElement,
fps: document.getElementById('status') as HTMLPreElement,
tracker: document.getElementById('tracker') as HTMLInputElement,
interpolation: document.getElementById('interpolation') as HTMLInputElement,
config: document.getElementById('config') as HTMLFormElement,
ctx: (document.getElementById('canvas') as HTMLCanvasElement).getContext('2d') as CanvasRenderingContext2D,
};
const timestamp = { detect: 0, draw: 0, tensors: 0, start: 0 }; // holds information used to calculate performance and possible memory leaks
const fps = { detectFPS: 0, drawFPS: 0, frames: 0, averageMs: 0 }; // holds calculated fps information for both detect and screen refresh
const log = (...msg) => { // helper method to output messages
dom.log.innerText += msg.join(' ') + '\n';
console.log(...msg); // eslint-disable-line no-console
};
const status = (msg) => dom.fps.innerText = msg; // print status element
async function detectionLoop() { // main detection loop
if (!dom.video.paused && dom.video.readyState >= 2) {
if (timestamp.start === 0) timestamp.start = human.now();
// log('profiling data:', await human.profile(dom.video));
await human.detect(dom.video, humanConfig); // actual detection; we're not capturing the output in a local variable since it can also be accessed via human.result
const tensors = human.tf.memory().numTensors; // check current tensor usage for memory leaks
if (tensors - timestamp.tensors !== 0) log('allocated tensors:', tensors - timestamp.tensors); // printed on start and each time there is a tensor leak
timestamp.tensors = tensors;
fps.detectFPS = Math.round(1000 * 1000 / (human.now() - timestamp.detect)) / 1000;
fps.frames++;
fps.averageMs = Math.round(1000 * (human.now() - timestamp.start) / fps.frames) / 1000;
}
timestamp.detect = human.now();
requestAnimationFrame(detectionLoop); // start new frame immediately
}
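
The tensor bookkeeping inside `detectionLoop` is the usual tfjs leak check: sample `tf.memory().numTensors` once per frame and flag any growth between frames. Isolated as a hypothetical helper:

```ts
// hypothetical helper isolating the leak check from detectionLoop above
let lastTensorCount = 0;
function checkTensorLeak(tf: { memory: () => { numTensors: number } }): void {
  const current = tf.memory().numTensors; // tensors currently allocated by the backend
  if (lastTensorCount !== 0 && current !== lastTensorCount) console.warn('tensor count delta:', current - lastTensorCount);
  lastTensorCount = current;
}
```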
function drawLoop() { // main screen refresh loop
if (!dom.video.paused && dom.video.readyState >= 2) {
const res: H.Result = dom.interpolation.checked ? human.next(human.result) : human.result; // interpolate results if enabled
let tracking: H.FaceResult[] | H.BodyResult[] | H.ObjectResult[] = [];
if (human.config.face.enabled) tracking = res.face;
else if (human.config.body.enabled) tracking = res.body;
else if (human.config.object.enabled) tracking = res.object;
else log('unknown object type');
let data: TrackerResult[] = [];
if (dom.tracker.checked) {
const items = tracking.map((obj) => ({
x: obj.box[0] + obj.box[2] / 2,
y: obj.box[1] + obj.box[3] / 2,
w: obj.box[2],
h: obj.box[3],
name: obj.label || (human.config.face.enabled ? 'face' : 'body'),
confidence: obj.score,
}));
tracker.updateTrackedItemsWithNewFrame(items, fps.frames);
data = tracker.getJSONOfTrackedItems(true) as TrackerResult[];
}
human.draw.canvas(dom.video, dom.canvas); // copy input video frame to output canvas
for (let i = 0; i < tracking.length; i++) {
// @ts-ignore
const name = tracking[i].label || (human.config.face.enabled ? 'face' : 'body');
dom.ctx.strokeRect(tracking[i].box[0], tracking[i].box[1], tracking[i].box[2], tracking[i].box[3]); // box is [x, y, width, height]
dom.ctx.fillText(`id: ${tracking[i].id} ${Math.round(100 * tracking[i].score)}% ${name}`, tracking[i].box[0] + 4, tracking[i].box[1] + 16);
if (data[i]) {
dom.ctx.fillText(`t: ${data[i].id} ${Math.round(100 * data[i].confidence)}% ${data[i].name} ${data[i].isZombie ? 'zombie' : ''}`, tracking[i].box[0] + 4, tracking[i].box[1] + 34);
}
}
}
const now = human.now();
fps.drawFPS = Math.round(1000 * 1000 / (now - timestamp.draw)) / 1000;
timestamp.draw = now;
status(dom.video.paused ? 'paused' : `fps: ${fps.detectFPS.toFixed(1).padStart(5, ' ')} detect | ${fps.drawFPS.toFixed(1).padStart(5, ' ')} draw`); // write status
setTimeout(drawLoop, 30); // throttle refresh from max display rate to a target of ~30 fps
}
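
One detail in `drawLoop` worth calling out: Human reports boxes as `[x, y, width, height]` with a top-left origin, while the tracker expects center coordinates plus size, hence the `box[0] + box[2] / 2` arithmetic. The same mapping as a hypothetical pure helper:

```ts
// hypothetical helper mirroring the map() inside drawLoop: convert a Human
// top-left [x, y, width, height] box into the tracker's center-based item
function toTrackerItem(box: [number, number, number, number], name: string, confidence: number) {
  return {
    x: box[0] + box[2] / 2, // center x
    y: box[1] + box[3] / 2, // center y
    w: box[2],
    h: box[3],
    name,
    confidence,
  };
}
```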
async function handleVideo(file: File) {
const url = URL.createObjectURL(file);
dom.video.src = url;
await dom.video.play();
log('loaded video:', file.name, 'resolution:', [dom.video.videoWidth, dom.video.videoHeight], 'duration:', dom.video.duration);
dom.canvas.width = dom.video.videoWidth;
dom.canvas.height = dom.video.videoHeight;
dom.ctx.strokeStyle = 'white';
dom.ctx.fillStyle = 'white';
dom.ctx.font = '16px Segoe UI';
dom.video.playbackRate = 0.25;
}
function initInput() {
document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
document.body.addEventListener('dragover', (evt) => evt.preventDefault());
document.body.addEventListener('drop', async (evt) => {
evt.preventDefault();
if (evt.dataTransfer) evt.dataTransfer.dropEffect = 'copy';
const file = evt.dataTransfer?.files?.[0];
if (file) await handleVideo(file);
log('video ready state:', dom.video.readyState);
});
(document.getElementById('inputvideo') as HTMLInputElement).onchange = async (evt) => {
evt.preventDefault();
const file = evt.target?.['files']?.[0];
if (file) await handleVideo(file);
};
dom.config.onchange = () => {
trackerConfig.distanceLimit = (document.getElementById('distanceLimit') as HTMLInputElement).valueAsNumber;
trackerConfig.iouLimit = (document.getElementById('iouLimit') as HTMLInputElement).valueAsNumber;
trackerConfig.unMatchedFramesTolerance = (document.getElementById('unMatchedFramesTolerance') as HTMLInputElement).valueAsNumber;
trackerConfig.matchingAlgorithm = (document.getElementById('matchingAlgorithm-kdTree') as HTMLInputElement).checked ? 'kdTree' : 'munkres';
tracker.setParams(trackerConfig);
if ((document.getElementById('keepInMemory') as HTMLInputElement).checked) tracker.enableKeepInMemory();
else tracker.disableKeepInMemory();
tracker.reset();
log('tracker config change', JSON.stringify(trackerConfig));
humanConfig.face!.enabled = (document.getElementById('box-face') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
humanConfig.body!.enabled = (document.getElementById('box-body') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
humanConfig.object!.enabled = (document.getElementById('box-object') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
};
dom.tracker.onchange = (evt) => {
log('tracker', (evt.target as HTMLInputElement).checked ? 'enabled' : 'disabled');
tracker.setParams(trackerConfig);
tracker.reset();
};
}
async function main() { // main entry point
log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
log('platform:', human.env.platform, '| agent:', human.env.agent);
status('loading...');
await human.load(); // preload all models
log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
log('models loaded:', human.models.loaded());
status('initializing...');
await human.warmup(); // warmup to initialize the backend so subsequent detection runs faster
initInput(); // initialize input
await detectionLoop(); // start detection loop
drawLoop(); // start draw loop
}
window.onload = main;

Some files were not shown because too many files have changed in this diff