Compare commits

...

No commits in common. "main" and "2.2.1" have entirely different histories.
main ... 2.2.1

799 changed files with 214577 additions and 93772 deletions

.api-extractor.json

@@ -1,27 +0,0 @@
{
"$schema": "https://developer.microsoft.com/json-schemas/api-extractor/v7/api-extractor.schema.json",
"mainEntryPointFilePath": "types/lib/src/human.d.ts",
"compiler": {
"skipLibCheck": true
},
"newlineKind": "lf",
"dtsRollup": {
"enabled": true,
"untrimmedFilePath": "types/human.d.ts"
},
"docModel": { "enabled": false },
"tsdocMetadata": { "enabled": false },
"apiReport": { "enabled": false },
"messages": {
"compilerMessageReporting": {
"default": { "logLevel": "warning" }
},
"extractorMessageReporting": {
"default": { "logLevel": "warning" },
"ae-missing-release-tag": { "logLevel": "none" }
},
"tsdocMessageReporting": {
"default": { "logLevel": "warning" }
}
}
}

.eslintrc.json

@@ -1,221 +1,83 @@
{
"globals": {
},
"rules": {
"@typescript-eslint/no-require-imports":"off"
},
"overrides": [
{
"files": ["**/*.ts"],
"parser": "@typescript-eslint/parser",
"parserOptions": { "ecmaVersion": "latest", "project": ["./tsconfig.json"] },
"plugins": ["@typescript-eslint"],
"env": {
"browser": true,
"commonjs": false,
"node": false,
"es2021": true
},
"extends": [
"airbnb-base",
"eslint:recommended",
"plugin:@typescript-eslint/eslint-recommended",
"plugin:@typescript-eslint/recommended",
"plugin:@typescript-eslint/recommended-requiring-type-checking",
"plugin:@typescript-eslint/strict",
"plugin:import/recommended",
"plugin:promise/recommended"
],
"rules": {
"@typescript-eslint/ban-ts-comment":"off",
"@typescript-eslint/dot-notation":"off",
"@typescript-eslint/no-empty-interface":"off",
"@typescript-eslint/no-inferrable-types":"off",
"@typescript-eslint/no-misused-promises":"off",
"@typescript-eslint/no-unnecessary-condition":"off",
"@typescript-eslint/no-unsafe-argument":"off",
"@typescript-eslint/no-unsafe-assignment":"off",
"@typescript-eslint/no-unsafe-call":"off",
"@typescript-eslint/no-unsafe-member-access":"off",
"@typescript-eslint/no-unsafe-return":"off",
"@typescript-eslint/no-require-imports":"off",
"@typescript-eslint/no-empty-object-type":"off",
"@typescript-eslint/non-nullable-type-assertion-style":"off",
"@typescript-eslint/prefer-for-of":"off",
"@typescript-eslint/prefer-nullish-coalescing":"off",
"@typescript-eslint/prefer-ts-expect-error":"off",
"@typescript-eslint/restrict-plus-operands":"off",
"@typescript-eslint/restrict-template-expressions":"off",
"dot-notation":"off",
"guard-for-in":"off",
"import/extensions": ["off", "always"],
"import/no-unresolved":"off",
"import/prefer-default-export":"off",
"lines-between-class-members":"off",
"max-len": [1, 275, 3],
"no-async-promise-executor":"off",
"no-await-in-loop":"off",
"no-bitwise":"off",
"no-continue":"off",
"no-lonely-if":"off",
"no-mixed-operators":"off",
"no-param-reassign":"off",
"no-plusplus":"off",
"no-regex-spaces":"off",
"no-restricted-syntax":"off",
"no-return-assign":"off",
"no-void":"off",
"object-curly-newline":"off",
"prefer-destructuring":"off",
"prefer-template":"off",
"radix":"off"
}
},
{
"files": ["**/*.d.ts"],
"parser": "@typescript-eslint/parser",
"parserOptions": { "ecmaVersion": "latest", "project": ["./tsconfig.json"] },
"plugins": ["@typescript-eslint"],
"env": {
"browser": true,
"commonjs": false,
"node": false,
"es2021": true
},
"extends": [
"airbnb-base",
"eslint:recommended",
"plugin:@typescript-eslint/eslint-recommended",
"plugin:@typescript-eslint/recommended",
"plugin:@typescript-eslint/recommended-requiring-type-checking",
"plugin:@typescript-eslint/strict",
"plugin:import/recommended",
"plugin:promise/recommended"
],
"rules": {
"@typescript-eslint/array-type":"off",
"@typescript-eslint/ban-types":"off",
"@typescript-eslint/consistent-indexed-object-style":"off",
"@typescript-eslint/consistent-type-definitions":"off",
"@typescript-eslint/no-empty-interface":"off",
"@typescript-eslint/no-explicit-any":"off",
"@typescript-eslint/no-invalid-void-type":"off",
"@typescript-eslint/no-unnecessary-type-arguments":"off",
"@typescript-eslint/no-unnecessary-type-constraint":"off",
"comma-dangle":"off",
"indent":"off",
"lines-between-class-members":"off",
"max-classes-per-file":"off",
"max-len":"off",
"no-multiple-empty-lines":"off",
"no-shadow":"off",
"no-use-before-define":"off",
"quotes":"off",
"semi":"off"
}
},
{
"files": ["**/*.js"],
"parserOptions": { "sourceType": "module", "ecmaVersion": "latest" },
"plugins": [],
"globals": {},
"env": {
"browser": true,
"commonjs": true,
"node": true,
"es2021": true
"es2020": true
},
"parser": "@typescript-eslint/parser",
"parserOptions": {
"ecmaVersion": 2020
},
"plugins": [
"@typescript-eslint"
],
"extends": [
"airbnb-base",
"eslint:recommended",
"plugin:@typescript-eslint/eslint-recommended",
"plugin:@typescript-eslint/recommended",
"plugin:import/errors",
"plugin:import/warnings",
"plugin:json/recommended-with-comments",
"plugin:node/recommended",
"plugin:promise/recommended"
],
"rules": {
"dot-notation":"off",
"import/extensions": ["error", "always"],
"import/no-extraneous-dependencies":"off",
"max-len": [1, 275, 3],
"no-await-in-loop":"off",
"no-bitwise":"off",
"no-continue":"off",
"no-mixed-operators":"off",
"no-param-reassign":"off",
"no-plusplus":"off",
"no-regex-spaces":"off",
"no-restricted-syntax":"off",
"no-return-assign":"off",
"node/no-unsupported-features/es-syntax":"off",
"object-curly-newline":"off",
"prefer-destructuring":"off",
"prefer-template":"off",
"radix":"off"
}
},
{
"files": ["**/*.json"],
"parserOptions": { "ecmaVersion": "latest" },
"plugins": ["json"],
"env": {
"browser": false,
"commonjs": false,
"node": false,
"es2021": false
},
"extends": []
},
{
"files": ["**/*.html"],
"parserOptions": { "sourceType": "module", "ecmaVersion": "latest" },
"parser": "@html-eslint/parser",
"plugins": ["html", "@html-eslint"],
"env": {
"browser": true,
"commonjs": false,
"node": false,
"es2021": false
},
"extends": ["plugin:@html-eslint/recommended"],
"rules": {
"@html-eslint/element-newline":"off",
"@html-eslint/attrs-newline":"off",
"@html-eslint/indent": ["error", 2]
}
},
{
"files": ["**/*.md"],
"plugins": ["markdown"],
"processor": "markdown/markdown",
"rules": {
"no-undef":"off"
}
},
{
"files": ["**/*.md/*.js"],
"rules": {
"@typescript-eslint/no-unused-vars":"off",
"@typescript-eslint/triple-slash-reference":"off",
"import/newline-after-import":"off",
"import/no-unresolved":"off",
"no-console":"off",
"no-global-assign":"off",
"no-multi-spaces":"off",
"no-restricted-globals":"off",
"no-undef":"off",
"no-unused-vars":"off",
"node/no-missing-import":"off",
"node/no-missing-require":"off",
"promise/catch-or-return":"off"
}
}
],
"ignorePatterns": [
"node_modules",
"assets",
"demo/helpers",
"dist",
"demo/helpers/*.js",
"demo/typescript/*.js",
"demo/faceid/*.js",
"demo/tracker/*.js",
"typedoc"
]
"media",
"models",
"node_modules"
],
"rules": {
"@typescript-eslint/ban-ts-comment": "off",
"@typescript-eslint/explicit-module-boundary-types": "off",
"@typescript-eslint/no-shadow": "error",
"@typescript-eslint/no-var-requires": "off",
"@typescript-eslint/triple-slash-reference": "off",
"camelcase": "off",
"dot-notation": "off",
"func-names": "off",
"guard-for-in": "off",
"import/extensions": "off",
"import/no-extraneous-dependencies": "off",
"import/no-named-as-default": "off",
"import/no-unresolved": "off",
"import/prefer-default-export": "off",
"lines-between-class-members": "off",
"max-len": [1, 275, 3],
"newline-per-chained-call": "off",
"no-async-promise-executor": "off",
"no-await-in-loop": "off",
"no-bitwise": "off",
"no-case-declarations":"off",
"no-continue": "off",
"no-lonely-if": "off",
"no-loop-func": "off",
"no-mixed-operators": "off",
"no-param-reassign":"off",
"no-plusplus": "off",
"no-process-exit": "off",
"no-regex-spaces": "off",
"no-restricted-globals": "off",
"no-restricted-syntax": "off",
"no-return-assign": "off",
"no-shadow": "off",
"no-underscore-dangle": "off",
"node/no-missing-import": ["error", { "tryExtensions": [".js", ".json", ".ts"] }],
"node/no-unpublished-import": "off",
"node/no-unpublished-require": "off",
"node/no-unsupported-features/es-syntax": "off",
"node/shebang": "off",
"object-curly-newline": "off",
"prefer-destructuring": "off",
"prefer-template":"off",
"promise/always-return": "off",
"promise/catch-or-return": "off",
"promise/no-nesting": "off",
"radix": "off"
}
}

11
.github/FUNDING.yml vendored

@@ -1,11 +0,0 @@
github: [vladmandic]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']

10
.gitignore vendored

@@ -1,9 +1,3 @@
node_modules/
types/lib
node_modules
pnpm-lock.yaml
package-lock.json
*.swp
samples/**/*.mp4
samples/**/*.webm
temp
tmp
assets/tf*

.hintrc

@@ -3,11 +3,8 @@
"web-recommended"
],
"browserslist": [
"chrome >= 90",
"edge >= 90",
"firefox >= 100",
"android >= 90",
"safari >= 15"
"last 1 versions",
"not ie < 20"
],
"hints": {
"no-inline-styles": "off",

.markdownlint.json

@@ -1,7 +1,6 @@
{
"MD012": false,
"MD013": false,
"MD029": false,
"MD033": false,
"MD036": false,
"MD041": false

.npmignore

@@ -4,4 +4,5 @@ samples
typedoc
test
wiki
types/lib
dist/tfjs.esm.js
dist/tfjs.esm.js.map

6
.npmrc

@@ -1,5 +1 @@
force=true
omit=dev
legacy-peer-deps=true
strict-peer-dependencies=false
node-options='--no-deprecation'
force = true

10
.vscode/settings.json vendored

@@ -1,10 +0,0 @@
{
"search.exclude": {
"dist/*": true,
"node_modules/*": true,
"types": true,
"typedoc": true,
},
"search.useGlobalIgnoreFiles": true,
"search.useParentIgnoreFiles": true
}

CHANGELOG.md

@@ -1,432 +1,19 @@
# @vladmandic/human
# packageJson
Version: **3.3.5**
Description: **Human: AI-powered 3D Face Detection & Rotation Tracking, Face Description & Recognition, Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction, Gesture Recognition**
Version: **undefined**
Description: **undefined**
Author: **Vladimir Mandic <mandic00@live.com>**
License: **MIT**
Author: **undefined**
License: **undefined**
Repository: **<https://github.com/vladmandic/human>**
## Changelog
### **3.3.5** 2025/02/05 mandic00@live.com
### **origin/main** 2024/10/24 mandic00@live.com
- add human.draw.tensor method
### **3.3.4** 2024/10/24 mandic00@live.com
### **3.3.3** 2024/10/14 mandic00@live.com
- add loaded property to model stats and mark models not loaded correctly.
- release build
### **3.3.2** 2024/09/11 mandic00@live.com
- full rebuild
### **3.3.1** 2024/09/11 mandic00@live.com
- add config.face.detector.square option
- human 3.3 alpha test run
- human 3.3 alpha with new build environment
- release rebuild
- fix flazeface tensor scale and update build platform
### **3.2.2** 2024/04/17 mandic00@live.com
### **release: 3.2.1** 2024/02/15 mandic00@live.com
### **3.2.1** 2024/02/15 mandic00@live.com
### **3.2.0** 2023/12/06 mandic00@live.com
- set browser false when navigator object is empty
- https://github.com/vladmandic/human/issues/402
### **release: 3.1.2** 2023/09/18 mandic00@live.com
- full rebuild
### **3.1.2** 2023/09/18 mandic00@live.com
- major toolkit upgrade
- full rebuild
- major toolkit upgrade
### **3.1.1** 2023/08/05 mandic00@live.com
- fixes plus tfjs upgrade for new release
### **3.0.7** 2023/06/12 mandic00@live.com
- full rebuild
- fix memory leak in histogramequalization
- initial work on tracker
### **3.0.6** 2023/03/21 mandic00@live.com
- add optional crop to multiple models
- fix movenet-multipose
- add electron detection
- fix gender-ssrnet-imdb
- add movenet-multipose workaround
- rebuild and publish
- add face.detector.minsize configurable setting
- add affectnet
### **3.0.5** 2023/02/02 mandic00@live.com
- add gear-e models
- detect react-native
- redo blazeface annotations
### **3.0.4** 2023/01/29 mandic00@live.com
- make naviator calls safe
- fix facedetector-only configs
### **3.0.3** 2023/01/07 mandic00@live.com
- full rebuild
### **3.0.2** 2023/01/06 mandic00@live.com
- default face.rotation disabled
### **release: 3.0.1** 2022/11/22 mandic00@live.com
### **3.0.1** 2022/11/22 mandic00@live.com
- support dynamic loads
- polish demos
- add facedetect demo and fix model async load
- enforce markdown linting
- cleanup git history
- default empty result
- refactor draw and models namespaces
- refactor distance
- add basic anthropometry
- added webcam id specification
- include external typedefs
- prepare external typedefs
- rebuild all
- include project files for types
- architectural improvements
- refresh dependencies
- add named exports
- add draw label templates
- reduce dev dependencies
- tensor rank strong typechecks
- rebuild dependencies
### **2.11.1** 2022/10/09 mandic00@live.com
- add rvm segmentation model
- add human.webcam methods
- create funding.yml
- fix rotation interpolation
### **2.10.3** 2022/09/21 mandic00@live.com
- add human.video method
### **2.10.2** 2022/09/11 mandic00@live.com
- add node.js esm compatibility (#292)
- release
### **2.10.1** 2022/09/07 mandic00@live.com
- release candidate
- add config flags
- test update
- release preview
- optimize startup sequence
- reorder backend init code
- test embedding
- embedding test
- add browser iife tests
- minor bug fixes and increased test coverage
- extend release tests
- add model load exception handling
- add softwarekernels config option
- expand type safety
- full eslint rule rewrite
### **2.9.4** 2022/08/20 mandic00@live.com
- add browser test
- add tensorflow library detection
- fix wasm detection
- enumerate additional models
- release refresh
### **2.9.3** 2022/08/10 mandic00@live.com
- rehault testing framework
- release refresh
- add insightface
### **2.9.2** 2022/08/08 mandic00@live.com
- release rebuild
### **2.9.1** 2022/07/25 mandic00@live.com
- full rebuild
- release cleanup
- tflite experiments
- add load monitor test
- beta for upcoming major release
- swtich to release version of tfjs
- placeholder for face contours
- improve face compare in main demo
- add webview support
- fix(gear): ensure gear.modelpath is used for loadmodel()
- npm default install should be prod only
- fix npm v7 compatibility
- add getmodelstats method
- rebuild
- release build
### **2.8.1** 2022/06/08 mandic00@live.com
- webgpu and wasm optimizations
- add faceboxes prototype
- full rebuild
### **2.7.4** 2022/05/24 mandic00@live.com
### **2.7.3** 2022/05/24 mandic00@live.com
- add face.mesh.keepinvalid config flag
- initial work for new facemesh model
### **2.7.2** 2022/05/12 mandic00@live.com
- fix demo when used with video files
- major release
### **2.7.1** 2022/05/09 mandic00@live.com
- support 4k input
- add attention draw methods
- fix coloring function
- enable precompile as part of warmup
- prepare release beta
- change default face crop
- beta release 2.7
- refactor draw methods
- implement face attention model
- add electronjs demo
- rebuild
### **2.6.5** 2022/04/01 mandic00@live.com
- bundle offscreencanvas types
- prototype precompile pass
- fix changelog generation
- fix indexdb config check
### **2.6.4** 2022/02/27 mandic00@live.com
- fix types typo
- refresh
- add config option wasmplatformfetch
### **2.6.3** 2022/02/10 mandic00@live.com
- rebuild
### **2.6.2** 2022/02/07 mandic00@live.com
- release rebuild
### **2.6.1** 2022/01/20 mandic00@live.com
- implement model caching using indexdb
- prototype global fetch handler
- fix face box and hand tracking when in front of face
### **2.5.8** 2022/01/14 mandic00@live.com
- fix samples
- fix(src): typo
- change on how face box is calculated
### **2.5.7** 2021/12/27 mandic00@live.com
- fix posenet
- release refresh
### **2.5.6** 2021/12/15 mandic00@live.com
- strong type for string enums
- rebuild
- fix node detection in electron environment
### **2.5.5** 2021/12/01 mandic00@live.com
- added human-motion
- add offscreencanvas typedefs
- release preview
- fix face box scaling on detection
- cleanup
### **2.5.4** 2021/11/22 mandic00@live.com
- prototype blazepose detector
- minor fixes
- add body 3d interpolation
- edit blazepose keypoints
- new build process
### **2.5.3** 2021/11/18 mandic00@live.com
- create typedef rollup
- optimize centernet
- cache frequent tf constants
- add extra face rotation prior to mesh
- release 2.5.2
- improve error handling
### **2.5.2** 2021/11/14 mandic00@live.com
- fix mobilefacenet module
- fix gear and ssrnet modules
- fix for face crop when mesh is disabled
- implement optional face masking
- add similarity score range normalization
- add faceid demo
- documentation overhaul
- auto tensor shape and channels handling
- disable use of path2d in node
- add liveness module and facerecognition demo
- initial version of facerecognition demo
- rebuild
- add type defs when working with relative path imports
- disable humangl backend if webgl 1.0 is detected
- add additional hand gestures
### **2.5.1** 2021/11/08 mandic00@live.com
- new human.compare api
- added links to release notes
- new frame change detection algorithm
- add histogram equalization
- implement wasm missing ops
- performance and memory optimizations
- fix react compatibility issues
- improve box rescaling for all modules
- improve precision using wasm backend
- refactor predict with execute
- patch tfjs type defs
- start 2.5 major version
- build and docs cleanup
- fix firefox bug
### **2.4.3** 2021/10/28 mandic00@live.com
- additional human.performance counters
### **2.4.2** 2021/10/27 mandic00@live.com
- add ts demo
- switch from es2018 to es2020 for main build
- switch to custom tfjs for demos
- release 2.4
### **2.4.1** 2021/10/25 mandic00@live.com
- refactoring plus jsdoc comments
- increase face similarity match resolution
- time based caching
- turn on minification
- initial work on skiptime
- added generic types
- enhanced typing exports
- add optional autodetected custom wasm path
### **2.3.6** 2021/10/21 mandic00@live.com
- fix for human.draw labels and typedefs
- refactor human.env to a class type
- add human.custom.esm using custom tfjs build
### **2.3.5** 2021/10/19 mandic00@live.com
- removed direct usage of performance.now
### **2.3.4** 2021/10/19 mandic00@live.com
- minor blazepose optimizations
- compress samples
- remove posenet from default package
- enhanced movenet postprocessing
- use transferrable buffer for worker messages
- add optional anti-spoofing module
- add node-match advanced example using worker thread pool
- package updates
- optimize image preprocessing
- set webgpu optimized flags
- major precision improvements to movenet and handtrack
- image processing fixes
- redesign body and hand caching and interpolation
- demo default config cleanup
- improve gaze and face angle visualizations in draw
- release 2.3.1
### **2.3.1** 2021/10/06 mandic00@live.com
- workaround for chrome offscreencanvas bug
- fix backend conflict in webworker
- add blazepose v2 and add annotations to body results
- fix backend order initialization
- added docker notes
- breaking change: new similarity and match methods
- tweaked default values
- enable handtrack as default model
- redesign face processing
- refactoring
- define app specific types
- implement box caching for movenet
- autodetect number of bodies and hands
- upload new samples
- new samples gallery and major code folder restructure
- new release
### **2.2.3** 2021/09/24 mandic00@live.com
- optimize model loading
- support segmentation for nodejs
- redo segmentation and handtracking
- prototype handtracking
- automated browser tests
- support for dynamic backend switching
- initial automated browser tests
- enhanced automated test coverage
- more automated tests
- added configuration validation
- prevent validation failed on some model combinations
- webgl exception handling
### **2.2.2** 2021/09/17 mandic00@live.com
- experimental webgl status monitoring
- major release
### **2.2.1** 2021/09/16 mandic00@live.com
- add vr model demo
### **origin/main** 2021/09/16 mandic00@live.com
- all tests passing
- redefine draw helpers interface
- add simple webcam and webrtc demo
@@ -451,6 +38,8 @@
- implement event emitters
- fix iife loader
- simplify dependencies
- fix file permissions
- remove old build server
- change build process
- add benchmark info
- simplify canvas handling in nodejs
@@ -493,6 +82,7 @@
### **2.1.1** 2021/07/29 mandic00@live.com
- proposal #141
- add note on manually disping tensor
- modularize model loading
@@ -506,7 +96,9 @@
- reorganize demos
- fix centernet box width & height
- add body segmentation sample
- add release notes
### **release: 2.0.1** 2021/06/08 mandic00@live.com
- release 2.0
### **2.0.1** 2021/06/08 mandic00@live.com
@@ -535,6 +127,7 @@
- implemented human.next global interpolation method
- finished draw buffering and smoothing and enabled by default
- implemented service worker
- quantized centernet
- release candidate
- added usage restrictions
- quantize handdetect model
@@ -568,6 +161,8 @@
### **1.9.1** 2021/05/21 mandic00@live.com
- caching improvements
- sanitize server input
- remove nanodet weights from default distribution
- add experimental mb3-centernet object detection
- individual model skipframes values still max high threshold for caching
- config.videooptimized has been removed and config.cachesensitivity has been added instead
@@ -589,7 +184,9 @@
### **1.8.2** 2021/05/04 mandic00@live.com
- release 1.8 with major changes and tfjs 3.6.0
### **release 1.8 with major changes and tfjs 3.6.0** 2021/04/30 mandic00@live.com
### **1.8.1** 2021/04/30 mandic00@live.com
@@ -623,6 +220,7 @@
- added filter.flip feature
- added demo load image from http
- mobile demo optimization and iris gestures
- full test run
- full rebuild
- new look
- added benchmarks
@@ -732,6 +330,7 @@
- add experimental nanodet object detection
- full models signature
- cleanup
### **1.1.7** 2021/03/16 mandic00@live.com
@@ -777,6 +376,7 @@
### **1.0.3** 2021/03/10 mandic00@live.com
- strong typing for public classes and hide private classes
- re-added blazeface-front
- enhanced age, gender, emotion detection
- full rebuild
@@ -785,73 +385,151 @@
- remove blazeface-front, blazepose-upper, faceboxes
- remove blazeface-front and faceboxes
### **release: 1.0.1** 2021/03/09 mandic00@live.com
### **1.0.1** 2021/03/09 mandic00@live.com
- fix for face detector when mesh is disabled
- optimize for npm
- 0.40.9
### **0.40.9** 2021/03/08 mandic00@live.com
- fix performance issue when running with low confidence
- 0.40.8
- 0.40.7
### **0.40.8** 2021/03/08 mandic00@live.com
### **0.40.7** 2021/03/06 mandic00@live.com
- implemented 3d face angle calculations
- 0.40.6
### **0.40.6** 2021/03/06 mandic00@live.com
- add curve draw output
- 0.40.5
### **0.40.5** 2021/03/05 mandic00@live.com
- fix human.draw
- 0.40.4
### **0.40.4** 2021/03/05 mandic00@live.com
- cleanup blazepose code
- fix demo
- 0.40.3
- 0.40.2
### **0.40.3** 2021/03/05 mandic00@live.com
### **0.40.2** 2021/03/05 mandic00@live.com
- added blazepose-upper
- 0.40.1
### **0.40.1** 2021/03/04 mandic00@live.com
- implement blazepose and update demos
- add todo list
- 0.30.6
### **0.30.6** 2021/03/03 mandic00@live.com
- fine tuning age and face models
- 0.30.5
### **0.30.5** 2021/03/02 mandic00@live.com
- add debug logging flag
- 0.30.4
### **0.30.4** 2021/03/01 mandic00@live.com
- added skipinitial flag
- 0.30.3
### **0.30.3** 2021/02/28 mandic00@live.com
- typo
- 0.30.2
### **0.30.2** 2021/02/26 mandic00@live.com
- rebuild
- fix typo
- 0.30.1
- 0.20.11
- 0.20.10
- 0.20.9
- 0.20.8
- 0.20.7
- build fix
- 0.20.6
### **0.30.1** 2021/02/25 mandic00@live.com
### **0.20.11** 2021/02/24 mandic00@live.com
### **0.20.10** 2021/02/22 mandic00@live.com
### **0.20.9** 2021/02/21 mandic00@live.com
- remove extra items
- simmilarity fix
### **0.20.8** 2021/02/21 mandic00@live.com
- embedding fix
- 0.20.5
### **0.20.7** 2021/02/21 mandic00@live.com
- build fix
### **0.20.6** 2021/02/21 mandic00@live.com
- embedding fix
### **0.20.5** 2021/02/21 mandic00@live.com
- fix imagefx and add dev builds
### **0.20.4** 2021/02/19 mandic00@live.com
- 0.20.4
- 0.20.3
### **0.20.3** 2021/02/17 mandic00@live.com
- rebuild
- 0.20.2
### **0.20.2** 2021/02/13 mandic00@live.com
- merge branch 'main' of https://github.com/vladmandic/human into main
- create codeql-analysis.yml
- create security.md
- add templates
- 0.20.1
### **0.20.1** 2021/02/08 mandic00@live.com
- menu fixes
- convert to typescript
- 0.11.5
### **0.11.5** 2021/02/06 mandic00@live.com
- added faceboxes alternative model
- 0.11.4
- 0.11.3
- 0.11.2
### **0.11.4** 2021/02/06 mandic00@live.com
### **0.11.3** 2021/02/02 mandic00@live.com
### **0.11.2** 2021/01/30 mandic00@live.com
- added warmup for nodejs
- 0.11.1
- 0.10.2
- 0.10.1
### **update for tfjs 3.0.0** 2021/01/29 mandic00@live.com
### **0.11.1** 2021/01/29 mandic00@live.com
### **0.10.2** 2021/01/22 mandic00@live.com
### **0.10.1** 2021/01/20 mandic00@live.com
### **0.9.26** 2021/01/18 mandic00@live.com
- fix face detection when mesh is disabled
- added minification notes
- version bump
### **0.9.25** 2021/01/13 mandic00@live.com
@@ -913,6 +591,7 @@
- conditional hand rotation
- staggered skipframes
- fix permissions
### **0.9.13** 2020/12/08 mandic00@live.com
@@ -964,7 +643,9 @@
### **0.9.3** 2020/11/16 mandic00@live.com
- switched to minified build
- web worker fixes
### **release: 1.2** 2020/11/15 mandic00@live.com
- full rebuild
### **0.9.2** 2020/11/14 mandic00@live.com
@@ -1021,6 +702,7 @@
- optimized model loader
- merge branch 'main' of https://github.com/vladmandic/human into main
- created wiki
- delete bug_report.md
- optimize font resizing
- fix nms sync call
@@ -1044,6 +726,7 @@
- optimized camera and mobile layout
- fixed worker and filter compatibility
- removed test code
### **0.7.2** 2020/11/04 mandic00@live.com
@@ -1120,6 +803,7 @@
### **0.4.8** 2020/10/28 mandic00@live.com
- revert "updated menu handler"
- fix webpack compatibility issue
### **0.4.7** 2020/10/27 mandic00@live.com
@@ -1207,6 +891,7 @@
### **0.2.8** 2020/10/13 mandic00@live.com
- added example image
### **0.2.7** 2020/10/13 mandic00@live.com
@@ -1222,6 +907,7 @@
### **0.2.4** 2020/10/12 mandic00@live.com
- removed extra files
### **0.2.3** 2020/10/12 mandic00@live.com
@@ -1229,6 +915,9 @@
### **0.2.2** 2020/10/12 mandic00@live.com
### **release: 1.0** 2020/10/12 mandic00@live.com
### **0.2.1** 2020/10/12 mandic00@live.com
- added sample image

337
README.md

@@ -1,9 +1,9 @@
[![](https://img.shields.io/static/v1?label=Sponsor&message=%E2%9D%A4&logo=GitHub&color=%23fe8e86)](https://github.com/sponsors/vladmandic)
![Git Version](https://img.shields.io/github/package-json/v/vladmandic/human?style=flat-square&svg=true&label=git)
![NPM Version](https://img.shields.io/npm/v/@vladmandic/human.png?style=flat-square)
![Last Commit](https://img.shields.io/github/last-commit/vladmandic/human?style=flat-square&svg=true)
![License](https://img.shields.io/github/license/vladmandic/human?style=flat-square&svg=true)
![GitHub Status Checks](https://img.shields.io/github/checks-status/vladmandic/human/main?style=flat-square&svg=true)
![Vulnerabilities](https://img.shields.io/snyk/vulnerabilities/github/vladmandic/human?style=flat-square&svg=true)
# Human Library
@@ -13,99 +13,42 @@
<br>
## Highlights
JavaScript module using TensorFlow/JS Machine Learning library
- Compatible with most server-side and client-side environments and frameworks
- Combines multiple machine learning models which can be switched on-demand depending on the use-case
- Related models are executed in an attention pipeline to provide details when needed
- Optimized input pre-processing that can enhance image quality of any type of input
- Detection of frame changes to trigger only required models for improved performance
- Intelligent temporal interpolation to provide smooth results regardless of processing performance
- Simple unified API
- Built-in Image, Video and WebCam handling
[*Jump to Quick Start*](#quick-start)
- **Browser**:
Compatible with both desktop and mobile platforms
Compatible with *CPU*, *WebGL*, *WASM* backends
Compatible with *WebWorker* execution
- **NodeJS**:
Compatible with both software *tfjs-node* and
GPU accelerated backends *tfjs-node-gpu* using CUDA libraries
<br>
## Compatibility
**Browser**:
- Compatible with both desktop and mobile platforms
- Compatible with *WebGPU*, *WebGL*, *WASM*, *CPU* backends
- Compatible with *WebWorker* execution
- Compatible with *WebView*
- Primary platform: *Chromium*-based browsers
- Secondary platform: *Firefox*, *Safari*
**NodeJS**:
- Compatible with *WASM* backend for execution on architectures where *tensorflow* binaries are not available
- Compatible with *tfjs-node* using software execution via *tensorflow* shared libraries
- Compatible with *tfjs-node* using GPU-accelerated execution via *tensorflow* shared libraries and nVidia CUDA
- Supported versions are from **14.x** to **22.x**
- NodeJS version **23.x** is not supported due to breaking changes and issues with `@tensorflow/tfjs`
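A minimal NodeJS sketch of the above (assuming `@tensorflow/tfjs-node` is installed; `input.jpg` is a hypothetical input file):
```js
// minimal NodeJS sketch: decode an image with tfjs-node and run detection
const fs = require('fs');
const tf = require('@tensorflow/tfjs-node'); // native tensorflow bindings
const Human = require('@vladmandic/human').default;

async function main() {
  const human = new Human({ backend: 'tensorflow' }); // use native backend in NodeJS
  const buffer = fs.readFileSync('input.jpg'); // hypothetical input image
  const tensor = tf.node.decodeImage(buffer); // decode jpg/png buffer into a tensor
  const result = await human.detect(tensor);
  console.log('faces:', result.face.length, 'bodies:', result.body.length);
  tf.dispose(tensor); // input tensors must be disposed manually
}

main();
```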
<br>
## Releases
- [Release Notes](https://github.com/vladmandic/human/releases)
- [NPM Link](https://www.npmjs.com/package/@vladmandic/human)
## Demos
*Check out [**Simple Live Demo**](https://vladmandic.github.io/human/demo/typescript/index.html), a fully annotated app that makes a good starting point ([html](https://github.com/vladmandic/human/blob/main/demo/typescript/index.html))([code](https://github.com/vladmandic/human/blob/main/demo/typescript/index.ts))*
*Check out [**Main Live Demo**](https://vladmandic.github.io/human/demo/index.html) app for advanced processing of webcam, video stream or static images with all possible tunable options*
Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) app for processing of live WebCam video or static images
- To start video detection, simply press *Play*
- To process images, simply drag & drop in your Browser window
- Note: For optimal performance, select only models you'd like to use
- Note: If you have a modern GPU, *WebGL* (default) backend is preferred, otherwise select *WASM* backend
- Note: If you have a modern GPU, WebGL (default) backend is preferred, otherwise select WASM backend
<br>
## Demos
- [**List of all Demo applications**](https://github.com/vladmandic/human/wiki/Demos)
- [**Live Examples gallery**](https://vladmandic.github.io/human/samples/index.html)
### Browser Demos
*All browser demos are self-contained without any external dependencies*
- **Full** [[*Live*]](https://vladmandic.github.io/human/demo/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo): Main browser demo app that showcases all Human capabilities
- **Simple** [[*Live*]](https://vladmandic.github.io/human/demo/typescript/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/typescript): Simple WebCam processing demo in TypeScript
- **Embedded** [[*Live*]](https://vladmandic.github.io/human/demo/video/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/video/index.html): Even simpler demo with tiny code embedded in HTML file
- **Face Detect** [[*Live*]](https://vladmandic.github.io/human/demo/facedetect/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facedetect): Extracts faces from images and processes details
- **Face Match** [[*Live*]](https://vladmandic.github.io/human/demo/facematch/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Extracts faces from images, calculates face descriptors and similarities, and matches them to a known database
- **Face ID** [[*Live*]](https://vladmandic.github.io/human/demo/faceid/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/faceid): Runs multiple checks to validate webcam input before performing face match to faces in IndexedDB
- **Multi-thread** [[*Live*]](https://vladmandic.github.io/human/demo/multithread/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread): Runs each Human module in a separate web worker for highest possible performance
- **NextJS** [[*Live*]](https://vladmandic.github.io/human-next/out/index.html) [[*Details*]](https://github.com/vladmandic/human-next): Use Human with TypeScript, NextJS and ReactJS
- **ElectronJS** [[*Details*]](https://github.com/vladmandic/human-electron): Use Human with TypeScript and ElectronJS to create standalone cross-platform apps
- **3D Analysis with BabylonJS** [[*Live*]](https://vladmandic.github.io/human-motion/src/index.html) [[*Details*]](https://github.com/vladmandic/human-motion): 3D tracking and visualization of head, face, eye, body and hand
- **VRM Virtual Model Tracking with Three.JS** [[*Live*]](https://vladmandic.github.io/human-three-vrm/src/human-vrm.html) [[*Details*]](https://github.com/vladmandic/human-three-vrm): VR model with head, face, eye, body and hand tracking
- **VRM Virtual Model Tracking with BabylonJS** [[*Live*]](https://vladmandic.github.io/human-bjs-vrm/src/index.html) [[*Details*]](https://github.com/vladmandic/human-bjs-vrm): VR model with head, face, eye, body and hand tracking
### NodeJS Demos
*NodeJS demos may require extra dependencies which are used to decode inputs*
*See the header of each demo for its dependencies, as they are not automatically installed with `Human`*
- **Main** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node.js): Process images from files, folders or URLs using native methods
- **Canvas** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-canvas.js): Process image from file or URL and draw results to a new image file using `node-canvas`
- **Video** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-video.js): Processing of video input using `ffmpeg`
- **WebCam** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-webcam.js): Processing of webcam screenshots using `fswebcam`
- **Events** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-event.js): Showcases usage of `Human` eventing to get notifications on processing
- **Similarity** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-similarity.js): Compares two input images for similarity of detected faces
- **Face Match** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch/node-match.js): Parallel processing of face **match** in multiple child worker threads
- **Multiple Workers** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread/node-multiprocess.js): Runs multiple parallel `human` instances by dispatching them to a pool of pre-created worker processes
- **Dynamic Load** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Loads Human dynamically with multiple different desired backends
- [*Live:* **Main Application**](https://vladmandic.github.io/human/demo/index.html)
- [*Live:* **Face Extraction, Description, Identification and Matching**](https://vladmandic.github.io/human/demo/facematch/index.html)
- [*Live:* **Face Extraction and 3D Rendering**](https://vladmandic.github.io/human/demo/face3d/index.html)
- [*Live:* **Multithreaded Detection Showcasing Maximum Performance**](https://vladmandic.github.io/human/demo/multithread/index.html)
- [*Live:* **VR Model with Head, Face, Eye, Body and Hand tracking**](https://vladmandic.github.io/human-vrm/src/human-vrm.html)
## Project pages
- [**Code Repository**](https://github.com/vladmandic/human)
- [**NPM Package**](https://www.npmjs.com/package/@vladmandic/human)
- [**Issues Tracker**](https://github.com/vladmandic/human/issues)
- [**TypeDoc API Specification - Main class**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
- [**TypeDoc API Specification - Full**](https://vladmandic.github.io/human/typedoc/)
- [**TypeDoc API Specification**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
- [**Change Log**](https://github.com/vladmandic/human/blob/main/CHANGELOG.md)
- [**Current To-do List**](https://github.com/vladmandic/human/blob/main/TODO.md)
@@ -114,11 +57,8 @@
- [**Home**](https://github.com/vladmandic/human/wiki)
- [**Installation**](https://github.com/vladmandic/human/wiki/Install)
- [**Usage & Functions**](https://github.com/vladmandic/human/wiki/Usage)
- [**Configuration Details**](https://github.com/vladmandic/human/wiki/Config)
- [**Result Details**](https://github.com/vladmandic/human/wiki/Result)
- [**Customizing Draw Methods**](https://github.com/vladmandic/human/wiki/Draw)
- [**Caching & Smoothing**](https://github.com/vladmandic/human/wiki/Caching)
- [**Input Processing**](https://github.com/vladmandic/human/wiki/Image)
- [**Configuration Details**](https://github.com/vladmandic/human/wiki/Configuration)
- [**Output Details**](https://github.com/vladmandic/human/wiki/Outputs)
- [**Face Recognition & Face Description**](https://github.com/vladmandic/human/wiki/Embedding)
- [**Gesture Recognition**](https://github.com/vladmandic/human/wiki/Gesture)
- [**Common Issues**](https://github.com/vladmandic/human/wiki/Issues)
@@ -134,9 +74,7 @@
- [**Performance Profiling**](https://github.com/vladmandic/human/wiki/Profiling)
- [**Platform Support**](https://github.com/vladmandic/human/wiki/Platforms)
- [**Diagnostic and Performance trace information**](https://github.com/vladmandic/human/wiki/Diag)
- [**Dockerize Human applications**](https://github.com/vladmandic/human/wiki/Docker)
- [**List of Models & Credits**](https://github.com/vladmandic/human/wiki/Models)
- [**Models Download Repository**](https://github.com/vladmandic/human-models)
- [**Security & Privacy Policy**](https://github.com/vladmandic/human/blob/main/SECURITY.md)
- [**License & Usage Restrictions**](https://github.com/vladmandic/human/blob/main/LICENSE)
@@ -148,76 +86,81 @@
<hr><br>
## App Examples
Visit [Examples gallery](https://vladmandic.github.io/human/samples/index.html) for more examples
[<img src="assets/samples.jpg" width="640"/>](assets/samples.jpg)
<br>
## Options
All options as presented in the demo application...
[demo/index.html](demo/index.html)
[<img src="assets/screenshot-menu.png"/>](assets/screenshot-menu.png)
> [demo/index.html](demo/index.html)
![Options visible in demo](assets/screenshot-menu.png)
<br>
## Examples
<br>
**Face Close-up:**
![Face](assets/screenshot-face.jpg)
<br>
**Face under a high angle:**
![Angle](assets/screenshot-angle.jpg)
<br>
**Full Person Details:**
![Pose](assets/screenshot-person.jpg)
<br>
**Pose Detection:**
![Pose](assets/screenshot-pose.jpg)
<br>
**Body Segmentation and Background Replacement:**
![Pose](assets/screenshot-segmentation.jpg)
<br>
**Large Group:**
![Group](assets/screenshot-group.jpg)
<br>
**VR Model Tracking:**
![vrmodel](assets/screenshot-vrm.jpg)
<br>
**Results Browser:**
[ *Demo -> Display -> Show Results* ]<br>
[<img src="assets/screenshot-results.png"/>](assets/screenshot-results.png)
![Results](assets/screenshot-results.png)
<br>
## Advanced Examples
1. **Face Similarity Matching:**
**Face Similarity Matching:**
Extracts all faces from provided input images,
sorts them by similarity to the selected face
and optionally matches detected faces against a database of known people to guess their names
> [demo/facematch](demo/facematch/index.html)
[<img src="assets/screenshot-facematch.jpg" width="640"/>](assets/screenshot-facematch.jpg)
2. **Face Detect:**
Extracts all detected faces from loaded images on-demand and highlights face details on a selected face
> [demo/facedetect](demo/facedetect/index.html)
[<img src="assets/screenshot-facedetect.jpg" width="640"/>](assets/screenshot-facedetect.jpg)
3. **Face ID:**
Performs a validation check on webcam input to detect a real face and matches it to known faces stored in a database
> [demo/faceid](demo/faceid/index.html)
[<img src="assets/screenshot-faceid.jpg" width="640"/>](assets/screenshot-faceid.jpg)
![Face Matching](assets/screenshot-facematch.jpg)
<br>
4. **3D Rendering:**
> [human-motion](https://github.com/vladmandic/human-motion)
**Face3D OpenGL Rendering:**
> [demo/face3d](demo/face3d/index.html)
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-face.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-face.jpg)
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-body.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-body.jpg)
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-hand.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-hand.jpg)
<br>
5. **VR Model Tracking:**
> [human-three-vrm](https://github.com/vladmandic/human-three-vrm)
> [human-bjs-vrm](https://github.com/vladmandic/human-bjs-vrm)
[<img src="https://github.com/vladmandic/human-three-vrm/raw/main/assets/human-vrm-screenshot.jpg" width="640"/>](https://github.com/vladmandic/human-three-vrm/raw/main/assets/human-vrm-screenshot.jpg)
6. **Human as OS native application:**
> [human-electron](https://github.com/vladmandic/human-electron)
![Face Matching](assets/screenshot-face3d.jpg)
<br>
**468-Point Face Mesh Details:**
(view in full resolution to see keypoints)
[<img src="assets/facemesh.png" width="400"/>](assets/facemesh.png)
![FaceMesh](assets/facemesh.png)
<br><hr><br>
@@ -227,25 +170,44 @@ Simply load `Human` (*IIFE version*) directly from a cloud CDN in your HTML file
(pick one: `jsdelivr`, `unpkg` or `cdnjs`)
```html
<!DOCTYPE HTML>
<script src="https://cdn.jsdelivr.net/npm/@vladmandic/human/dist/human.js"></script>
<script src="https://unpkg.dev/@vladmandic/human/dist/human.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/human/3.0.0/human.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/human/2.1.5/human.js"></script>
```
For details, including how to use `Browser ESM` version or `NodeJS` version of `Human`, see [**Installation**](https://github.com/vladmandic/human/wiki/Install)
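A hedged sketch of the *Browser ESM* variant (assuming a bundler that resolves package imports):
```js
// browser ESM sketch: import the ESM bundle instead of loading the IIFE via <script>
import Human from '@vladmandic/human'; // resolves to dist/human.esm.js
const human = new Human({ backend: 'webgl' });
```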
<br>
## Code Examples
## Inputs
Simple app that uses Human to process video input and
`Human` library can process all known input types:
- `Image`, `ImageData`, `ImageBitmap`, `Canvas`, `OffscreenCanvas`, `Tensor`,
- `HTMLImageElement`, `HTMLCanvasElement`, `HTMLVideoElement`, `HTMLMediaElement`
Additionally, `HTMLVideoElement`, `HTMLMediaElement` can be a standard `<video>` tag that links to:
- WebCam on user's system
- Any supported video type
For example: `.mp4`, `.avi`, etc.
- Additional video types supported via *HTML5 Media Source Extensions*
Live streaming examples:
- **HLS** (*HTTP Live Streaming*) using `hls.js`
- **DASH** (Dynamic Adaptive Streaming over HTTP) using `dash.js`
- **WebRTC** media track using built-in support
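As a small sketch (element id is hypothetical), any of the listed input types goes through the same detection call:
```js
// sketch: human.detect() accepts any of the input types listed above
const human = new Human();
const image = document.getElementById('image-id'); // hypothetical HTMLImageElement
human.detect(image).then((result) => console.log(result.face, result.gesture));
```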
<br>
## Example
Example simple app that uses Human to process video input and
draw output on screen using internal draw helper functions
```js
// create instance of human with simple configuration using default values
const config = { backend: 'webgl' };
const human = new Human.Human(config);
const human = new Human(config);
// select input HTMLVideoElement and output HTMLCanvasElement from page
const inputVideo = document.getElementById('video-id');
const outputCanvas = document.getElementById('canvas-id');
@@ -264,7 +226,6 @@ function detectVideo() {
human.draw.gesture(outputCanvas, result.gesture);
// and loop immediate to the next frame
requestAnimationFrame(detectVideo);
return result;
});
}
@@ -325,7 +286,7 @@ async function detectVideo() {
async function drawVideo() {
if (result) { // check if result is available
const interpolated = human.next(result); // get smoothed result using last-known results
const interpolated = human.next(result); // calculate next interpolated frame
human.draw.all(outputCanvas, interpolated); // draw the frame
}
requestAnimationFrame(drawVideo); // run draw loop
@@ -335,108 +296,27 @@ detectVideo(); // start detection loop
drawVideo(); // start draw loop
```
or the same, but using built-in full video processing instead of running a manual frame-by-frame loop:
```js
const human = new Human(); // create instance of Human
const inputVideo = document.getElementById('video-id');
const outputCanvas = document.getElementById('canvas-id');
async function drawResults() {
const interpolated = human.next(); // get smoothed result using last-known results
human.draw.all(outputCanvas, interpolated); // draw the frame
requestAnimationFrame(drawResults); // run draw loop
}
human.video(inputVideo); // start detection loop which continuously updates results
drawResults(); // start draw loop
```
or using built-in webcam helper methods that take care of video handling completely:
```js
const human = new Human(); // create instance of Human
const outputCanvas = document.getElementById('canvas-id');
async function drawResults() {
const interpolated = human.next(); // get smoothed result using last-known results
human.draw.canvas(outputCanvas, human.webcam.element); // draw current webcam frame
human.draw.all(outputCanvas, interpolated); // draw the frame detection results
requestAnimationFrame(drawResults); // run draw loop
}
await human.webcam.start({ crop: true });
human.video(human.webcam.element); // start detection loop which continuously updates results
drawResults(); // start draw loop
```
And for even better results, you can run detection in a separate web worker thread
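A rough sketch of that pattern (worker file name is hypothetical; post back only serializable parts of the result):
```js
// main thread: capture a frame and transfer its pixel buffer to a worker
// (worker file name 'human-worker.js' is hypothetical)
const worker = new Worker('human-worker.js');
const video = document.getElementById('video-id');
const canvas = document.createElement('canvas');
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
const ctx = canvas.getContext('2d');
ctx.drawImage(video, 0, 0);
const pixels = ctx.getImageData(0, 0, canvas.width, canvas.height);
worker.postMessage({ buffer: pixels.data.buffer, width: canvas.width, height: canvas.height }, [pixels.data.buffer]); // transfer, do not copy
worker.onmessage = (msg) => console.log('detected faces:', msg.data.faces);
```
```js
// human-worker.js: classic worker that runs detection off the main thread
importScripts('https://cdn.jsdelivr.net/npm/@vladmandic/human/dist/human.js');
const human = new Human.Human();
onmessage = async (msg) => {
  const image = new ImageData(new Uint8ClampedArray(msg.data.buffer), msg.data.width, msg.data.height);
  const result = await human.detect(image);
  postMessage({ faces: result.face.length }); // post back only serializable data
};
```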
<br><hr><br>
## Inputs
`Human` library can process all known input types:
- `Image`, `ImageData`, `ImageBitmap`, `Canvas`, `OffscreenCanvas`, `Tensor`,
- `HTMLImageElement`, `HTMLCanvasElement`, `HTMLVideoElement`, `HTMLMediaElement`
Additionally, `HTMLVideoElement`, `HTMLMediaElement` can be a standard `<video>` tag that links to:
- WebCam on user's system
- Any supported video type
e.g. `.mp4`, `.avi`, etc.
- Additional video types supported via *HTML5 Media Source Extensions*
e.g.: **HLS** (*HTTP Live Streaming*) using `hls.js` or **DASH** (*Dynamic Adaptive Streaming over HTTP*) using `dash.js`
- **WebRTC** media track using built-in support
<br><hr><br>
## Detailed Usage
- [**Wiki Home**](https://github.com/vladmandic/human/wiki)
- [**List of all available methods, properties and namespaces**](https://github.com/vladmandic/human/wiki/Usage)
- [**TypeDoc API Specification - Main class**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
- [**TypeDoc API Specification - Full**](https://vladmandic.github.io/human/typedoc/)
![typedoc](assets/screenshot-typedoc.png)
<br><hr><br>
## TypeDefs
`Human` is written using TypeScript strong typing and ships with full **TypeDefs** for all classes defined by the library bundled in `types/human.d.ts` and enabled by default
*Note*: This does not include embedded `tfjs`
If you want to use embedded `tfjs` inside `Human` (`human.tf` namespace) and still have full **typedefs**, add this code:
> import type * as tfjs from '@vladmandic/human/dist/tfjs.esm';
> const tf = human.tf as typeof tfjs;
This is not enabled by default as `Human` does not ship with full **TFJS TypeDefs** due to size considerations
Enabling `tfjs` TypeDefs as above creates additional project (dev-only as only types are required) dependencies as defined in `@vladmandic/human/dist/tfjs.esm.d.ts`:
> @tensorflow/tfjs-core, @tensorflow/tfjs-converter, @tensorflow/tfjs-backend-wasm, @tensorflow/tfjs-backend-webgl
<br><hr><br>
## Default models
Default models in Human library are:
- **Face Detection**: *MediaPipe BlazeFace Back variation*
- **Face Mesh**: *MediaPipe FaceMesh*
- **Face Iris Analysis**: *MediaPipe Iris*
- **Face Description**: *HSE FaceRes*
- **Emotion Detection**: *Oarriaga Emotion*
- **Body Analysis**: *MoveNet Lightning variation*
- **Hand Analysis**: *HandTrack & MediaPipe HandLandmarks*
- **Body Segmentation**: *Google Selfie*
- **Object Detection**: *CenterNet with MobileNet v3*
- **Face Detection**: MediaPipe BlazeFace - Back variation
- **Face Mesh**: MediaPipe FaceMesh
- **Face Iris Analysis**: MediaPipe Iris
- **Face Description**: HSE FaceRes
- **Emotion Detection**: Oarriaga Emotion
- **Body Analysis**: MoveNet - Lightning variation
- **Hand Analysis**: MediaPipe Hands
- **Body Segmentation**: Google Selfie
- **Object Detection**: MB3 CenterNet
- **Body Segmentation**: Google Selfie
Note that alternative models are provided and can be enabled via configuration
For example, body pose detection by default uses *MoveNet Lightning*, but can be switched to *MoveNet Thunder* for higher precision or *MoveNet MultiPose* for multi-person detection or even *PoseNet*, *BlazePose* or *EfficientPose* depending on the use case
For example, `PoseNet` model can be switched for `BlazePose`, `EfficientPose` or `MoveNet` model depending on the use case
For more info, see [**Configuration Details**](https://github.com/vladmandic/human/wiki/Configuration) and [**List of Models**](https://github.com/vladmandic/human/wiki/Models)
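For instance, a hedged configuration sketch for switching the body model (model file name assumes the standard layout of the models download repository):
```js
// sketch: switch body detection from default MoveNet-Lightning to MoveNet-Thunder
const human = new Human({
  modelBasePath: 'https://vladmandic.github.io/human-models/models/', // assumed hosted models location
  body: { enabled: true, modelPath: 'movenet-thunder.json' },
});
```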
@@ -448,9 +328,9 @@ For more info, see [**Configuration Details**](https://github.com/vladmandic/hum
<br><hr><br>
`Human` library is written in [TypeScript](https://www.typescriptlang.org/docs/handbook/intro.html) **5.1** using [TensorFlow/JS](https://www.tensorflow.org/js/) **4.10** and conforming to latest `JavaScript` [ECMAScript version 2022](https://262.ecma-international.org/) standard
Build target for distributables is `JavaScript` [ECMAScript version 2018](https://262.ecma-international.org/9.0/)
`Human` library is written in `TypeScript` [4.4](https://www.typescriptlang.org/docs/handbook/intro.html)
Conforming to `JavaScript` [ECMAScript version 2020](https://www.ecma-international.org/ecma-262/11.0/index.html) standard
Build target is `JavaScript` [ECMAScript version 2018](https://262.ecma-international.org/9.0/)
<br>
@@ -459,7 +339,6 @@ and [**API Specification**](https://vladmandic.github.io/human/typedoc/classes/H
<br>
[![](https://img.shields.io/static/v1?label=Sponsor&message=%E2%9D%A4&logo=GitHub&color=%23fe8e86)](https://github.com/sponsors/vladmandic)
![Stars](https://img.shields.io/github/stars/vladmandic/human?style=flat-square&svg=true)
![Forks](https://badgen.net/github/forks/vladmandic/human)
![Code Size](https://img.shields.io/github/languages/code-size/vladmandic/human?style=flat-square&svg=true)

83
TODO.md

@@ -1,38 +1,75 @@
# To-Do list for Human library
## Work-in-Progress
## Work in Progress
<hr><br>
WebGL shader optimizations for faster load and initial detection
## Known Issues & Limitations
- Fix shader packing: <https://github.com/tensorflow/tfjs/issues/5343>
- Add and benchmark WGSL for WebGPU
### Face with Attention
<br>
`FaceMesh-Attention` is not supported when using `WASM` backend due to missing kernel op in **TFJS**
No issues with default model `FaceMesh`
## Exploring
- Optical Flow: <https://docs.opencv.org/3.3.1/db/d7f/tutorial_js_lucas_kanade.html>
- TFLite Models: <https://js.tensorflow.org/api_tflite/0.0.1-alpha.4/>
<br>
## Known Issues
<br>
### Face Detection
Enhanced rotation correction for face detection is not working in NodeJS due to missing kernel op in TFJS
Feature is automatically disabled in NodeJS without user impact
- Backend NodeJS missing kernel op `RotateWithOffset`
<https://github.com/tensorflow/tfjs/issues/5473>
<br>
### Hand Detection
Enhanced rotation correction for hand detection is not working in NodeJS due to missing kernel op in TFJS
Feature is automatically disabled in NodeJS without user impact
- Backend NodeJS missing kernel op `RotateWithOffset`
<https://github.com/tensorflow/tfjs/issues/5473>
Hand detection using WASM backend has reduced precision due to math rounding errors in backend
<br>
### Body Detection
MoveNet MultiPose model does not work with WASM backend due to missing F32 implementation
- Backend WASM missing F32 implementation
<https://github.com/tensorflow/tfjs/issues/5516>
### Object Detection
`NanoDet` model is not supported when using `WASM` backend due to missing kernel op in **TFJS**
No issues with default model `MB3-CenterNet`
Object detection using CenterNet or NanoDet models is not working when using WASM backend due to missing kernel ops in TFJS
## Body Detection using MoveNet-MultiPose
- Backend WASM missing kernel op `Mod`
<https://github.com/tensorflow/tfjs/issues/5110>
- Backend WASM missing kernel op `SparseToDense`
<https://github.com/tensorflow/tfjs/issues/4824>
Model does not return valid detection scores (all other functionality is not impacted)
### WebGPU Backend
### Firefox
Implementation of WebGPU backend
Experimental support only
Running in **web workers** requires `OffscreenCanvas` which is still disabled by default in **Firefox**
Enable via `about:config` -> `gfx.offscreencanvas.enabled`
[Details](https://developer.mozilla.org/en-US/docs/Web/API/OffscreenCanvas#browser_compatibility)
*Target: `Human` v2.3 with `Chrome` v94 and `TFJS` v4.0*
### Safari
<br>
No support for running in **web workers** as Safari still does not support `OffscreenCanvas`
[Details](https://developer.mozilla.org/en-US/docs/Web/API/OffscreenCanvas#browser_compatibility)
## React-Native
`Human` support for **React-Native** is best-effort, but not part of the main development focus
<hr><br>
- Backend WebGPU missing kernel ops
<https://github.com/tensorflow/tfjs/issues/5496>
- Backend WebGPU incompatible with web workers
<https://github.com/tensorflow/tfjs/issues/5467>
- Backend WebGPU incompatible with sync read calls
<https://github.com/tensorflow/tfjs/issues/5468>

Binary and suppressed file diffs (file names shown where the viewer provided them):

- unnamed binary modified: 595 KiB -> 1.1 MiB
- unnamed file: diff suppressed because one or more lines are too long
- assets/sample-result.json: new file, 117 lines, diff suppressed because one or more lines are too long
- unnamed binary deleted: 261 KiB
- assets/screenshot-angle.jpg: new file, 59 KiB
- assets/screenshot-face.jpg: new file, 90 KiB
- unnamed binary added: 55 KiB
- unnamed binary deleted: 70 KiB
- unnamed binary deleted: 47 KiB
- unnamed binary modified: 321 KiB -> 366 KiB
- assets/screenshot-group.jpg: new file, 369 KiB
- unnamed binary modified: 22 KiB -> 41 KiB
- unnamed binary added: 170 KiB
- assets/screenshot-pose.jpg: new file, 74 KiB
- unnamed binary modified: 14 KiB -> 36 KiB
- unnamed binary added: 62 KiB
- unnamed binary deleted: 38 KiB
- unnamed binary modified: 42 KiB -> 47 KiB

153
build.js

@@ -1,153 +0,0 @@
const fs = require('fs');
const path = require('path');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const Build = require('@vladmandic/build').Build; // eslint-disable-line node/no-unpublished-require
const APIExtractor = require('@microsoft/api-extractor'); // eslint-disable-line node/no-unpublished-require
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
const packageJSON = require('./package.json');
const logFile = 'test/build.log';
const modelsOut = 'models/models.json';
const modelsFolders = [
'./models',
'../human-models/models',
'../blazepose/model/',
'../anti-spoofing/model',
'../efficientpose/models',
'../insightface/models',
'../movenet/models',
'../nanodet/models',
];
const apiExtractorIgnoreList = [ // eslint-disable-line no-unused-vars
'ae-missing-release-tag',
'tsdoc-param-tag-missing-hyphen',
'tsdoc-escape-right-brace',
'tsdoc-undefined-tag',
'tsdoc-escape-greater-than',
'ae-unresolved-link',
'ae-forgotten-export',
'tsdoc-malformed-inline-tag',
'tsdoc-unnecessary-backslash',
];
const regEx = [
{ search: 'types="@webgpu/types/dist"', replace: 'path="../src/types/webgpu.d.ts"' },
{ search: 'types="offscreencanvas"', replace: 'path="../src/types/offscreencanvas.d.ts"' },
];
function copyFile(src, dst) {
if (!fs.existsSync(src)) {
log.warn('Copy:', { input: src, output: dst });
return;
}
log.state('Copy:', { input: src, output: dst });
const buffer = fs.readFileSync(src);
fs.writeFileSync(dst, buffer);
}
function writeFile(str, dst) {
log.state('Write:', { output: dst });
fs.writeFileSync(dst, str);
}
function regExFile(src, entries) {
if (!fs.existsSync(src)) {
log.warn('Filter:', { src });
return;
}
log.state('Filter:', { input: src });
for (const entry of entries) {
const buffer = fs.readFileSync(src, 'UTF-8');
const lines = buffer.split(/\r?\n/);
const out = [];
for (const line of lines) {
if (line.includes(entry.search)) out.push(line.replace(entry.search, entry.replace));
else out.push(line);
}
fs.writeFileSync(src, out.join('\n'));
}
}
async function analyzeModels() {
log.info('Analyze models:', { folders: modelsFolders.length, result: modelsOut });
let totalSize = 0;
const models = {};
const allModels = [];
for (const folder of modelsFolders) {
try {
if (!fs.existsSync(folder)) continue;
const stat = fs.statSync(folder);
if (!stat.isDirectory()) continue; // isDirectory is a method; without the call the check never triggers
const dir = fs.readdirSync(folder);
const found = dir.map((f) => `file://${folder}/${f}`).filter((f) => f.endsWith('json'));
log.state('Models', { folder, models: found.length });
allModels.push(...found);
} catch {
// log.warn('Cannot enumerate:', modelFolder);
}
}
for (const url of allModels) {
// if (!f.endsWith('.json')) continue;
// const url = `file://${modelsDir}/${f}`;
const model = new tf.GraphModel(url); // create model prototype and decide whether to load from cache or from the original model url
model.findIOHandler();
const artifacts = await model.handler.load();
const size = artifacts?.weightData?.byteLength || 0;
totalSize += size;
const name = path.basename(url).replace('.json', '');
if (!models[name]) models[name] = size;
}
const json = JSON.stringify(models, null, 2);
fs.writeFileSync(modelsOut, json);
log.state('Models:', { count: Object.keys(models).length, totalSize });
}
async function main() {
log.logFile(logFile);
log.data('Build', { name: packageJSON.name, version: packageJSON.version });
// run production build
const build = new Build();
await build.run('production');
// patch tfjs typedefs
copyFile('node_modules/@vladmandic/tfjs/types/tfjs-core.d.ts', 'types/tfjs-core.d.ts');
copyFile('node_modules/@vladmandic/tfjs/types/tfjs.d.ts', 'types/tfjs.esm.d.ts');
copyFile('src/types/tsconfig.json', 'types/tsconfig.json');
copyFile('src/types/eslint.json', 'types/.eslintrc.json');
copyFile('src/types/tfjs.esm.d.ts', 'dist/tfjs.esm.d.ts');
regExFile('types/tfjs-core.d.ts', regEx);
// run api-extractor to create typedef rollup
const extractorConfig = APIExtractor.ExtractorConfig.loadFileAndPrepare('.api-extractor.json');
try {
const extractorResult = APIExtractor.Extractor.invoke(extractorConfig, {
localBuild: true,
showVerboseMessages: false,
messageCallback: (msg) => {
msg.handled = true;
if (msg.logLevel === 'none' || msg.logLevel === 'verbose' || msg.logLevel === 'info') return;
if (msg.sourceFilePath?.includes('/node_modules/')) return;
// if (apiExtractorIgnoreList.reduce((prev, curr) => prev || msg.messageId.includes(curr), false)) return; // those are external issues outside of human control
log.data('API', { level: msg.logLevel, category: msg.category, id: msg.messageId, file: msg.sourceFilePath, line: msg.sourceFileLine, text: msg.text });
},
});
log.state('API-Extractor:', { succeeded: extractorResult.succeeded, errors: extractorResult.errorCount, warnings: extractorResult.warningCount });
} catch (err) {
log.error('API-Extractor:', err);
}
regExFile('types/human.d.ts', regEx);
writeFile('export * from \'../types/human\';', 'dist/human.esm-nobundle.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.esm.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.node-gpu.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.node.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.node-wasm.d.ts');
// generate model signature
await analyzeModels();
log.info('Human Build complete...', { logFile });
}
main();

View File

@ -7,15 +7,13 @@
},
"profiles": {
"production": ["clean", "compile", "typings", "typedoc", "lint", "changelog"],
"development": ["serve", "watch", "compile"],
"serve": ["serve"],
"clean": ["clean"]
"development": ["serve", "watch", "compile"]
},
"clean": {
"locations": ["dist/*", "types/*", "typedoc/*"]
},
"lint": {
"locations": [ "**/*.json", "src/**/*.ts", "test/**/*.js", "demo/**/*.js", "**/*.md" ],
"locations": [ "*.json", "src/**/*.ts", "test/**/*.js", "demo/**/*.js" ],
"rules": { }
},
"changelog": {
@ -24,8 +22,8 @@
"serve": {
"sslKey": "node_modules/@vladmandic/build/cert/https.key",
"sslCrt": "node_modules/@vladmandic/build/cert/https.crt",
"httpPort": 8000,
"httpsPort": 8001,
"httpPort": 10030,
"httpsPort": 10031,
"documentRoot": ".",
"defaultFolder": "demo",
"defaultFile": "index.html"
@ -34,18 +32,9 @@
"global": {
"target": "es2018",
"sourcemap": false,
"treeShaking": true,
"ignoreAnnotations": true,
"banner": { "js": "/*\n Human\n homepage: <https://github.com/vladmandic/human>\n author: <https://github.com/vladmandic>'\n*/\n" }
},
"targets": [
{
"name": "tfjs/browser/version",
"platform": "browser",
"format": "esm",
"input": "tfjs/tf-version.ts",
"output": "dist/tfjs.version.js"
},
{
"name": "tfjs/nodejs/cpu",
"platform": "node",
@ -84,7 +73,6 @@
"format": "cjs",
"input": "tfjs/tf-node-wasm.ts",
"output": "dist/tfjs.esm.js",
"minify": false,
"external": ["@tensorflow"]
},
{
@ -95,13 +83,21 @@
"output": "dist/human.node-wasm.js",
"external": ["@tensorflow"]
},
{
"name": "tfjs/browser/version",
"platform": "browser",
"format": "esm",
"input": "tfjs/tf-version.ts",
"output": "dist/tfjs.version.js",
"external": ["fs", "os", "buffer", "util"]
},
{
"name": "tfjs/browser/esm/nobundle",
"platform": "browser",
"format": "esm",
"input": "tfjs/tf-browser.ts",
"output": "dist/tfjs.esm.js",
"external": ["@tensorflow"]
"external": ["@tensorflow", "fs", "os", "buffer", "util"]
},
{
"name": "human/browser/esm/nobundle",
@ -109,8 +105,8 @@
"format": "esm",
"input": "src/human.ts",
"output": "dist/human.esm-nobundle.js",
"sourcemap": false,
"external": ["@tensorflow"]
"sourcemap": true,
"external": ["@tensorflow", "fs", "os", "buffer", "util"]
},
{
"name": "tfjs/browser/esm/bundle",
@ -118,8 +114,8 @@
"format": "esm",
"input": "tfjs/tf-browser.ts",
"output": "dist/tfjs.esm.js",
"sourcemap": false,
"minify": true
"sourcemap": true,
"external": ["fs", "os", "buffer", "util"]
},
{
"name": "human/browser/iife/bundle",
@ -129,7 +125,7 @@
"output": "dist/human.js",
"minify": true,
"globalName": "Human",
"external": ["@tensorflow"]
"external": ["fs", "os", "buffer", "util"]
},
{
"name": "human/browser/esm/bundle",
@ -138,42 +134,14 @@
"input": "src/human.ts",
"output": "dist/human.esm.js",
"sourcemap": true,
"minify": false,
"external": ["@tensorflow"],
"typings": "types/lib",
"external": ["fs", "os", "buffer", "util"],
"typings": "types",
"typedoc": "typedoc"
},
{
"name": "demo/typescript",
"platform": "browser",
"format": "esm",
"input": "demo/typescript/index.ts",
"output": "demo/typescript/index.js",
"sourcemap": true,
"external": ["*/human.esm.js"]
},
{
"name": "demo/faceid",
"platform": "browser",
"format": "esm",
"input": "demo/faceid/index.ts",
"output": "demo/faceid/index.js",
"sourcemap": true,
"external": ["*/human.esm.js"]
},
{
"name": "demo/tracker",
"platform": "browser",
"format": "esm",
"input": "demo/tracker/index.ts",
"output": "demo/tracker/index.js",
"sourcemap": true,
"external": ["*/human.esm.js"]
}
]
},
"watch": {
"locations": [ "src/**/*", "tfjs/**/*", "demo/**/*.ts" ]
"locations": [ "src/**/*", "tfjs/**/*" ]
},
"typescript": {
"allowJs": false

View File

@ -1,67 +1,5 @@
# Human Library: Demos
For details on other demos see Wiki: [**Demos**](https://github.com/vladmandic/human/wiki/Demos)
For details see Wiki:
## Main Demo
`index.html`: Full demo using the `Human` ESM module running in browsers.
Includes:
- Selectable inputs:
- Sample images
- Image via drag & drop
- Image via URL param
- WebCam input
- Video stream
- WebRTC stream
- Selectable active `Human` modules
- With interactive module params
- Interactive `Human` image filters
- Selectable interactive `results` browser
- Selectable `backend`
- Multiple execution methods:
- Sync vs Async
- in main thread or web worker
- live on GitHub Pages, on a user-hosted web server, or via the included [**micro http2 server**](https://github.com/vladmandic/human/wiki/Development-Server)
### Demo Options
- General `Human` library options
in `index.js:userConfig`
- General `Human` `draw` options
in `index.js:drawOptions`
- Demo PWA options
in `index.js:pwa`
- Demo specific options
in `index.js:ui`
```js
const ui = {
console: true, // log messages to browser console
useWorker: true, // use web workers for processing
buffered: true, // should output be buffered between frames
interpolated: true, // should output be interpolated for smoothness between frames
results: false, // show results tree
useWebRTC: false, // use webrtc as camera source instead of local webcam
};
```
The demo implements several ways to use the `Human` library.
### URL Params
The demo app can use URL parameters to override configuration values.
For example:
- Force using `WASM` as backend: <https://vladmandic.github.io/human/demo/index.html?backend=wasm>
- Enable `WebWorkers`: <https://vladmandic.github.io/human/demo/index.html?worker=true>
- Skip pre-loading and warming up: <https://vladmandic.github.io/human/demo/index.html?preload=false&warmup=false>
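A hedged sketch of how such overrides might be parsed (parameter names match the URLs above; the handling logic is illustrative, not the demo's actual code):
```js
const params = new URLSearchParams(window.location.search);
const overrides = {};
if (params.has('backend')) overrides.backend = params.get('backend'); // e.g. ?backend=wasm
if (params.has('warmup')) overrides.warmup = params.get('warmup') === 'false' ? 'none' : 'full'; // e.g. ?warmup=false
const useWorker = params.get('worker') === 'true'; // e.g. ?worker=true
```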
### WebRTC
Note that a WebRTC connection requires a WebRTC server that provides a compatible media track, such as an H.264 video track.
For an example WebRTC server implementation, see the <https://github.com/vladmandic/stream-rtsp> project,
which connects to an IP security camera using the RTSP protocol and transcodes the stream to WebRTC,
ready to be consumed by a client such as `Human`.
- [**Demos**](https://github.com/vladmandic/human/wiki/Demos)

View File

@ -0,0 +1,30 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<meta name="viewport" content="width=device-width" id="viewport">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script type="module" src="browser.js"></script>
<style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
body::-webkit-scrollbar { display: none; }
.status { position: absolute; width: 100vw; bottom: 10%; text-align: center; font-size: 3rem; font-weight: 100; text-shadow: 2px 2px #303030; }
.log { position: absolute; bottom: 0; margin: 0.4rem 0.4rem 0 0.4rem; font-size: 0.9rem; }
</style>
</head>
<body>
<div id="status" class="status"></div>
<img id="image" src="../../samples/groups/group1.jpg" style="display: none"></img>
<div id="log" class="log"></div>
</body>
</html>

51
demo/benchmark/browser.js Normal file
View File

@ -0,0 +1,51 @@
// import * as tf from '../../assets/tf.es2017.js';
// import '../../assets/tf-backend-webgpu.es2017.js';
import Human from '../../dist/human.esm.js';
const loop = 20;
// eslint-disable-next-line no-console
const log = (...msg) => console.log(...msg);
const myConfig = {
backend: 'humangl',
modelBasePath: 'https://vladmandic.github.io/human/models',
wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
debug: true,
async: true,
cacheSensitivity: 0,
filter: { enabled: false },
face: {
enabled: true,
detector: { enabled: true, rotation: false },
mesh: { enabled: true },
iris: { enabled: true },
description: { enabled: true },
emotion: { enabled: false },
},
hand: { enabled: true, rotation: false },
body: { enabled: true },
object: { enabled: false },
};
async function main() {
const human = new Human(myConfig);
await human.tf.ready();
log('Human:', human.version);
await human.load();
const loaded = Object.keys(human.models).filter((a) => human.models[a]);
log('Loaded:', loaded);
log('Memory state:', human.tf.engine().memory());
const element = document.getElementById('image');
const processed = await human.image(element);
const t0 = performance.now();
await human.detect(processed.tensor, myConfig);
const t1 = performance.now();
log('Backend:', human.tf.getBackend());
log('Warmup:', Math.round(t1 - t0));
for (let i = 0; i < loop; i++) await human.detect(processed.tensor, myConfig);
const t2 = performance.now();
log('Average:', Math.round((t2 - t1) / loop));
}
main();

71
demo/benchmark/node.js Normal file
View File

@ -0,0 +1,71 @@
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
const tf = require('@tensorflow/tfjs-node-gpu');
const log = require('@vladmandic/pilogger');
const canvasJS = require('canvas');
const Human = require('../../dist/human.node-gpu.js').default;
const input = 'samples/groups/group1.jpg';
const loop = 20;
const myConfig = {
backend: 'tensorflow',
modelBasePath: 'https://vladmandic.github.io/human/models',
wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
debug: true,
async: true,
cacheSensitivity: 0,
filter: { enabled: false },
face: {
enabled: true,
detector: { enabled: true, rotation: false },
mesh: { enabled: true },
iris: { enabled: true },
description: { enabled: true },
emotion: { enabled: true },
},
hand: {
enabled: true,
},
body: { enabled: true },
object: { enabled: false },
};
async function getImage(human) {
const img = await canvasJS.loadImage(input);
const canvas = canvasJS.createCanvas(img.width, img.height);
const ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0, img.width, img.height);
const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
const res = human.tf.tidy(() => {
const tensor = human.tf.tensor(Array.from(imageData.data), [canvas.height, canvas.width, 4], 'int32'); // create rgba image tensor from flat array
const channels = human.tf.split(tensor, 4, 2); // split rgba to channels
const rgb = human.tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb
const reshape = human.tf.reshape(rgb, [1, canvas.height, canvas.width, 3]); // move extra dim from the end of tensor and use it as batch number instead
return reshape;
});
log.info('Image:', input, res.shape);
return res;
}
async function main() {
log.header();
const human = new Human(myConfig);
await human.tf.ready();
log.info('Human:', human.version);
await human.load();
const loaded = Object.keys(human.models).filter((a) => human.models[a]);
log.info('Loaded:', loaded);
log.info('Memory state:', human.tf.engine().memory());
const tensor = await getImage(human);
log.state('Processing:', tensor['shape']);
const t0 = performance.now();
await human.detect(tensor, myConfig);
const t1 = performance.now();
log.state('Backend:', human.tf.getBackend());
log.data('Warmup:', Math.round(t1 - t0));
for (let i = 0; i < loop; i++) await human.detect(tensor, myConfig);
const t2 = performance.now();
log.data('Average:', Math.round((t2 - t1) / loop));
}
main();

189
demo/face3d/face3d.js Normal file
View File

@ -0,0 +1,189 @@
// @ts-nocheck // typescript checks disabled as this is pure javascript
/**
* Human demo for browsers
*
* Demo for face mesh detection and projection as 3D object using Three.js
*/
import { DoubleSide, Mesh, MeshBasicMaterial, OrthographicCamera, Scene, sRGBEncoding, VideoTexture, WebGLRenderer, BufferGeometry, BufferAttribute } from '../helpers/three.js';
import { OrbitControls } from '../helpers/three-orbitControls.js';
import Human from '../../dist/human.esm.js'; // equivalent of @vladmandic/human
const userConfig = {
backend: 'wasm',
async: false,
profile: false,
warmup: 'full',
modelBasePath: '../../models/',
// wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
filter: { enabled: false },
face: { enabled: true,
detector: { rotation: false, maxDetected: 1 },
mesh: { enabled: true },
iris: { enabled: true },
description: { enabled: false },
emotion: { enabled: false },
},
hand: { enabled: false },
gesture: { enabled: false },
body: { enabled: false },
object: { enabled: false },
};
const human = new Human(userConfig);
const wireframe = true; // enable wireframe overlay
const canvas = document.getElementById('canvas');
let width = 0;
let height = 0;
const renderer = new WebGLRenderer({ antialias: true, alpha: true, canvas });
renderer.setClearColor(0x000000);
renderer.outputEncoding = sRGBEncoding;
const camera = new OrthographicCamera();
const controls = new OrbitControls(camera, renderer.domElement); // pan&zoom controls
controls.enabled = true;
const materialWireFrame = new MeshBasicMaterial({ // create wireframe material
color: 0xffaaaa,
wireframe: true,
});
const materialFace = new MeshBasicMaterial({ // create material for mask
color: 0xffffff,
map: null, // will be created when the video is ready.
side: DoubleSide,
});
class FaceGeometry extends BufferGeometry {
constructor(triangulation) {
super();
this.positions = new Float32Array(478 * 3);
this.uvs = new Float32Array(478 * 2);
this.setAttribute('position', new BufferAttribute(this.positions, 3));
this.setAttribute('uv', new BufferAttribute(this.uvs, 2));
this.setIndex(triangulation);
}
update(face) {
let ptr = 0;
for (const p of face.mesh) {
this.positions[ptr + 0] = -p[0] + width / 2;
this.positions[ptr + 1] = height - p[1] - height / 2;
this.positions[ptr + 2] = -p[2];
ptr += 3;
}
ptr = 0;
for (const p of face.meshRaw) {
this.uvs[ptr + 0] = 0 + p[0];
this.uvs[ptr + 1] = 1 - p[1];
ptr += 2;
}
materialFace.map.update(); // update textures from video
this.attributes.position.needsUpdate = true; // vertices
this.attributes.uv.needsUpdate = true; // textures
this.computeVertexNormals();
}
}
const scene = new Scene();
const faceGeometry = new FaceGeometry(human.faceTriangulation); // create a new geometry helper
const mesh = new Mesh(faceGeometry, materialFace); // create mask mesh
scene.add(mesh);
function resize(input) {
width = input.videoWidth;
height = input.videoHeight;
camera.left = -width / 2;
camera.right = width / 2;
camera.top = height / 2;
camera.bottom = -height / 2;
camera.near = -100;
camera.far = 100;
camera.zoom = 2;
camera.updateProjectionMatrix();
renderer.setSize(width, height);
}
const isLive = (input) => input.srcObject && (input.srcObject.getVideoTracks()[0].readyState === 'live') && (input.readyState > 2) && (!input.paused);
async function render(input) {
if (isLive(input)) {
if (width !== input.videoWidth || height !== input.videoHeight) resize(input); // resize orthographic camera to video dimensions if necessary
const res = await human.detect(input);
if (res?.face?.length > 0) {
faceGeometry.update(res.face[0]);
// render the mask
mesh.material = materialFace;
renderer.autoClear = true;
renderer.render(scene, camera);
if (wireframe) { // overlay wireframe
mesh.material = materialWireFrame;
renderer.autoClear = false;
renderer.render(scene, camera);
}
}
}
requestAnimationFrame(() => render(input));
}
// setup webcam
async function setupCamera() {
if (!navigator.mediaDevices) return null;
const video = document.getElementById('video');
canvas.addEventListener('click', () => {
if (isLive(video)) video.pause();
else video.play();
});
const constraints = {
audio: false,
video: { facingMode: 'user', resizeMode: 'crop-and-scale' },
};
if (window.innerWidth > window.innerHeight) constraints.video.width = { ideal: window.innerWidth };
else constraints.video.height = { ideal: window.innerHeight };
const stream = await navigator.mediaDevices.getUserMedia(constraints);
if (stream) video.srcObject = stream;
else return null;
// get information data
const track = stream.getVideoTracks()[0];
const settings = track.getSettings();
// log('camera constraints:', constraints, 'window:', { width: window.innerWidth, height: window.innerHeight }, 'settings:', settings, 'track:', track);
const engineData = human.tf.engine();
const gpuData = (engineData.backendInstance && engineData.backendInstance.numBytesInGPU > 0) ? `gpu: ${(engineData.backendInstance.numBytesInGPU ? engineData.backendInstance.numBytesInGPU : 0).toLocaleString()} bytes` : '';
const cameraData = { name: track.label?.toLowerCase(), width: settings.width, height: settings.height, facing: settings.facingMode === 'user' ? 'front' : 'back' };
const memoryData = `system: ${engineData.state.numBytes.toLocaleString()} bytes ${gpuData} | tensors: ${engineData.state.numTensors.toLocaleString()}`;
document.getElementById('log').innerHTML = `
video: ${cameraData.name} | facing: ${cameraData.facing} | screen: ${window.innerWidth} x ${window.innerHeight} camera: ${cameraData.width} x ${cameraData.height}<br>
backend: ${human.tf.getBackend()} | ${memoryData}<br>
`;
// return when camera is ready
return new Promise((resolve) => {
video.onloadeddata = async () => {
video.width = video.videoWidth;
video.height = video.videoHeight;
canvas.width = video.width;
canvas.height = video.height;
video.play();
resolve(video);
};
});
}
async function main() {
window.addEventListener('unhandledrejection', (evt) => {
// eslint-disable-next-line no-console
console.error(evt.reason || evt);
document.getElementById('log').innerHTML = evt?.reason?.message || evt?.reason || evt;
evt.preventDefault();
});
await human.load();
const video = await setupCamera();
if (video) {
const videoTexture = new VideoTexture(video); // now load textures from video
videoTexture.encoding = sRGBEncoding;
materialFace.map = videoTexture;
render(video);
}
}
window.onload = main;

View File

@ -1,30 +1,32 @@
<!DOCTYPE html>
<html lang="en">
<head>
<title>Human Browser Tests</title>
<title>Human</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./face3d.js" type="module"></script>
<style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 14px; font-variant: small-caps; }
body { margin: 0; background: black; color: white; width: 100vw; }
.canvas { position: fixed; bottom: 10px; right: 10px; width: 256px; height: 256px; z-index: 99; }
.events { position: fixed; top: 10px; right: 10px; width: 12rem; height: 1.25rem; background-color: grey; padding: 8px; z-index: 99; }
.state { position: fixed; top: 60px; right: 10px; width: 12rem; height: 1.25rem; background-color: grey; padding: 8px; z-index: 99; }
.pre { line-height: 150%; }
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; background: black; color: white; overflow-x: hidden; }
body::-webkit-scrollbar { display: none; }
.video { display: none; }
.canvas { margin: 0 auto; }
</style>
</head>
<body>
<pre id="log" class="pre"></pre>
<div id="events" class="events"></div>
<div id="state" class="state"></div>
<canvas id="canvas" class="canvas" width="256" height="256"></canvas>
<script type="module" src="./test-browser-esm.js"></script>
<div id="media">
<canvas id="canvas" class="canvas"></canvas>
<video id="video" playsinline class="video"></video>
<div id="log"></div>
</div>
</body>
</html>

View File

@ -1,160 +0,0 @@
/**
* Human demo for browsers
*
* Demo for face detection
*/
/** @type {Human} */
import { Human } from '../../dist/human.esm.js';
let loader;
const humanConfig = { // user configuration for human, used to fine-tune behavior
cacheSensitivity: 0,
debug: true,
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
filter: { enabled: true, equalization: false, flip: false },
face: {
enabled: true,
detector: { rotation: false, maxDetected: 100, minConfidence: 0.2, return: true, square: false },
iris: { enabled: true },
description: { enabled: true },
emotion: { enabled: true },
antispoof: { enabled: true },
liveness: { enabled: true },
},
body: { enabled: false },
hand: { enabled: false },
object: { enabled: false },
gesture: { enabled: false },
segmentation: { enabled: false },
};
const human = new Human(humanConfig); // new instance of human
export const showLoader = (msg) => { loader.setAttribute('msg', msg); loader.style.display = 'block'; };
export const hideLoader = () => loader.style.display = 'none';
class ComponentLoader extends HTMLElement { // watch for attributes
message = document.createElement('div');
static get observedAttributes() { return ['msg']; }
attributeChangedCallback(_name, _prevVal, currVal) {
this.message.innerHTML = currVal;
}
connectedCallback() { // triggered on insert
this.attachShadow({ mode: 'open' });
const css = document.createElement('style');
css.innerHTML = `
.loader-container { top: 450px; justify-content: center; position: fixed; width: 100%; }
.loader-message { font-size: 1.5rem; padding: 1rem; }
.loader { width: 300px; height: 300px; border: 3px solid transparent; border-radius: 50%; border-top: 4px solid #f15e41; animation: spin 4s linear infinite; position: relative; }
.loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: 4px solid transparent; }
.loader::before { border-top-color: #bad375; animation: 3s spin linear infinite; }
.loader::after { border-top-color: #26a9e0; animation: spin 1.5s linear infinite; }
@keyframes spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } }
`;
const container = document.createElement('div');
container.id = 'loader-container';
container.className = 'loader-container';
loader = document.createElement('div');
loader.id = 'loader';
loader.className = 'loader';
this.message.id = 'loader-message';
this.message.className = 'loader-message';
this.message.innerHTML = '';
container.appendChild(this.message);
container.appendChild(loader);
this.shadowRoot?.append(css, container);
loader = this; // eslint-disable-line @typescript-eslint/no-this-alias
}
}
customElements.define('component-loader', ComponentLoader);
function addFace(face, source) {
const deg = (rad) => Math.round((rad || 0) * 180 / Math.PI);
const canvas = document.createElement('canvas');
const emotion = face.emotion?.map((e) => `${Math.round(100 * e.score)}% ${e.emotion}`) || [];
const rotation = `pitch ${deg(face.rotation?.angle.pitch)}° | roll ${deg(face.rotation?.angle.roll)}° | yaw ${deg(face.rotation?.angle.yaw)}°`;
const gaze = `direction ${deg(face.rotation?.gaze.bearing)}° strength ${Math.round(100 * (face.rotation.gaze.strength || 0))}%`;
canvas.title = `
source: ${source}
score: ${Math.round(100 * face.boxScore)}% detection ${Math.round(100 * face.faceScore)}% analysis
age: ${face.age} years | gender: ${face.gender} score ${Math.round(100 * face.genderScore)}%
emotion: ${emotion.join(' | ')}
head rotation: ${rotation}
eyes gaze: ${gaze}
camera distance: ${face.distance}m | ${Math.round(100 * face.distance / 2.54)}in
check: ${Math.round(100 * face.real)}% real ${Math.round(100 * face.live)}% live
`.replace(/ /g, ' ');
canvas.onclick = (e) => {
e.preventDefault();
document.getElementById('description').innerHTML = canvas.title;
};
human.draw.tensor(face.tensor, canvas);
human.tf.dispose(face.tensor);
return canvas;
}
async function addFaces(imgEl) {
showLoader('human: busy');
const faceEl = document.getElementById('faces');
faceEl.innerHTML = '';
const res = await human.detect(imgEl);
console.log(res); // eslint-disable-line no-console
document.getElementById('description').innerHTML = `detected ${res.face.length} faces`;
for (const face of res.face) {
const canvas = addFace(face, imgEl.src.substring(0, 64));
faceEl.appendChild(canvas);
}
hideLoader();
}
function addImage(imageUri) {
const imgEl = new Image(256, 256);
imgEl.onload = () => {
const images = document.getElementById('images');
images.appendChild(imgEl); // add image if loaded ok
images.scroll(images?.offsetWidth, 0);
};
imgEl.onerror = () => console.error('addImage', { imageUri }); // eslint-disable-line no-console
imgEl.onclick = () => addFaces(imgEl);
imgEl.title = imageUri.substring(0, 64);
imgEl.src = encodeURI(imageUri);
}
async function initDragAndDrop() {
const reader = new FileReader();
reader.onload = async (e) => {
if (e.target.result.startsWith('data:image')) await addImage(e.target.result);
};
document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
document.body.addEventListener('dragover', (evt) => evt.preventDefault());
document.body.addEventListener('drop', async (evt) => {
evt.preventDefault();
evt.dataTransfer.dropEffect = 'copy';
for (const f of evt.dataTransfer.files) reader.readAsDataURL(f);
});
document.body.onclick = (e) => {
if (e.target.localName !== 'canvas') document.getElementById('description').innerHTML = '';
};
}
async function main() {
showLoader('loading models');
await human.load();
showLoader('compiling models');
await human.warmup();
showLoader('loading images');
const images = ['group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg', 'solvay1927.jpg', 'stock-group-1.jpg', 'stock-group-2.jpg', 'stock-models-6.jpg', 'stock-models-7.jpg'];
const imageUris = images.map((a) => `../../samples/in/${a}`);
for (let i = 0; i < imageUris.length; i++) addImage(imageUris[i]);
initDragAndDrop();
hideLoader();
}
window.onload = main;

View File

@ -1,43 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<!-- <meta http-equiv="content-type" content="text/html; charset=utf-8"> -->
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./facedetect.js" type="module"></script>
<style>
img { object-fit: contain; }
img:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 24px; font-variant: small-caps; }
body { margin: 24px; background: black; color: white; overflow: hidden; text-align: -webkit-center; width: 100vw; height: 100vh; }
::-webkit-scrollbar { height: 8px; border: 0; border-radius: 0; }
::-webkit-scrollbar-thumb { background: grey }
::-webkit-scrollbar-track { margin: 3px; }
canvas { width: 192px; height: 192px; margin: 2px; padding: 2px; cursor: grab; transform: scale(1.00); transition : all 0.3s ease; }
canvas:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
</style>
</head>
<body>
<component-loader></component-loader>
<div style="display: flex">
<div>
<div style="margin: 24px">select image to show detected faces<br>drag & drop to add your images</div>
<div id="images" style="display: flex; width: 98vw; overflow-x: auto; overflow-y: hidden; scroll-behavior: smooth"></div>
</div>
</div>
<div id="list" style="height: 10px"></div>
<div style="margin: 24px">hover or click on face to show details</div>
<div id="faces" style="overflow-y: auto"></div>
<div id="description" style="white-space: pre;"></div>
</body>
</html>

View File

@ -1,42 +0,0 @@
# Human Face Recognition: FaceID
`faceid` runs multiple checks to validate webcam input before performing a face match
Detected face image and descriptor are stored in client-side IndexedDB
## Workflow
- Starts webcam
- Waits until the input video contains a validated face or a timeout is reached
- Number of people
- Face size
- Face and gaze direction
- Detection scores
- Blink detection (including temporal check for blink speed) to verify live input
- Runs `antispoofing` optional module
- Runs `liveness` optional module
- Runs match against database of registered faces and presents best match with scores
## Notes
Both `antispoof` and `liveness` models are tiny and
designed to serve as a quick check when used together with other indicators:
- size below 1MB
- very quick inference times as they are very simple (11 ops for antispoof and 23 ops for liveness)
- trained on low-resolution inputs
### Anti-spoofing Module
- Checks whether the input is realistic (e.g. rejects computer-generated faces)
- Configuration: `human.config.face.antispoof.enabled`
- Result: `human.result.face[0].real` as score
### Liveness Module
- Checks whether the input has obvious artifacts due to recording (e.g. playing back a phone recording of a face)
- Configuration: `human.config.face.liveness.enabled`
- Result: `human.result.face[0].live` as score
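Putting the two optional modules together, a minimal post-detection check could look like this (the `0.6` threshold is an illustrative assumption, not a library default):
```js
const res = await human.detect(video);
const face = res.face[0];
const real = face?.real || 0; // antispoof score in range 0..1
const live = face?.live || 0; // liveness score in range 0..1
if (real < 0.6 || live < 0.6) console.warn('input failed antispoof/liveness checks', { real, live });
```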
### Models
**FaceID** is compatible with
- `faceres.json` (default) performs combined age/gender/descriptor analysis
- `faceres-deep.json` higher resolution variation of `faceres`
- `insightface` alternative model for face descriptor analysis
- `mobilefacenet` alternative model for face descriptor analysis
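Switching descriptor models is a configuration change; a sketch based on the commented-out alternatives in `index.ts` (model URL as referenced there):
```js
const humanConfig = {
  face: {
    enabled: true,
    description: { enabled: false }, // disable the default faceres descriptor
    mobilefacenet: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/mobilefacenet.json' }, // alternative descriptor model
  },
};
```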

View File

@ -1,49 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human: Face Recognition</title>
<meta name="viewport" content="width=device-width" id="viewport">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./index.js" type="module"></script>
<style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; padding: 16px; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
body::-webkit-scrollbar { display: none; }
.button { padding: 2px; cursor: pointer; box-shadow: 2px 2px black; width: 64px; text-align: center; place-content: center; margin-left: 16px; height: 16px; display: none }
.ok { position: absolute; top: 64px; right: 20px; width: 150px; background-color: grey; padding: 4px; color: black; font-size: 14px }
</style>
</head>
<body>
<div style="padding: 8px">
<h1 style="margin: 0">faceid demo using human library</h1>
look directly at the camera and make sure that detection passes all of the required tests noted on the right-hand side of the screen<br>
if the input does not satisfy the tests within the specified timeout, no image will be selected<br>
once the face image is approved, it will be compared with the existing face database<br>
you can also store the face descriptor with a label in the browser's IndexedDB for future use<br>
<br>
<i>note: this is not equivalent to full faceid methods as used by modern mobile phones or windows hello,<br>
as those rely on additional infrared sensors and depth-sensing, not just the camera image, for additional levels of security</i>
</div>
<canvas id="canvas" style="padding: 8px"></canvas>
<canvas id="source" style="padding: 8px"></canvas>
<video id="video" playsinline style="display: none"></video>
<pre id="log" style="padding: 8px"></pre>
<div id="match" style="display: none; padding: 8px">
<label for="name">name:</label>
<input id="name" type="text" value="" style="height: 16px; border: none; padding: 2px; margin-left: 8px">
<span id="save" class="button" style="background-color: royalblue">save</span>
<span id="delete" class="button" style="background-color: lightcoral">delete</span>
</div>
<div id="retry" class="button" style="background-color: darkslategray; width: 93%; margin-top: 32px; padding: 12px">retry</div>
<div id="ok"></div>
</body>
</html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -1,318 +0,0 @@
/**
* Human demo for browsers
* @default Human Library
* @summary <https://github.com/vladmandic/human>
* @author <https://github.com/vladmandic>
* @copyright <https://github.com/vladmandic>
* @license MIT
*/
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
import * as indexDb from './indexdb'; // methods to deal with indexdb
const humanConfig = { // user configuration for human, used to fine-tune behavior
cacheSensitivity: 0.01,
modelBasePath: '../../models',
filter: { enabled: true, equalization: true }, // let's run with histogram equalizer
debug: true,
face: {
enabled: true,
detector: { rotation: true, return: true, mask: false }, // return tensor is used to get detected face image
description: { enabled: true }, // default model for face descriptor extraction is faceres
// mobilefacenet: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/mobilefacenet.json' }, // alternative model
// insightface: { enabled: true, modelPath: 'https://vladmandic.github.io/insightface/models/insightface-mobilenet-swish.json' }, // alternative model
iris: { enabled: true }, // needed to determine gaze direction
emotion: { enabled: false }, // not needed
antispoof: { enabled: true }, // enable optional antispoof module
liveness: { enabled: true }, // enable optional liveness module
},
body: { enabled: false },
hand: { enabled: false },
object: { enabled: false },
gesture: { enabled: true }, // parses face and iris gestures
};
// const matchOptions = { order: 2, multiplier: 1000, min: 0.0, max: 1.0 }; // for embedding model
const matchOptions = { order: 2, multiplier: 25, min: 0.2, max: 0.8 }; // for faceres model
const options = {
minConfidence: 0.6, // overall face confidence for box, face, gender, real, live
minSize: 224, // min input to face descriptor model before degradation
maxTime: 30000, // max time before giving up
blinkMin: 10, // minimum duration of a valid blink
blinkMax: 800, // maximum duration of a valid blink
threshold: 0.5, // minimum similarity
distanceMin: 0.4, // closest the face is allowed to be to the camera; distance is in meters
distanceMax: 1.0, // farthest the face is allowed to be to the camera; distance is in meters
mask: humanConfig.face.detector.mask,
rotation: humanConfig.face.detector.rotation,
...matchOptions,
};
const ok: Record<string, { status: boolean | undefined, val: number }> = { // must meet all rules
faceCount: { status: false, val: 0 },
faceConfidence: { status: false, val: 0 },
facingCenter: { status: false, val: 0 },
lookingCenter: { status: false, val: 0 },
blinkDetected: { status: false, val: 0 },
faceSize: { status: false, val: 0 },
antispoofCheck: { status: false, val: 0 },
livenessCheck: { status: false, val: 0 },
distance: { status: false, val: 0 },
age: { status: false, val: 0 },
gender: { status: false, val: 0 },
timeout: { status: true, val: 0 },
descriptor: { status: false, val: 0 },
elapsedMs: { status: undefined, val: 0 }, // total time while waiting for valid face
detectFPS: { status: undefined, val: 0 }, // mark detection fps performance
drawFPS: { status: undefined, val: 0 }, // mark redraw fps performance
};
const allOk = () => ok.faceCount.status
&& ok.faceSize.status
&& ok.blinkDetected.status
&& ok.facingCenter.status
&& ok.lookingCenter.status
&& ok.faceConfidence.status
&& ok.antispoofCheck.status
&& ok.livenessCheck.status
&& ok.distance.status
&& ok.descriptor.status
&& ok.age.status
&& ok.gender.status;
const current: { face: H.FaceResult | null, record: indexDb.FaceRecord | null } = { face: null, record: null }; // current face record and matched database record
const blink = { // internal timers for blink start/end/duration
start: 0,
end: 0,
time: 0,
};
// let db: Array<{ name: string, source: string, embedding: number[] }> = []; // holds loaded face descriptor database
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
human.env.perfadd = false; // is performance data showing instant or total values
human.draw.options.font = 'small-caps 18px "Lato"'; // set font used to draw labels when using draw methods
human.draw.options.lineHeight = 20;
const dom = { // grab instances of dom objects so we dont have to look them up later
video: document.getElementById('video') as HTMLVideoElement,
canvas: document.getElementById('canvas') as HTMLCanvasElement,
log: document.getElementById('log') as HTMLPreElement,
fps: document.getElementById('fps') as HTMLPreElement,
match: document.getElementById('match') as HTMLDivElement,
name: document.getElementById('name') as HTMLInputElement,
save: document.getElementById('save') as HTMLSpanElement,
delete: document.getElementById('delete') as HTMLSpanElement,
retry: document.getElementById('retry') as HTMLDivElement,
source: document.getElementById('source') as HTMLCanvasElement,
ok: document.getElementById('ok') as HTMLDivElement,
};
const timestamp = { detect: 0, draw: 0 }; // holds information used to calculate performance and possible memory leaks
let startTime = 0;
const log = (...msg) => { // helper method to output messages
dom.log.innerText += msg.join(' ') + '\n';
console.log(...msg); // eslint-disable-line no-console
};
async function webCam() { // initialize webcam
// @ts-ignore resizeMode is not yet defined in tslib
const cameraOptions: MediaStreamConstraints = { audio: false, video: { facingMode: 'user', resizeMode: 'none', width: { ideal: document.body.clientWidth } } };
const stream: MediaStream = await navigator.mediaDevices.getUserMedia(cameraOptions);
const ready = new Promise((resolve) => { dom.video.onloadeddata = () => resolve(true); });
dom.video.srcObject = stream;
void dom.video.play();
await ready;
dom.canvas.width = dom.video.videoWidth;
dom.canvas.height = dom.video.videoHeight;
dom.canvas.style.width = '50%';
dom.canvas.style.height = '50%';
if (human.env.initial) log('video:', dom.video.videoWidth, dom.video.videoHeight, '|', stream.getVideoTracks()[0].label);
dom.canvas.onclick = () => { // pause when clicked on screen and resume on next click
if (dom.video.paused) void dom.video.play();
else dom.video.pause();
};
}
async function detectionLoop() { // main detection loop
if (!dom.video.paused) {
if (current.face?.tensor) human.tf.dispose(current.face.tensor); // dispose previous tensor
await human.detect(dom.video); // actual detection; we're not capturing output in a local variable as it can also be reached via human.result
const now = human.now();
ok.detectFPS.val = Math.round(10000 / (now - timestamp.detect)) / 10;
timestamp.detect = now;
requestAnimationFrame(detectionLoop); // start new frame immediately
}
}
function drawValidationTests() {
let y = 32;
for (const [key, val] of Object.entries(ok)) {
let el = document.getElementById(`ok-${key}`);
if (!el) {
el = document.createElement('div');
el.id = `ok-${key}`;
el.innerText = key;
el.className = 'ok';
el.style.top = `${y}px`;
dom.ok.appendChild(el);
}
if (typeof val.status === 'boolean') el.style.backgroundColor = val.status ? 'lightgreen' : 'lightcoral';
const status = val.status ? 'ok' : 'fail';
el.innerText = `${key}: ${val.val === 0 ? status : val.val}`;
y += 28;
}
}
async function validationLoop(): Promise<H.FaceResult> { // main screen refresh loop
const interpolated = human.next(human.result); // smoothen result using last-known results
human.draw.canvas(dom.video, dom.canvas); // draw canvas to screen
await human.draw.all(dom.canvas, interpolated); // draw labels, boxes, lines, etc.
const now = human.now();
ok.drawFPS.val = Math.round(10000 / (now - timestamp.draw)) / 10;
timestamp.draw = now;
ok.faceCount.val = human.result.face.length;
ok.faceCount.status = ok.faceCount.val === 1; // must be exactly one detected face
if (ok.faceCount.status) { // skip the rest if no face
const gestures: string[] = Object.values(human.result.gesture).map((gesture: H.GestureResult) => gesture.gesture); // flatten all gestures
if (gestures.includes('blink left eye') || gestures.includes('blink right eye')) blink.start = human.now(); // blink starts when eyes get closed
if (blink.start > 0 && !gestures.includes('blink left eye') && !gestures.includes('blink right eye')) blink.end = human.now(); // if blink started how long until eyes are back open
ok.blinkDetected.status = ok.blinkDetected.status || (Math.abs(blink.end - blink.start) > options.blinkMin && Math.abs(blink.end - blink.start) < options.blinkMax);
if (ok.blinkDetected.status && blink.time === 0) blink.time = Math.trunc(blink.end - blink.start);
ok.facingCenter.status = gestures.includes('facing center');
ok.lookingCenter.status = gestures.includes('looking center'); // must face camera and look at camera
ok.faceConfidence.val = human.result.face[0].faceScore || human.result.face[0].boxScore || 0;
ok.faceConfidence.status = ok.faceConfidence.val >= options.minConfidence;
ok.antispoofCheck.val = human.result.face[0].real || 0;
ok.antispoofCheck.status = ok.antispoofCheck.val >= options.minConfidence;
ok.livenessCheck.val = human.result.face[0].live || 0;
ok.livenessCheck.status = ok.livenessCheck.val >= options.minConfidence;
ok.faceSize.val = Math.min(human.result.face[0].box[2], human.result.face[0].box[3]);
ok.faceSize.status = ok.faceSize.val >= options.minSize;
ok.distance.val = human.result.face[0].distance || 0;
ok.distance.status = (ok.distance.val >= options.distanceMin) && (ok.distance.val <= options.distanceMax);
ok.descriptor.val = human.result.face[0].embedding?.length || 0;
ok.descriptor.status = ok.descriptor.val > 0;
ok.age.val = human.result.face[0].age || 0;
ok.age.status = ok.age.val > 0;
ok.gender.val = human.result.face[0].genderScore || 0;
ok.gender.status = ok.gender.val >= options.minConfidence;
}
// run again
ok.timeout.status = ok.elapsedMs.val <= options.maxTime;
drawValidationTests();
if (allOk() || !ok.timeout.status) { // all criteria met
dom.video.pause();
return human.result.face[0];
}
ok.elapsedMs.val = Math.trunc(human.now() - startTime);
return new Promise((resolve) => {
setTimeout(async () => {
await validationLoop(); // run validation loop until conditions are met
resolve(human.result.face[0]); // recursive promise resolve
}, 30); // use to slow down refresh from max refresh rate to target of 30 fps
});
}
async function saveRecords() {
if (dom.name.value.length > 0) {
const image = dom.canvas.getContext('2d')?.getImageData(0, 0, dom.canvas.width, dom.canvas.height) as ImageData;
const rec = { id: 0, name: dom.name.value, descriptor: current.face?.embedding as number[], image };
await indexDb.save(rec);
log('saved face record:', rec.name, 'descriptor length:', current.face?.embedding?.length);
log('known face records:', await indexDb.count());
} else {
log('invalid name');
}
}
async function deleteRecord() {
if (current.record && current.record.id > 0) {
await indexDb.remove(current.record);
}
}
async function detectFace() {
dom.canvas.style.height = '';
dom.canvas.getContext('2d')?.clearRect(0, 0, options.minSize, options.minSize);
if (!current?.face?.tensor || !current?.face?.embedding) return false;
console.log('face record:', current.face); // eslint-disable-line no-console
log(`detected face: ${current.face.gender} ${current.face.age || 0}y distance ${100 * (current.face.distance || 0)}cm/${Math.round(100 * (current.face.distance || 0) / 2.54)}in`);
await human.draw.tensor(current.face.tensor, dom.canvas);
if (await indexDb.count() === 0) {
log('face database is empty: nothing to compare face with');
document.body.style.background = 'black';
dom.delete.style.display = 'none';
return false;
}
const db = await indexDb.load();
const descriptors = db.map((rec) => rec.descriptor).filter((desc) => desc.length > 0);
const res = human.match.find(current.face.embedding, descriptors, matchOptions);
current.record = db[res.index] || null;
if (current.record) {
log(`best match: ${current.record.name} | id: ${current.record.id} | similarity: ${Math.round(1000 * res.similarity) / 10}%`);
dom.name.value = current.record.name;
dom.source.style.display = '';
dom.source.getContext('2d')?.putImageData(current.record.image, 0, 0);
}
document.body.style.background = res.similarity > options.threshold ? 'darkgreen' : 'maroon';
return res.similarity > options.threshold;
}
async function main() { // main entry point
ok.faceCount.status = false;
ok.faceConfidence.status = false;
ok.facingCenter.status = false;
ok.blinkDetected.status = false;
ok.faceSize.status = false;
ok.antispoofCheck.status = false;
ok.livenessCheck.status = false;
ok.age.status = false;
ok.gender.status = false;
ok.elapsedMs.val = 0;
dom.match.style.display = 'none';
dom.retry.style.display = 'none';
dom.source.style.display = 'none';
dom.canvas.style.height = '50%';
document.body.style.background = 'black';
await webCam();
await detectionLoop(); // start detection loop
startTime = human.now();
current.face = await validationLoop(); // start validation loop
dom.canvas.width = current.face?.tensor?.shape[1] || options.minSize;
dom.canvas.height = current.face?.tensor?.shape[0] || options.minSize;
dom.source.width = dom.canvas.width;
dom.source.height = dom.canvas.height;
dom.canvas.style.width = '';
dom.match.style.display = 'flex';
dom.save.style.display = 'flex';
dom.delete.style.display = 'flex';
dom.retry.style.display = 'block';
if (!allOk()) { // are all criteria met?
log('did not find valid face');
return false;
}
return detectFace();
}
async function init() {
log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
log('options:', JSON.stringify(options).replace(/{|}|"|\[|\]/g, '').replace(/,/g, ' '));
log('initializing webcam...');
await webCam(); // start webcam
log('loading human models...');
await human.load(); // preload all models
log('initializing human...');
log('face embedding model:', humanConfig.face.description.enabled ? 'faceres' : '', humanConfig.face['mobilefacenet']?.enabled ? 'mobilefacenet' : '', humanConfig.face['insightface']?.enabled ? 'insightface' : '');
log('loading face database...');
log('known face records:', await indexDb.count());
dom.retry.addEventListener('click', main);
dom.save.addEventListener('click', saveRecords);
dom.delete.addEventListener('click', deleteRecord);
await human.warmup(); // warmup function to initialize backend for future faster detection
await main();
}
window.onload = init;

View File

@ -1,65 +0,0 @@
let db: IDBDatabase; // instance of indexdb
const database = 'human';
const table = 'person';
export interface FaceRecord { id: number, name: string, descriptor: number[], image: ImageData }
const log = (...msg) => console.log('indexdb', ...msg); // eslint-disable-line no-console
export async function open() {
if (db) return true;
return new Promise((resolve) => {
const request: IDBOpenDBRequest = indexedDB.open(database, 1);
request.onerror = (evt) => log('error:', evt);
request.onupgradeneeded = (evt: IDBVersionChangeEvent) => { // create if it doesn't exist
log('create:', evt.target);
db = (evt.target as IDBOpenDBRequest).result;
db.createObjectStore(table, { keyPath: 'id', autoIncrement: true });
};
request.onsuccess = (evt) => { // open
db = (evt.target as IDBOpenDBRequest).result;
log('open:', db);
resolve(true);
};
});
}
export async function load(): Promise<FaceRecord[]> {
const faceDB: FaceRecord[] = [];
if (!db) await open(); // open or create if not already done
return new Promise((resolve) => {
const cursor: IDBRequest = db.transaction([table], 'readwrite').objectStore(table).openCursor(null, 'next');
cursor.onerror = (evt) => log('load error:', evt);
cursor.onsuccess = (evt) => {
if ((evt.target as IDBRequest).result) {
faceDB.push((evt.target as IDBRequest).result.value);
(evt.target as IDBRequest).result.continue();
} else {
resolve(faceDB);
}
};
});
}
export async function count(): Promise<number> {
if (!db) await open(); // open or create if not already done
return new Promise((resolve) => {
const store: IDBRequest = db.transaction([table], 'readwrite').objectStore(table).count();
store.onerror = (evt) => log('count error:', evt);
store.onsuccess = () => resolve(store.result);
});
}
export async function save(faceRecord: FaceRecord) {
if (!db) await open(); // open or create if not already done
const newRecord = { name: faceRecord.name, descriptor: faceRecord.descriptor, image: faceRecord.image }; // omit id as its autoincrement
db.transaction([table], 'readwrite').objectStore(table).put(newRecord);
log('save:', newRecord);
}
export async function remove(faceRecord: FaceRecord) {
if (!db) await open(); // open or create if not already done
db.transaction([table], 'readwrite').objectStore(table).delete(faceRecord.id); // delete based on id
log('delete:', faceRecord);
}
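A short usage sketch for this module (the `canvas` element and descriptor contents are illustrative):
```js
import * as indexDb from './indexdb';

const image = canvas.getContext('2d').getImageData(0, 0, canvas.width, canvas.height);
const record = { id: 0, name: 'test user', descriptor: Array(1024).fill(0), image }; // id is ignored on save since the store autoincrements
await indexDb.save(record);
console.log('known records:', await indexDb.count());
const all = await indexDb.load(); // resolves to FaceRecord[]
```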

View File

@ -1,84 +0,0 @@
# Human Face Recognition & Matching
- **Browser** demo: `index.html` & `facematch.js`:
Loads sample images, extracts faces and runs match and similarity analysis
- **NodeJS** demo `node-match.js` and `node-match-worker.js`
Advanced multithreading demo that runs a number of worker threads to process a high number of matches
- Sample face database: `faces.json`
<br>
## Browser Face Recognition Demo
- `demo/facematch`: Demo for Browsers that uses all face description and embedding features to
detect, extract and identify all faces plus calculate similarity between them
It highlights functionality such as:
- Loading images
- Extracting faces from images
- Calculating face embedding descriptors
- Finding face similarity and sorting them by similarity
- Finding best face match based on a known list of faces and printing matches
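The core of the match step is a single call; a minimal sketch grounded in the `facematch` source (the `db` array and its `embedding` field are assumptions based on the demo's face database format, and match options are omitted for brevity):
```js
// sketch: find the best match for one detected face against known descriptors
const res = await human.detect(image); // run detection on an image element
const descriptors = db.map((rec) => rec.embedding); // known face descriptors from the loaded database
const best = human.match.find(res.face[0].embedding, descriptors); // returns best { index, similarity }
console.log('best match:', db[best.index]?.name, 'similarity:', best.similarity);
```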
<br>
## NodeJS Multi-Threading Match Solution
### Methods and Properties in `node-match`
- `createBuffer`: create shared buffer array
single copy of data regardless of number of workers
fixed size based on `options.dbMax`
- `appendRecord`: add additional batch of descriptors to buffer
can append a batch of records to the buffer at any time
workers are informed of the new content after the append has completed
- `workersStart`: start or expand pool of `threadPoolSize` workers
each worker runs `node-match-worker` and listens for messages from main thread
can shutdown workers or create additional worker threads on-the-fly
safe against workers that exit
- `workersClose`: close workers in a pool
first request workers to exit then terminate after timeout
- `match`: dispach a match job to a worker
returns first match that satisfies `minThreshold`
assigment to workers using round-robin
since timing for each job is near-fixed and predictable
- `getDescriptor`: get descriptor array for a given id from a buffer
- `fuzDescriptor`: small randomize descriptor content for harder match
- `getLabel`: fetch label for resolved descriptor index
- `loadDB`: load face database from a JSON file `dbFile`
extracts descriptors and adds them to buffer
extracts labels and maintains them in main thread
for test purposes loads same database `dbFact` times to create a very large database
`node-match` runs in a listens for messages from workers until `maxJobs` have been reached
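
A minimal sketch of that call sequence, assuming the helpers were factored out of `node-match.js` into an importable module; in the demo itself they are file-local functions, so the import below is hypothetical:

```js
// hypothetical import; in the demo these are file-local functions
const { createBuffer, loadDB, workersStart, workersClose, getDescriptor, match } = require('./node-match');

async function run() {
  await createBuffer(); // SharedArrayBuffer of 4 * dbMax * descLength bytes,
  // e.g. 4 * 10000 * 1024 = 40,960,000 bytes, matching the demo log in the example below
  await loadDB(1); // parse faces.json once and append descriptors and labels to the buffer
  await workersStart(4); // start 4 workers; each receives a reference to the shared buffer
  match(getDescriptor(0)); // fire-and-forget: the result arrives as a worker message
  // the demo closes workers from its message handler once maxJobs results are in
  await workersClose(); // ask workers to exit, then terminate stragglers after a timeout
}
run();
```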
### Performance
Match time increases linearly with the number of records in the database,
while gains from additional worker threads are sub-linear due to communication overhead:
e.g., going from 10k to 100k records (10x) raises single-thread match time from ~60 ms to ~600 ms (10x),
while going from 1 to 6 threads yields only a ~2-3x speedup
- Face database with 10k records:
> threadPoolSize: 1 => ~60 ms / match job
> threadPoolSize: 6 => ~25 ms / match job
- Face database with 50k records:
> threadPoolSize: 1 => ~300 ms / match job
> threadPoolSize: 6 => ~100 ms / match job
- Face database with 100k records:
> threadPoolSize: 1 => ~600 ms / match job
> threadPoolSize: 6 => ~200 ms / match job
### Example
> node node-match
<!-- eslint-skip -->
```js
INFO: options: { dbFile: './faces.json', dbMax: 10000, threadPoolSize: 6, workerSrc: './node-match-worker.js', debug: false, minThreshold: 0.9, descLength: 1024 }
DATA: created shared buffer: { maxDescriptors: 10000, totalBytes: 40960000, totalElements: 10240000 }
DATA: db loaded: { existingRecords: 0, newRecords: 5700 }
INFO: starting worker thread pool: { totalWorkers: 6, alreadyActive: 0 }
STATE: submitted: { matchJobs: 100, poolSize: 6, activeWorkers: 6 }
STATE: { matchJobsFinished: 100, totalTimeMs: 1769, averageTimeMs: 17.69 }
INFO: closing workers: { poolSize: 6, activeWorkers: 6 }
```

View File

@ -1,29 +1,25 @@
// @ts-nocheck // typescript checks disabled as this is pure javascript
/**
* Human demo for browsers
*
* Demo for face descriptor analysis and face similarity analysis
* Demo for face descriptor analysis and face simmilarity analysis
*/
/** @type {Human} */
import { Human } from '../../dist/human.esm.js';
import Human from '../../dist/human.esm.js';
const userConfig = {
backend: 'humangl',
async: true,
backend: 'wasm',
async: false,
warmup: 'none',
cacheSensitivity: 0.01,
debug: true,
modelBasePath: '../../models/',
deallocate: true,
filter: {
enabled: true,
equalization: true,
width: 0,
},
// wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
face: {
enabled: true,
detector: { return: true, rotation: true, maxDetected: 50, iouThreshold: 0.01, minConfidence: 0.2 },
detector: { rotation: true, return: true },
mesh: { enabled: true },
embedding: { enabled: false },
iris: { enabled: false },
emotion: { enabled: true },
description: { enabled: true },
@ -31,6 +27,7 @@ const userConfig = {
hand: { enabled: false },
gesture: { enabled: false },
body: { enabled: false },
filter: { enabled: true },
segmentation: { enabled: false },
};
@ -39,65 +36,63 @@ const human = new Human(userConfig); // new instance of human
const all = []; // array that will hold all detected faces
let db = []; // array that holds all known faces
const minScore = 0.4;
const minScore = 0.6;
const minConfidence = 0.8;
function log(...msg) {
const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
console.log(ts, ...msg); // eslint-disable-line no-console
// eslint-disable-next-line no-console
console.log(ts, ...msg);
}
function title(msg) {
document.getElementById('title').innerHTML = msg;
}
async function loadFaceMatchDB() {
async function getFaceDB() {
// download db with known faces
try {
let res = await fetch('/demo/facematch/faces.json');
if (!res || !res.ok) res = await fetch('/human/demo/facematch/faces.json');
db = (res && res.ok) ? await res.json() : [];
log('Loaded Faces DB:', db);
for (const rec of db) {
rec.embedding = rec.embedding.map((a) => parseFloat(a.toFixed(4)));
}
} catch (err) {
log('Could not load faces database', err);
}
}
async function selectFaceCanvas(face) {
async function analyze(face) {
// refresh faces database
await getFaceDB();
// if we have face image tensor, enhance it and display it
let embedding;
document.getElementById('orig').style.filter = 'blur(16px)';
if (face.tensor) {
title('Sorting Faces by Similarity');
const c = document.getElementById('orig');
await human.draw.tensor(face.tensor, c);
const arr = db.map((rec) => rec.embedding);
const res = await human.match.find(face.embedding, arr);
log('Match:', db[res.index].name);
const emotion = face.emotion[0] ? `${Math.round(100 * face.emotion[0].score)}% ${face.emotion[0].emotion}` : 'N/A';
document.getElementById('desc').innerHTML = `
source: ${face.fileName}<br>
match: ${Math.round(1000 * res.similarity) / 10}% ${db[res.index].name}<br>
score: ${Math.round(100 * face.boxScore)}% detection ${Math.round(100 * face.faceScore)}% analysis<br>
age: ${face.age} years<br>
gender: ${Math.round(100 * face.genderScore)}% ${face.gender}<br>
emotion: ${emotion}<br>
`;
embedding = face.embedding.map((a) => parseFloat(a.toFixed(4)));
const enhanced = human.enhance(face);
const desc = document.getElementById('desc');
desc.innerText = `{"name":"unknown", "source":"${face.fileName}", "embedding":[${face.embedding}]},`;
const embedding = face.embedding.map((a) => parseFloat(a.toFixed(4)));
navigator.clipboard.writeText(`{"name":"unknown", "source":"${face.fileName}", "embedding":[${embedding}]},`);
if (enhanced) {
const c = document.getElementById('orig');
const squeeze = human.tf.div(human.tf.squeeze(enhanced), 255);
await human.tf.browser.toPixels(squeeze, c);
human.tf.dispose(enhanced);
human.tf.dispose(squeeze);
const ctx = c.getContext('2d');
ctx.font = 'small-caps 0.4rem "Lato"';
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
}
}
// loop through all canvases that contain faces
const canvases = document.getElementsByClassName('face');
let time = 0;
for (const canvas of canvases) {
// calculate similarity from selected face to current one in the loop
const current = all[canvas.tag.sample][canvas.tag.face];
const similarity = human.match.similarity(face.embedding, current.embedding);
canvas.tag.similarity = similarity;
const similarity = human.similarity(face.embedding, current.embedding, 3);
// get best match
// draw the canvas
await human.draw.tensor(current.tensor, canvas);
canvas.title = similarity;
await human.tf.browser.toPixels(current.tensor, canvas);
const ctx = canvas.getContext('2d');
ctx.font = 'small-caps 1rem "Lato"';
ctx.fillStyle = 'rgba(0, 0, 0, 1)';
@ -108,73 +103,56 @@ async function selectFaceCanvas(face) {
ctx.fillText(`${current.age}y ${(100 * (current.genderScore || 0)).toFixed(1)}% ${current.gender}`, 4, canvas.height - 6);
// identify person
ctx.font = 'small-caps 1rem "Lato"';
const start = human.now();
const arr = db.map((rec) => rec.embedding);
const res = await human.match.find(current.embedding, arr);
time += (human.now() - start);
if (res.similarity > minScore) ctx.fillText(`DB: ${(100 * res.similarity).toFixed(1)}% ${db[res.index].name}`, 4, canvas.height - 30);
const person = await human.match(current.embedding, db);
if (person.similarity && person.similarity > minScore && current.confidence > minConfidence) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30);
}
log('Analyzed:', 'Face:', canvases.length, 'DB:', db.length, 'Time:', time);
// sort all faces by similarity
const sorted = document.getElementById('faces');
[...sorted.children]
.sort((a, b) => parseFloat(b.tag.similarity) - parseFloat(a.tag.similarity))
.sort((a, b) => parseFloat(b.title) - parseFloat(a.title))
.forEach((canvas) => sorted.appendChild(canvas));
document.getElementById('orig').style.filter = 'blur(0)';
title('Selected Face');
}
async function addFaceCanvas(index, res, fileName) {
async function faces(index, res, fileName) {
all[index] = res.face;
for (const i in res.face) {
if (!res.face[i].tensor) continue; // did not get valid results
if ((res.face[i].faceScore || 0) < human.config.face.detector.minConfidence) continue; // face analysis score too low
all[index][i].fileName = fileName;
const canvas = document.createElement('canvas');
canvas.tag = { sample: index, face: i, source: fileName };
canvas.tag = { sample: index, face: i };
canvas.width = 200;
canvas.height = 200;
canvas.className = 'face';
const emotion = res.face[i].emotion[0] ? `${Math.round(100 * res.face[i].emotion[0].score)}% ${res.face[i].emotion[0].emotion}` : 'N/A';
canvas.title = `
source: ${res.face[i].fileName}
score: ${Math.round(100 * res.face[i].boxScore)}% detection ${Math.round(100 * res.face[i].faceScore)}% analysis
age: ${res.face[i].age} years
gender: ${Math.round(100 * res.face[i].genderScore)}% ${res.face[i].gender}
emotion: ${emotion}
`.replace(/ /g, ' ');
await human.draw.tensor(res.face[i].tensor, canvas);
// mouse click on any face canvas triggers analysis
canvas.addEventListener('click', (evt) => {
log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, all[evt.target.tag.sample][evt.target.tag.face]);
analyze(all[evt.target.tag.sample][evt.target.tag.face]);
});
// if we actually got face image tensor, draw canvas with that face
if (res.face[i].tensor) {
await human.tf.browser.toPixels(res.face[i].tensor, canvas);
document.getElementById('faces').appendChild(canvas);
const ctx = canvas.getContext('2d');
if (!ctx) return;
ctx.font = 'small-caps 0.8rem "Lato"';
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
ctx.fillText(`${res.face[i].age}y ${(100 * (res.face[i].genderScore || 0)).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6);
const arr = db.map((rec) => rec.embedding);
const result = human.match.find(res.face[i].embedding, arr);
const person = await human.match(res.face[i].embedding, db);
ctx.font = 'small-caps 1rem "Lato"';
if (result.similarity && res.similarity > minScore) ctx.fillText(`${(100 * result.similarity).toFixed(1)}% ${db[result.index].name}`, 4, canvas.height - 30);
document.getElementById('faces').appendChild(canvas);
canvas.addEventListener('click', (evt) => {
log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, 'Source:', evt.target.tag.source, all[evt.target.tag.sample][evt.target.tag.face]);
selectFaceCanvas(all[evt.target.tag.sample][evt.target.tag.face]);
});
if (person.similarity && person.similarity > minScore && res.face[i].confidence > minConfidence) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30);
}
}
}
async function addImageElement(index, image, length) {
const faces = all.reduce((prev, curr) => prev += curr.length, 0);
title(`Analyzing Input Images<br> ${Math.round(100 * index / length)}% [${index} / ${length}]<br>Found ${faces} Faces`);
async function process(index, image) {
return new Promise((resolve) => {
const img = new Image(128, 128);
img.onload = () => { // must wait until image is loaded
human.detect(img, userConfig).then(async (res) => {
await faces(index, res, image); // then wait until image is analyzed
log('Add image:', index + 1, image, 'faces:', res.face.length);
document.getElementById('images').appendChild(img); // and finally we can add it
human.detect(img, userConfig)
.then((res) => { // eslint-disable-line promise/always-return
addFaceCanvas(index, res, image); // then wait until image is analyzed
resolve(true);
})
.catch(() => log('human detect error'));
});
};
img.onerror = () => {
log('Add image error:', index + 1, image);
@ -185,7 +163,7 @@ async function addImageElement(index, image, length) {
});
}
function createFaceMatchDB() {
async function createDB() {
log('Creating Faces DB...');
for (const image of all) {
for (const face of image) db.push({ name: 'unknown', source: face.fileName, embedding: face.embedding });
@ -194,64 +172,64 @@ function createFaceMatchDB() {
}
async function main() {
/*
window.addEventListener('unhandledrejection', (evt) => {
// eslint-disable-next-line no-console
console.error(evt.reason || evt);
document.getElementById('list').innerHTML = evt?.reason?.message || evt?.reason || evt;
evt.preventDefault();
});
*/
// pre-load human models
await human.load();
title('Loading Face Match Database');
let images = [];
let dir = [];
// load face descriptor database
await loadFaceMatchDB();
await getFaceDB();
// enumerate all sample images in /assets
title('Enumerating Input Images');
const res = await fetch('/samples/in');
const res = await fetch('/samples/groups');
dir = (res && res.ok) ? await res.json() : [];
images = images.concat(dir.filter((img) => (img.endsWith('.jpg') && img.includes('sample'))));
// could not dynamically enumerate images so using static list
if (images.length === 0) {
images = [
'ai-face.jpg', 'ai-upper.jpg', 'ai-body.jpg', 'solvay1927.jpg',
'group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg',
'person-celeste.jpg', 'person-christina.jpg', 'person-lauren.jpg', 'person-lexi.jpg', 'person-linda.jpg', 'person-nicole.jpg', 'person-tasia.jpg', 'person-tetiana.jpg', 'person-vlado.jpg', 'person-vlado1.jpg', 'person-vlado5.jpg',
'stock-group-1.jpg', 'stock-group-2.jpg',
'stock-models-1.jpg', 'stock-models-2.jpg', 'stock-models-3.jpg', 'stock-models-4.jpg', 'stock-models-5.jpg', 'stock-models-6.jpg', 'stock-models-7.jpg', 'stock-models-8.jpg', 'stock-models-9.jpg',
'stock-teen-1.jpg', 'stock-teen-2.jpg', 'stock-teen-3.jpg', 'stock-teen-4.jpg', 'stock-teen-5.jpg', 'stock-teen-6.jpg', 'stock-teen-7.jpg', 'stock-teen-8.jpg',
'stock-models-10.jpg', 'stock-models-11.jpg', 'stock-models-12.jpg', 'stock-models-13.jpg', 'stock-models-14.jpg', 'stock-models-15.jpg', 'stock-models-16.jpg',
'cgi-model-1.jpg', 'cgi-model-2.jpg', 'cgi-model-3.jpg', 'cgi-model-4.jpg', 'cgi-model-5.jpg', 'cgi-model-6.jpg', 'cgi-model-7.jpg', 'cgi-model-8.jpg', 'cgi-model-9.jpg',
'cgi-model-10.jpg', 'cgi-model-11.jpg', 'cgi-model-12.jpg', 'cgi-model-13.jpg', 'cgi-model-14.jpg', 'cgi-model-15.jpg', 'cgi-model-18.jpg', 'cgi-model-19.jpg',
'cgi-model-20.jpg', 'cgi-model-21.jpg', 'cgi-model-22.jpg', 'cgi-model-23.jpg', 'cgi-model-24.jpg', 'cgi-model-25.jpg', 'cgi-model-26.jpg', 'cgi-model-27.jpg', 'cgi-model-28.jpg', 'cgi-model-29.jpg',
'cgi-model-30.jpg', 'cgi-model-31.jpg', 'cgi-model-33.jpg', 'cgi-model-34.jpg',
'cgi-multiangle-1.jpg', 'cgi-multiangle-2.jpg', 'cgi-multiangle-3.jpg', 'cgi-multiangle-4.jpg', 'cgi-multiangle-6.jpg', 'cgi-multiangle-7.jpg', 'cgi-multiangle-8.jpg', 'cgi-multiangle-9.jpg', 'cgi-multiangle-10.jpg', 'cgi-multiangle-11.jpg',
'stock-emotions-a-1.jpg', 'stock-emotions-a-2.jpg', 'stock-emotions-a-3.jpg', 'stock-emotions-a-4.jpg', 'stock-emotions-a-5.jpg', 'stock-emotions-a-6.jpg', 'stock-emotions-a-7.jpg', 'stock-emotions-a-8.jpg',
'stock-emotions-b-1.jpg', 'stock-emotions-b-2.jpg', 'stock-emotions-b-3.jpg', 'stock-emotions-b-4.jpg', 'stock-emotions-b-5.jpg', 'stock-emotions-b-6.jpg', 'stock-emotions-b-7.jpg', 'stock-emotions-b-8.jpg',
'groups/group1.jpg',
'groups/group2.jpg',
'groups/group3.jpg',
'groups/group4.jpg',
'groups/group5.jpg',
'groups/group6.jpg',
'groups/group7.jpg',
'groups/group8.jpg',
'groups/group9.jpg',
'groups/group10.jpg',
'groups/group11.jpg',
'groups/group12.jpg',
'groups/group13.jpg',
'groups/group14.jpg',
];
// add prefix for github pages
images = images.map((a) => `../../samples/in/${a}`);
log('Adding static image list:', images);
} else {
log('Discovered images:', images);
images = images.map((a) => `/human/samples/${a}`);
log('Adding static image list:', images.length, 'images');
}
// images = ['/samples/in/person-lexi.jpg', '/samples/in/person-carolina.jpg', '/samples/in/solvay1927.jpg'];
const t0 = human.now();
for (let i = 0; i < images.length; i++) await addImageElement(i, images[i], images.length);
const t1 = human.now();
// download and analyze all images
for (let i = 0; i < images.length; i++) await process(i, images[i]);
// print stats
const num = all.reduce((prev, cur) => prev += cur.length, 0);
log('Extracted faces:', num, 'from images:', all.length, 'time:', Math.round(t1 - t0));
log('Extracted faces:', num, 'from images:', all.length);
log(human.tf.engine().memory());
// if we didn't download db, generate it from current faces
if (!db || db.length === 0) createFaceMatchDB();
if (!db || db.length === 0) await createDB();
else log('Loaded Faces DB:', db.length);
title('');
log('Ready');
human.validate(userConfig);
human.match.similarity([], []);
}
window.onload = main;

File diff suppressed because one or more lines are too long

View File

@ -1,9 +1,8 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<!-- <meta http-equiv="content-type" content="text/html; charset=utf-8"> -->
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
@ -15,36 +14,30 @@
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./facematch.js" type="module"></script>
<style>
img { object-fit: contain; }
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 24px; font-variant: small-caps; }
body { margin: 24px; background: black; color: white; overflow: hidden; text-align: -webkit-center; min-height: 100%; max-height: 100%; }
::-webkit-scrollbar { height: 8px; border: 0; border-radius: 0; }
::-webkit-scrollbar-thumb { background: grey }
::-webkit-scrollbar-track { margin: 3px; }
.orig { width: 200px; height: 200px; padding-bottom: 20px; filter: blur(16px); transition : all 0.5s ease; }
.text { margin: 24px; }
.face { width: 128px; height: 128px; margin: 2px; padding: 2px; cursor: grab; transform: scale(1.00); transition : all 0.3s ease; }
.face:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
body { margin: 0; background: black; color: white; overflow-x: hidden; }
img { object-fit: contain; }
.face { width: 128px; height: 128px; }
</style>
</head>
<body>
<div style="display: block">
<div style="display: flex">
<div style="min-width: 400px">
<div class="text" id="title"></div>
<canvas id="orig" class="orig"></canvas>
<div id="desc" style="font-size: 0.8rem; text-align: left;"></div>
<div>
Selected Face<br>
<canvas id="orig" style="width: 200px; height: 200px; padding: 20px"></canvas>
</div>
<div style="width: 20px"></div>
<div>
<div class="text">Input Images</div>
<div id="images" style="display: flex; width: 60vw; overflow-x: auto; overflow-y: hidden; scroll-behavior: smooth"></div>
Sample Images<br>
<div id="images" style="display: flex; flex-wrap: wrap; width: 85vw"></div>
</div>
<span id="desc" style="visibility: hidden; font-size: 0.4rem;"></span><br>
</div>
<div id="list" style="height: 10px"></div>
<div class="text">Select person to sort by similarity and get a known face match</div>
<div id="faces" style="height: 50vh; overflow-y: auto"></div>
Extracted Faces - click on a face to sort by similarity and get a known face match:<br>
<div id="faces"></div>
</div>
</body>
</html>

View File

@ -1,76 +0,0 @@
/**
* Runs in a worker thread started by `node-match` demo app
*
*/
const threads = require('worker_threads');
let debug = false;
/** @type SharedArrayBuffer */
let buffer;
/** @type Float32Array */
let view;
let threshold = 0;
let records = 0;
const descLength = 1024; // descriptor length in elements (each element is a 4-byte float)
function distance(descBuffer, index, options = { order: 2, multiplier: 20 }) {
const descriptor = new Float32Array(descBuffer);
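// accumulate p-norm distance between the incoming descriptor and the record at `index`:
// order 2 sums squared diffs (sqrt is applied later in match()), other orders use |diff|^order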
let sum = 0;
for (let i = 0; i < descriptor.length; i++) {
const diff = (options.order === 2) ? (descriptor[i] - view[index * descLength + i]) : (Math.abs(descriptor[i] - view[index * descLength + i]));
sum += (options.order === 2) ? (diff * diff) : (diff ** options.order);
}
return (options.multiplier || 20) * sum;
}
function match(descBuffer, options = { order: 2, multiplier: 20 }) {
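// linear scan over all records in the shared buffer, keeping the lowest distance;
// stops early once the distance drops below `threshold` (set from options.minThreshold)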
let best = Number.MAX_SAFE_INTEGER;
let index = -1;
for (let i = 0; i < records; i++) {
const res = distance(descBuffer, i, { order: options.order, multiplier: options.multiplier });
if (res < best) {
best = res;
index = i;
}
if (best < threshold || best === 0) break; // short circuit
}
best = (options.order === 2) ? Math.sqrt(best) : best ** (1 / options.order);
const similarity = Math.round(100 * Math.max(0, 100 - best) / 100.0) / 100; // map distance to a 0..1 similarity score, clamped at 0
return { index, distance: best, similarity };
}
threads.parentPort?.on('message', (msg) => {
if (typeof msg.descriptor !== 'undefined') { // actual work order to find a match
const t0 = performance.now();
const result = match(msg.descriptor);
const t1 = performance.now();
threads.parentPort?.postMessage({ request: msg.request, time: Math.trunc(t1 - t0), ...result });
return; // short circuit
}
if (msg instanceof SharedArrayBuffer) { // called only once to receive reference to shared array buffer
buffer = msg;
view = new Float32Array(buffer); // initialize f32 view into buffer
if (debug) threads.parentPort?.postMessage(`buffer: ${buffer.byteLength}`);
}
if (typeof msg.records !== 'undefined') { // received every time the number of records changes
records = msg.records;
if (debug) threads.parentPort?.postMessage(`records: ${records}`);
}
if (typeof msg.debug !== 'undefined') { // set verbose logging
debug = msg.debug;
// if (debug) threads.parentPort?.postMessage(`debug: ${debug}`);
}
if (typeof msg.threshold !== 'undefined') { // set minimum similarity threshold
threshold = msg.threshold;
// if (debug) threads.parentPort?.postMessage(`threshold: ${threshold}`);
}
if (typeof msg.shutdown !== 'undefined') { // got message to close worker
if (debug) threads.parentPort?.postMessage('shutting down');
process.exit(0); // eslint-disable-line no-process-exit
}
});
if (debug) threads.parentPort?.postMessage('started');

View File

@ -1,184 +0,0 @@
/**
* Human demo app for NodeJS that generates random facial descriptors
* and uses NodeJS multi-threading to start multiple threads for face matching
* uses `node-match-worker.js` to perform actual face matching analysis
*/
const fs = require('fs');
const path = require('path');
const threads = require('worker_threads');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// global options
const options = {
dbFile: 'demo/facematch/faces.json', // sample face db
dbMax: 10000, // maximum number of records to hold in memory
threadPoolSize: 12, // number of worker threads to create in thread pool
workerSrc: './node-match-worker.js', // code that executes in the worker thread
debug: true, // verbose messages
minThreshold: 0.5, // match returns first record that meets the similarity threshold, set to 0 to always scan all records
descLength: 1024, // descriptor length
};
// test options
const testOptions = {
dbFact: 175, // load db n times to fake huge size
maxJobs: 200, // exit after processing this many jobs
fuzDescriptors: true, // randomize descriptor content before match for harder jobs
};
// global data structures
const data = {
/** @type string[] */
labels: [], // array of strings; its length serves as the overall number of records so it has to be maintained carefully
/** @type SharedArrayBuffer | null */
buffer: null,
/** @type Float32Array | null */
view: null,
/** @type threads.Worker[] */
workers: [], // holds worker instances; an entry can be null if the worker exited
requestID: 0, // each request should increment this counter as it's used for round-robin assignment
};
let t0 = process.hrtime.bigint(); // used for perf counters
const appendRecords = (labels, descriptors) => {
if (!data.view) return 0;
if (descriptors.length !== labels.length) {
log.error('append error:', { descriptors: descriptors.length, labels: labels.length });
}
// if (options.debug) log.state('appending:', { descriptors: descriptors.length, labels: labels.length });
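// buffer layout: record r occupies elements [r * descLength, (r + 1) * descLength)
// of the shared view, assuming each descriptor has exactly options.descLength elements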
for (let i = 0; i < descriptors.length; i++) {
for (let j = 0; j < descriptors[i].length; j++) {
data.view[data.labels.length * descriptors[i].length + j] = descriptors[i][j]; // add each descriptors element to buffer
}
data.labels.push(labels[i]); // finally add to labels
}
for (const worker of data.workers) { // inform all workers how many records we have
if (worker) worker.postMessage({ records: data.labels.length });
}
return data.labels.length;
};
const getLabel = (index) => data.labels[index];
const getDescriptor = (index) => {
if (!data.view) return [];
const descriptor = [];
for (let i = 0; i < 1024; i++) descriptor.push(data.view[index * options.descLength + i]);
return descriptor;
};
const fuzDescriptor = (descriptor) => {
for (let i = 0; i < descriptor.length; i++) descriptor[i] += Math.random() - 0.5;
return descriptor;
};
const delay = (ms) => new Promise((resolve) => { setTimeout(resolve, ms); });
async function workersClose() {
const current = data.workers.filter((worker) => !!worker).length;
log.info('closing workers:', { poolSize: data.workers.length, activeWorkers: current });
for (const worker of data.workers) {
if (worker) worker.postMessage({ shutdown: true }); // tell worker to exit
}
await delay(250); // wait a little for threads to exit on their own
const remaining = data.workers.filter((worker) => !!worker).length;
if (remaining > 0) {
log.info('terminating remaining workers:', { remaining, pool: data.workers.length });
for (const worker of data.workers) {
if (worker) worker.terminate(); // if worker did not exit cleanly, terminate it
}
}
}
const workerMessage = (index, msg) => {
if (msg.request) {
if (options.debug) log.data('message:', { worker: index, request: msg.request, time: msg.time, label: getLabel(msg.index), similarity: msg.similarity });
if (msg.request >= testOptions.maxJobs) {
const t1 = process.hrtime.bigint();
const elapsed = Math.round(Number(t1 - t0) / 1000 / 1000);
log.state({ matchJobsFinished: testOptions.maxJobs, totalTimeMs: elapsed, averageTimeMs: Math.round(100 * elapsed / testOptions.maxJobs) / 100 });
workersClose();
}
} else {
log.data('message:', { worker: index, msg });
}
};
async function workerClose(id, code) {
const previous = data.workers.filter((worker) => !!worker).length;
delete data.workers[id];
const current = data.workers.filter((worker) => !!worker).length;
if (options.debug) log.state('worker exit:', { id, code, previous, current });
}
async function workersStart(numWorkers) {
const previous = data.workers.filter((worker) => !!worker).length;
log.info('starting worker thread pool:', { totalWorkers: numWorkers, alreadyActive: previous });
for (let i = 0; i < numWorkers; i++) {
if (!data.workers[i]) { // worker does not exist, so create it
const worker = new threads.Worker(path.join(__dirname, options.workerSrc));
worker.on('message', (msg) => workerMessage(i, msg));
worker.on('error', (err) => log.error('worker error:', { err }));
worker.on('exit', (code) => workerClose(i, code));
worker.postMessage(data.buffer); // send buffer to worker
data.workers[i] = worker;
}
data.workers[i]?.postMessage({ records: data.labels.length, threshold: options.minThreshold, debug: options.debug }); // inform worker how many records there are
}
await delay(100); // just wait a bit for everything to settle down
}
const match = (descriptor) => {
// const arr = Float32Array.from(descriptor);
const buffer = new ArrayBuffer(options.descLength * 4);
const view = new Float32Array(buffer);
view.set(descriptor);
const available = data.workers.filter((worker) => !!worker).length; // find number of available workers
if (available > 0) data.workers[data.requestID % available].postMessage({ descriptor: buffer, request: data.requestID }, [buffer]); // round robin to first available worker
else log.error('no available workers');
};
async function loadDB(count) {
const previous = data.labels.length;
if (!fs.existsSync(options.dbFile)) {
log.error('db file does not exist:', options.dbFile);
return;
}
t0 = process.hrtime.bigint();
for (let i = 0; i < count; i++) { // test loop: load entire face db from array of objects n times into buffer
const db = JSON.parse(fs.readFileSync(options.dbFile).toString());
const names = db.map((record) => record.name);
const descriptors = db.map((record) => record.embedding);
appendRecords(names, descriptors);
}
log.data('db loaded:', { existingRecords: previous, newRecords: data.labels.length });
}
async function createBuffer() {
data.buffer = new SharedArrayBuffer(4 * options.dbMax * options.descLength); // preallocate max number of records as SharedArrayBuffers cannot grow
data.view = new Float32Array(data.buffer); // create view into buffer
data.labels.length = 0;
log.data('created shared buffer:', { maxDescriptors: (data.view.length || 0) / options.descLength, totalBytes: data.buffer.byteLength, totalElements: data.view.length });
}
async function main() {
log.header();
log.info('options:', options);
await createBuffer(); // create shared buffer array
await loadDB(testOptions.dbFact); // loadDB is a test method that calls the actual appendRecords
await workersStart(options.threadPoolSize); // can be called at anytime to modify worker pool size
for (let i = 0; i < testOptions.maxJobs; i++) {
const idx = Math.trunc(data.labels.length * Math.random()); // grab a random descriptor index that we'll search for
const descriptor = getDescriptor(idx); // grab a descriptor at index
data.requestID++; // increase request id
if (testOptions.fuzDescriptors) match(fuzDescriptor(descriptor)); // fuz descriptor for harder match
else match(descriptor);
if (options.debug) log.debug('submitted job', data.requestID); // we already know what we're searching for so we can compare results
}
log.state('submitted:', { matchJobs: testOptions.maxJobs, poolSize: data.workers.length, activeWorkers: data.workers.filter((worker) => !!worker).length });
}
main();

View File

@ -1,3 +0,0 @@
# Helper libraries
Used by main `Human` demo app

View File

@ -1,3 +1,4 @@
// @ts-nocheck
// based on: https://github.com/munrocket/gl-bench
const UICSS = `
@ -36,13 +37,15 @@ const UISVG = `
class GLBench {
/** GLBench constructor
* @param { WebGLRenderingContext | WebGL2RenderingContext | null } gl context
* @param { WebGLRenderingContext | WebGL2RenderingContext } gl context
* @param { Object | undefined } settings additional settings
*/
constructor(gl, settings = {}) {
this.css = UICSS;
this.svg = UISVG;
// eslint-disable-next-line @typescript-eslint/no-empty-function
this.paramLogger = () => {};
// eslint-disable-next-line @typescript-eslint/no-empty-function
this.chartLogger = () => {};
this.chartLen = 20;
this.chartHz = 20;
@ -89,6 +92,7 @@ class GLBench {
const addProfiler = (fn, self, target) => {
const t = self.now();
// eslint-disable-next-line prefer-rest-params
fn.apply(target, arguments);
if (self.trackGPU) self.finished.push(glFinish(t, self.activeAccums.slice(0)));
};
@ -103,11 +107,13 @@ class GLBench {
if (gl[fn]) {
gl[fn] = addProfiler(gl[fn], this, gl);
} else {
// eslint-disable-next-line no-console
console.log('bench: cannot attach to webgl function');
}
/*
gl.getExtension = ((fn, self) => {
// eslint-disable-next-line prefer-rest-params
const ext = fn.apply(gl, arguments);
if (ext) {
['drawElementsInstancedANGLE', 'drawBuffersWEBGL'].forEach((fn2) => {
@ -142,6 +148,7 @@ class GLBench {
return (i, cpu, gpu, mem, fps, totalTime, frameId) => {
nodes['gl-cpu'][i].style.strokeDasharray = (cpu * 0.27).toFixed(0) + ' 100';
nodes['gl-gpu'][i].style.strokeDasharray = (gpu * 0.27).toFixed(0) + ' 100';
// eslint-disable-next-line no-nested-ternary
nodes['gl-mem'][i].innerHTML = names[i] ? names[i] : (mem ? 'mem: ' + mem.toFixed(0) + 'mb' : '');
nodes['gl-fps'][i].innerHTML = 'FPS: ' + fps.toFixed(1);
logger(names[i], cpu, gpu, mem, fps, totalTime, frameId);

View File

@ -64,7 +64,9 @@ function createNode() {
hideChildren() {
if (Array.isArray(this.children)) {
this.children.forEach((item) => {
// @ts-ignore
item['elem']['classList'].add('hide');
// @ts-ignore
if (item['expanded']) item.hideChildren();
});
}
@ -72,7 +74,9 @@ function createNode() {
showChildren() {
if (Array.isArray(this.children)) {
this.children.forEach((item) => {
// @ts-ignore
item['elem']['classList'].remove('hide');
// @ts-ignore
if (item['expanded']) item.showChildren();
});
}

View File

@ -1,3 +1,5 @@
//@ts-nocheck
let instance = 0;
let CSScreated = false;
@ -84,7 +86,6 @@ class Menu {
}
createMenu(parent, title = '', position = { top: null, left: null, bottom: null, right: null }) {
/** @type {HTMLDivElement} */
this.menu = document.createElement('div');
this.menu.id = `menu-${instance}`;
this.menu.className = 'menu';
@ -132,11 +133,11 @@ class Menu {
}
get width() {
return this.menu ? this.menu.offsetWidth : 0;
return this.menu.offsetWidth || 0;
}
get height() {
return this.menu ? this.menu.offsetHeight : 0;
return this.menu.offsetHeight || 0;
}
hide() {
@ -204,10 +205,8 @@ class Menu {
el.innerHTML = `<div class="menu-checkbox"><input class="menu-checkbox" type="checkbox" id="${this.newID}" ${object[variable] ? 'checked' : ''}/><label class="menu-checkbox-label" for="${this.ID}"></label></div>${title}`;
if (this.container) this.container.appendChild(el);
el.addEventListener('change', (evt) => {
if (evt.target) {
object[variable] = evt.target['checked'];
if (callback) callback(evt.target['checked']);
}
object[variable] = evt.target.checked;
if (callback) callback(evt.target.checked);
});
return el;
}
@ -226,7 +225,7 @@ class Menu {
el.style.fontVariant = document.body.style.fontVariant;
if (this.container) this.container.appendChild(el);
el.addEventListener('change', (evt) => {
if (callback && evt.target) callback(items[evt.target['selectedIndex']]);
if (callback) callback(items[evt.target.selectedIndex]);
});
return el;
}
@ -238,12 +237,12 @@ class Menu {
if (this.container) this.container.appendChild(el);
el.addEventListener('change', (evt) => {
if (evt.target) {
object[variable] = parseInt(evt.target['value']) === parseFloat(evt.target['value']) ? parseInt(evt.target['value']) : parseFloat(evt.target['value']);
evt.target.setAttribute('value', evt.target['value']);
if (callback) callback(evt.target['value']);
object[variable] = parseInt(evt.target.value) === parseFloat(evt.target.value) ? parseInt(evt.target.value) : parseFloat(evt.target.value);
evt.target.setAttribute('value', evt.target.value);
if (callback) callback(evt.target.value);
}
});
el['input'] = el.children[0];
el.input = el.children[0];
return el;
}
@ -283,6 +282,7 @@ class Menu {
return el;
}
// eslint-disable-next-line class-methods-use-this
updateValue(title, val, suffix = '') {
const el = document.getElementById(`menu-val-${title}`);
if (el) el.innerText = `${title}: ${val}${suffix}`;
@ -299,13 +299,12 @@ class Menu {
return el;
}
// eslint-disable-next-line class-methods-use-this
async updateChart(id, values) {
if (!values || (values.length === 0)) return;
/** @type {HTMLCanvasElement} */
const canvas = document.getElementById(`menu-canvas-${id}`);
if (!canvas) return;
const ctx = canvas.getContext('2d');
if (!ctx) return;
ctx.fillStyle = theme.background;
ctx.fillRect(0, 0, canvas.width, canvas.height);
const width = canvas.width / values.length;
@ -319,7 +318,7 @@ class Menu {
ctx.fillRect(i * width, 0, width - 4, canvas.height);
ctx.fillStyle = theme.background;
ctx.font = `${width / 1.5}px "Segoe UI"`;
ctx.fillText(Math.round(values[i]).toString(), i * width + 1, canvas.height - 1, width - 1);
ctx.fillText(Math.round(values[i]), i * width + 1, canvas.height - 1, width - 1);
}
}
}

View File

@ -0,0 +1,870 @@
// @ts-nocheck
import { Vector2, Vector3, Spherical, MOUSE, Quaternion, EventDispatcher } from './three.js';
/**
* @author qiao / https://github.com/qiao
* @author mrdoob / http://mrdoob.com
* @author alteredq / http://alteredqualia.com/
* @author WestLangley / http://github.com/WestLangley
* @author erich666 / http://erichaines.com
*/
// This set of controls performs orbiting, dollying (zooming), and panning.
// Unlike TrackballControls, it maintains the "up" direction object.up (+Y by default).
//
// Orbit - left mouse / touch: one-finger move
// Zoom - middle mouse, or mousewheel / touch: two-finger spread or squish
// Pan - right mouse, or left mouse + ctrl/metaKey, or arrow keys / touch: two-finger move
const OrbitControls = function (object, domElement) {
this.object = object;
this.domElement = (domElement !== undefined) ? domElement : document;
// Set to false to disable this control
this.enabled = true;
// "target" sets the location of focus, where the object orbits around
this.target = new Vector3();
// How far you can dolly in and out ( PerspectiveCamera only )
this.minDistance = 0;
this.maxDistance = Infinity;
// How far you can zoom in and out ( OrthographicCamera only )
this.minZoom = 0;
this.maxZoom = Infinity;
// How far you can orbit vertically, upper and lower limits.
// Range is 0 to Math.PI radians.
this.minPolarAngle = 0; // radians
this.maxPolarAngle = Math.PI; // radians
// How far you can orbit horizontally, upper and lower limits.
// If set, must be a sub-interval of the interval [ - Math.PI, Math.PI ].
this.minAzimuthAngle = -Infinity; // radians
this.maxAzimuthAngle = Infinity; // radians
// Set to true to enable damping (inertia)
// If damping is enabled, you must call controls.update() in your animation loop
this.enableDamping = false;
this.dampingFactor = 0.25;
// This option actually enables dollying in and out; left as "zoom" for backwards compatibility.
// Set to false to disable zooming
this.enableZoom = true;
this.zoomSpeed = 1.0;
// Set to false to disable rotating
this.enableRotate = true;
this.rotateSpeed = 1.0;
// Set to false to disable panning
this.enablePan = true;
this.panSpeed = 1.0;
this.screenSpacePanning = false; // if true, pan in screen-space
this.keyPanSpeed = 7.0; // pixels moved per arrow key push
// Set to true to automatically rotate around the target
// If auto-rotate is enabled, you must call controls.update() in your animation loop
this.autoRotate = false;
this.autoRotateSpeed = 2.0; // 30 seconds per round when fps is 60
// Set to false to disable use of the keys
this.enableKeys = true;
// The four arrow keys
this.keys = { LEFT: 37, UP: 38, RIGHT: 39, BOTTOM: 40 };
// Mouse buttons
this.mouseButtons = { LEFT: MOUSE.LEFT, MIDDLE: MOUSE.MIDDLE, RIGHT: MOUSE.RIGHT };
// for reset
this.target0 = this.target.clone();
this.position0 = this.object.position.clone();
this.zoom0 = this.object.zoom;
//
// public methods
//
this.getPolarAngle = function () {
return spherical.phi;
};
this.getAzimuthalAngle = function () {
return spherical.theta;
};
this.saveState = function () {
scope.target0.copy(scope.target);
scope.position0.copy(scope.object.position);
scope.zoom0 = scope.object.zoom;
};
this.reset = function () {
scope.target.copy(scope.target0);
scope.object.position.copy(scope.position0);
scope.object.zoom = scope.zoom0;
scope.object.updateProjectionMatrix();
scope.dispatchEvent(changeEvent);
scope.update();
state = STATE.NONE;
};
// this method is exposed, but perhaps it would be better if we can make it private...
this.update = (function () {
const offset = new Vector3();
// so camera.up is the orbit axis
const quat = new Quaternion().setFromUnitVectors(object.up, new Vector3(0, 1, 0));
const quatInverse = quat.clone().invert();
const lastPosition = new Vector3();
const lastQuaternion = new Quaternion();
return function update() {
const position = scope.object.position;
offset.copy(position).sub(scope.target);
// rotate offset to "y-axis-is-up" space
offset.applyQuaternion(quat);
// angle from z-axis around y-axis
spherical.setFromVector3(offset);
if (scope.autoRotate && state === STATE.NONE) {
rotateLeft(getAutoRotationAngle());
}
spherical.theta += sphericalDelta.theta;
spherical.phi += sphericalDelta.phi;
// restrict theta to be between desired limits
spherical.theta = Math.max(scope.minAzimuthAngle, Math.min(scope.maxAzimuthAngle, spherical.theta));
// restrict phi to be between desired limits
spherical.phi = Math.max(scope.minPolarAngle, Math.min(scope.maxPolarAngle, spherical.phi));
spherical.makeSafe();
spherical.radius *= scale;
// restrict radius to be between desired limits
spherical.radius = Math.max(scope.minDistance, Math.min(scope.maxDistance, spherical.radius));
// move target to panned location
scope.target.add(panOffset);
offset.setFromSpherical(spherical);
// rotate offset back to "camera-up-vector-is-up" space
offset.applyQuaternion(quatInverse);
position.copy(scope.target).add(offset);
scope.object.lookAt(scope.target);
if (scope.enableDamping === true) {
sphericalDelta.theta *= (1 - scope.dampingFactor);
sphericalDelta.phi *= (1 - scope.dampingFactor);
panOffset.multiplyScalar(1 - scope.dampingFactor);
} else {
sphericalDelta.set(0, 0, 0);
panOffset.set(0, 0, 0);
}
scale = 1;
// update condition is:
// min(camera displacement, camera rotation in radians)^2 > EPS
// using small-angle approximation cos(x/2) = 1 - x^2 / 8
if (zoomChanged
|| lastPosition.distanceToSquared(scope.object.position) > EPS
|| 8 * (1 - lastQuaternion.dot(scope.object.quaternion)) > EPS) {
scope.dispatchEvent(changeEvent);
lastPosition.copy(scope.object.position);
lastQuaternion.copy(scope.object.quaternion);
zoomChanged = false;
return true;
}
return false;
};
}());
this.dispose = function () {
scope.domElement.removeEventListener('contextmenu', onContextMenu, false);
scope.domElement.removeEventListener('mousedown', onMouseDown, false);
scope.domElement.removeEventListener('wheel', onMouseWheel, false);
scope.domElement.removeEventListener('touchstart', onTouchStart, false);
scope.domElement.removeEventListener('touchend', onTouchEnd, false);
scope.domElement.removeEventListener('touchmove', onTouchMove, false);
document.removeEventListener('mousemove', onMouseMove, false);
document.removeEventListener('mouseup', onMouseUp, false);
window.removeEventListener('keydown', onKeyDown, false);
// scope.dispatchEvent( { type: 'dispose' } ); // should this be added here?
};
//
// internals
//
var scope = this;
var changeEvent = { type: 'change' };
const startEvent = { type: 'start' };
const endEvent = { type: 'end' };
var STATE = { NONE: -1, ROTATE: 0, DOLLY: 1, PAN: 2, TOUCH_ROTATE: 3, TOUCH_DOLLY_PAN: 4 };
var state = STATE.NONE;
var EPS = 0.000001;
// current position in spherical coordinates
var spherical = new Spherical();
var sphericalDelta = new Spherical();
var scale = 1;
var panOffset = new Vector3();
var zoomChanged = false;
const rotateStart = new Vector2();
const rotateEnd = new Vector2();
const rotateDelta = new Vector2();
const panStart = new Vector2();
const panEnd = new Vector2();
const panDelta = new Vector2();
const dollyStart = new Vector2();
const dollyEnd = new Vector2();
const dollyDelta = new Vector2();
function getAutoRotationAngle() {
return 2 * Math.PI / 60 / 60 * scope.autoRotateSpeed;
}
function getZoomScale() {
return Math.pow(0.95, scope.zoomSpeed);
}
function rotateLeft(angle) {
sphericalDelta.theta -= angle;
}
function rotateUp(angle) {
sphericalDelta.phi -= angle;
}
const panLeft = (function () {
const v = new Vector3();
return function panLeft(distance, objectMatrix) {
v.setFromMatrixColumn(objectMatrix, 0); // get X column of objectMatrix
v.multiplyScalar(-distance);
panOffset.add(v);
};
}());
const panUp = (function () {
const v = new Vector3();
return function panUp(distance, objectMatrix) {
if (scope.screenSpacePanning === true) {
v.setFromMatrixColumn(objectMatrix, 1);
} else {
v.setFromMatrixColumn(objectMatrix, 0);
v.crossVectors(scope.object.up, v);
}
v.multiplyScalar(distance);
panOffset.add(v);
};
}());
// deltaX and deltaY are in pixels; right and down are positive
const pan = (function () {
const offset = new Vector3();
return function pan(deltaX, deltaY) {
const element = scope.domElement === document ? scope.domElement.body : scope.domElement;
if (scope.object.isPerspectiveCamera) {
// perspective
const position = scope.object.position;
offset.copy(position).sub(scope.target);
let targetDistance = offset.length();
// half of the fov is center to top of screen
targetDistance *= Math.tan((scope.object.fov / 2) * Math.PI / 180.0);
// we use only clientHeight here so aspect ratio does not distort speed
panLeft(2 * deltaX * targetDistance / element.clientHeight, scope.object.matrix);
panUp(2 * deltaY * targetDistance / element.clientHeight, scope.object.matrix);
} else if (scope.object.isOrthographicCamera) {
// orthographic
panLeft(deltaX * (scope.object.right - scope.object.left) / scope.object.zoom / element.clientWidth,
scope.object.matrix);
panUp(deltaY * (scope.object.top - scope.object.bottom) / scope.object.zoom / element.clientHeight, scope
.object.matrix);
} else {
// camera neither orthographic nor perspective
scope.enablePan = false;
}
};
}());
function dollyIn(dollyScale) {
if (scope.object.isPerspectiveCamera) {
scale /= dollyScale;
} else if (scope.object.isOrthographicCamera) {
scope.object.zoom = Math.max(scope.minZoom, Math.min(scope.maxZoom, scope.object.zoom * dollyScale));
scope.object.updateProjectionMatrix();
zoomChanged = true;
} else {
scope.enableZoom = false;
}
}
function dollyOut(dollyScale) {
if (scope.object.isPerspectiveCamera) {
scale *= dollyScale;
} else if (scope.object.isOrthographicCamera) {
scope.object.zoom = Math.max(scope.minZoom, Math.min(scope.maxZoom, scope.object.zoom / dollyScale));
scope.object.updateProjectionMatrix();
zoomChanged = true;
} else {
scope.enableZoom = false;
}
}
//
// event callbacks - update the object state
//
function handleMouseDownRotate(event) {
// console.log( 'handleMouseDownRotate' );
rotateStart.set(event.clientX, event.clientY);
}
function handleMouseDownDolly(event) {
// console.log( 'handleMouseDownDolly' );
dollyStart.set(event.clientX, event.clientY);
}
function handleMouseDownPan(event) {
// console.log( 'handleMouseDownPan' );
panStart.set(event.clientX, event.clientY);
}
function handleMouseMoveRotate(event) {
// console.log( 'handleMouseMoveRotate' );
rotateEnd.set(event.clientX, event.clientY);
rotateDelta.subVectors(rotateEnd, rotateStart).multiplyScalar(scope.rotateSpeed);
const element = scope.domElement === document ? scope.domElement.body : scope.domElement;
rotateLeft(2 * Math.PI * rotateDelta.x / element.clientHeight); // yes, height
rotateUp(2 * Math.PI * rotateDelta.y / element.clientHeight);
rotateStart.copy(rotateEnd);
scope.update();
}
function handleMouseMoveDolly(event) {
// console.log( 'handleMouseMoveDolly' );
dollyEnd.set(event.clientX, event.clientY);
dollyDelta.subVectors(dollyEnd, dollyStart);
if (dollyDelta.y > 0) {
dollyIn(getZoomScale());
} else if (dollyDelta.y < 0) {
dollyOut(getZoomScale());
}
dollyStart.copy(dollyEnd);
scope.update();
}
function handleMouseMovePan(event) {
// console.log( 'handleMouseMovePan' );
panEnd.set(event.clientX, event.clientY);
panDelta.subVectors(panEnd, panStart).multiplyScalar(scope.panSpeed);
pan(panDelta.x, panDelta.y);
panStart.copy(panEnd);
scope.update();
}
function handleMouseUp(event) {
// console.log( 'handleMouseUp' );
}
function handleMouseWheel(event) {
// console.log( 'handleMouseWheel' );
if (event.deltaY < 0) {
dollyOut(getZoomScale());
} else if (event.deltaY > 0) {
dollyIn(getZoomScale());
}
scope.update();
}
function handleKeyDown(event) {
// console.log( 'handleKeyDown' );
switch (event.keyCode) {
case scope.keys.UP:
pan(0, scope.keyPanSpeed);
scope.update();
break;
case scope.keys.BOTTOM:
pan(0, -scope.keyPanSpeed);
scope.update();
break;
case scope.keys.LEFT:
pan(scope.keyPanSpeed, 0);
scope.update();
break;
case scope.keys.RIGHT:
pan(-scope.keyPanSpeed, 0);
scope.update();
break;
}
}
function handleTouchStartRotate(event) {
// console.log( 'handleTouchStartRotate' );
rotateStart.set(event.touches[0].pageX, event.touches[0].pageY);
}
function handleTouchStartDollyPan(event) {
// console.log( 'handleTouchStartDollyPan' );
if (scope.enableZoom) {
const dx = event.touches[0].pageX - event.touches[1].pageX;
const dy = event.touches[0].pageY - event.touches[1].pageY;
const distance = Math.sqrt(dx * dx + dy * dy);
dollyStart.set(0, distance);
}
if (scope.enablePan) {
const x = 0.5 * (event.touches[0].pageX + event.touches[1].pageX);
const y = 0.5 * (event.touches[0].pageY + event.touches[1].pageY);
panStart.set(x, y);
}
}
function handleTouchMoveRotate(event) {
// console.log( 'handleTouchMoveRotate' );
rotateEnd.set(event.touches[0].pageX, event.touches[0].pageY);
rotateDelta.subVectors(rotateEnd, rotateStart).multiplyScalar(scope.rotateSpeed);
const element = scope.domElement === document ? scope.domElement.body : scope.domElement;
rotateLeft(2 * Math.PI * rotateDelta.x / element.clientHeight); // yes, height
rotateUp(2 * Math.PI * rotateDelta.y / element.clientHeight);
rotateStart.copy(rotateEnd);
scope.update();
}
function handleTouchMoveDollyPan(event) {
// console.log( 'handleTouchMoveDollyPan' );
if (scope.enableZoom) {
const dx = event.touches[0].pageX - event.touches[1].pageX;
const dy = event.touches[0].pageY - event.touches[1].pageY;
const distance = Math.sqrt(dx * dx + dy * dy);
dollyEnd.set(0, distance);
dollyDelta.set(0, Math.pow(dollyEnd.y / dollyStart.y, scope.zoomSpeed));
dollyIn(dollyDelta.y);
dollyStart.copy(dollyEnd);
}
if (scope.enablePan) {
const x = 0.5 * (event.touches[0].pageX + event.touches[1].pageX);
const y = 0.5 * (event.touches[0].pageY + event.touches[1].pageY);
panEnd.set(x, y);
panDelta.subVectors(panEnd, panStart).multiplyScalar(scope.panSpeed);
pan(panDelta.x, panDelta.y);
panStart.copy(panEnd);
}
scope.update();
}
function handleTouchEnd(event) {
// console.log( 'handleTouchEnd' );
}
//
// event handlers - FSM: listen for events and reset state
//
function onMouseDown(event) {
if (scope.enabled === false) return;
event.preventDefault();
switch (event.button) {
case scope.mouseButtons.LEFT:
if (event.ctrlKey || event.metaKey) {
if (scope.enablePan === false) return;
handleMouseDownPan(event);
state = STATE.PAN;
} else {
if (scope.enableRotate === false) return;
handleMouseDownRotate(event);
state = STATE.ROTATE;
}
break;
case scope.mouseButtons.MIDDLE:
if (scope.enableZoom === false) return;
handleMouseDownDolly(event);
state = STATE.DOLLY;
break;
case scope.mouseButtons.RIGHT:
if (scope.enablePan === false) return;
handleMouseDownPan(event);
state = STATE.PAN;
break;
}
if (state !== STATE.NONE) {
document.addEventListener('mousemove', onMouseMove, false);
document.addEventListener('mouseup', onMouseUp, false);
scope.dispatchEvent(startEvent);
}
}
function onMouseMove(event) {
if (scope.enabled === false) return;
event.preventDefault();
switch (state) {
case STATE.ROTATE:
if (scope.enableRotate === false) return;
handleMouseMoveRotate(event);
break;
case STATE.DOLLY:
if (scope.enableZoom === false) return;
handleMouseMoveDolly(event);
break;
case STATE.PAN:
if (scope.enablePan === false) return;
handleMouseMovePan(event);
break;
}
}
function onMouseUp(event) {
if (scope.enabled === false) return;
handleMouseUp(event);
document.removeEventListener('mousemove', onMouseMove, false);
document.removeEventListener('mouseup', onMouseUp, false);
scope.dispatchEvent(endEvent);
state = STATE.NONE;
}
function onMouseWheel(event) {
if (scope.enabled === false || scope.enableZoom === false || (state !== STATE.NONE && state !== STATE.ROTATE)) return;
event.preventDefault();
event.stopPropagation();
scope.dispatchEvent(startEvent);
handleMouseWheel(event);
scope.dispatchEvent(endEvent);
}
function onKeyDown(event) {
if (scope.enabled === false || scope.enableKeys === false || scope.enablePan === false) return;
handleKeyDown(event);
}
function onTouchStart(event) {
if (scope.enabled === false) return;
event.preventDefault();
switch (event.touches.length) {
case 1: // one-fingered touch: rotate
if (scope.enableRotate === false) return;
handleTouchStartRotate(event);
state = STATE.TOUCH_ROTATE;
break;
case 2: // two-fingered touch: dolly-pan
if (scope.enableZoom === false && scope.enablePan === false) return;
handleTouchStartDollyPan(event);
state = STATE.TOUCH_DOLLY_PAN;
break;
default:
state = STATE.NONE;
}
if (state !== STATE.NONE) {
scope.dispatchEvent(startEvent);
}
}
function onTouchMove(event) {
if (scope.enabled === false) return;
event.preventDefault();
event.stopPropagation();
switch (event.touches.length) {
case 1: // one-fingered touch: rotate
if (scope.enableRotate === false) return;
if (state !== STATE.TOUCH_ROTATE) return; // is this needed?
handleTouchMoveRotate(event);
break;
case 2: // two-fingered touch: dolly-pan
if (scope.enableZoom === false && scope.enablePan === false) return;
if (state !== STATE.TOUCH_DOLLY_PAN) return; // is this needed?
handleTouchMoveDollyPan(event);
break;
default:
state = STATE.NONE;
}
}
function onTouchEnd(event) {
if (scope.enabled === false) return;
handleTouchEnd(event);
scope.dispatchEvent(endEvent);
state = STATE.NONE;
}
function onContextMenu(event) {
if (scope.enabled === false) return;
event.preventDefault();
}
//
scope.domElement.addEventListener('contextmenu', onContextMenu, false);
scope.domElement.addEventListener('mousedown', onMouseDown, false);
scope.domElement.addEventListener('wheel', onMouseWheel, false);
scope.domElement.addEventListener('touchstart', onTouchStart, false);
scope.domElement.addEventListener('touchend', onTouchEnd, false);
scope.domElement.addEventListener('touchmove', onTouchMove, false);
window.addEventListener('keydown', onKeyDown, false);
// force an update at start
this.update();
};
OrbitControls.prototype = Object.create(EventDispatcher.prototype);
OrbitControls.prototype.constructor = OrbitControls;
Object.defineProperties(OrbitControls.prototype, {
center: {
get() {
return this.target;
},
},
// backward compatibility
noZoom: {
get() {
return !this.enableZoom;
},
set(value) {
this.enableZoom = !value;
},
},
noRotate: {
get() {
return !this.enableRotate;
},
set(value) {
this.enableRotate = !value;
},
},
noPan: {
get() {
return !this.enablePan;
},
set(value) {
this.enablePan = !value;
},
},
noKeys: {
get() {
return !this.enableKeys;
},
set(value) {
this.enableKeys = !value;
},
},
staticMoving: {
get() {
return !this.enableDamping;
},
set(value) {
this.enableDamping = !value;
},
},
dynamicDampingFactor: {
get() {
return this.dampingFactor;
},
set(value) {
this.dampingFactor = value;
},
},
});
export { OrbitControls };

demo/helpers/three.js (new file, 3090 lines)

File diff suppressed because one or more lines are too long

View File

@ -4,7 +4,8 @@ async function log(...msg) {
if (debug) {
const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
console.log(ts, 'webrtc', ...msg); // eslint-disable-line no-console
// eslint-disable-next-line no-console
console.log(ts, 'webrtc', ...msg);
}
}

View File

@ -2,7 +2,6 @@
* PWA Service Worker for Human main demo
*/
/* eslint-disable no-restricted-globals */
/// <reference lib="webworker" />
const skipCaching = false;
@ -20,7 +19,8 @@ const stats = { hit: 0, miss: 0 };
const log = (...msg) => {
const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
console.log(ts, 'pwa', ...msg); // eslint-disable-line no-console
// eslint-disable-next-line no-console
console.log(ts, 'pwa', ...msg);
};
async function updateCached(req) {
@ -31,7 +31,7 @@ async function updateCached(req) {
caches
.open(cacheName)
.then((cache) => cache.put(req, update))
.catch((err) => log('cache update error', err)); // eslint-disable-line promise/no-nesting
.catch((err) => log('cache update error', err));
}
return true;
})
@ -75,13 +75,14 @@ async function getCached(evt) {
}
function cacheInit() {
// eslint-disable-next-line promise/catch-or-return
caches.open(cacheName)
// eslint-disable-next-line promise/no-nesting
.then((cache) => cache.addAll(cacheFiles)
.then( // eslint-disable-line promise/no-nesting
.then(
() => log('cache refresh:', cacheFiles.length, 'files'),
(err) => log('cache error', err),
))
.catch(() => log('cache error'));
));
}
if (!listening) {
@ -98,12 +99,14 @@ if (!listening) {
self.addEventListener('install', (evt) => {
log('install');
// @ts-ignore scope for self is ServiceWorkerGlobalScope not Window
self.skipWaiting();
evt.waitUntil(cacheInit);
});
self.addEventListener('activate', (evt) => {
log('activate');
// @ts-ignore scope for self is ServiceWorkerGlobalScope not Window
evt.waitUntil(self.clients.claim());
});
@ -111,7 +114,7 @@ if (!listening) {
const uri = new URL(evt.request.url);
// if (uri.pathname === '/') { log('cache skip /', evt.request); return; } // skip root access requests
if (evt.request.cache === 'only-if-cached' && evt.request.mode !== 'same-origin') return; // required due to chrome bug
if (uri.origin !== self.location.origin) return; // skip non-local requests
if (uri.origin !== location.origin) return; // skip non-local requests
if (evt.request.method !== 'GET') return; // only cache get requests
if (evt.request.url.includes('/api/')) return; // don't cache api requests, failures are handled at the time of call
@ -126,7 +129,7 @@ if (!listening) {
log(`PWA: ${evt.type}`);
if (refreshed) return;
refreshed = true;
self.location.reload();
location.reload();
});
listening = true;
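The worker above implements a cache-first strategy with background refresh: GET requests are answered from the cache when possible while a network fetch updates the cache for next time. A minimal standalone sketch of that pattern, assuming a placeholder cache name rather than the demo's actual value:

```js
// minimal cache-first sketch; cacheName is a placeholder, not the demo's actual value
const cacheName = 'demo-cache';
self.addEventListener('fetch', (evt) => {
  if (evt.request.method !== 'GET') return; // only cache idempotent requests
  evt.respondWith(
    caches.match(evt.request).then((cached) => {
      // kick off a background refresh regardless of whether the cache hit
      const fresh = fetch(evt.request).then((res) => {
        if (res.ok) caches.open(cacheName).then((cache) => cache.put(evt.request, res.clone()));
        return res;
      });
      return cached || fresh; // serve cache when available, fall back to network
    }),
  );
});
```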

View File

@ -1,27 +1,35 @@
/**
* Web worker used by main demo app
* Loaded from index.js
*/
/// <reference lib="webworker"/>
/// <reference lib="webworker" />
// load Human using IIFE script as Chrome Mobile does not support Modules as Workers
self.importScripts('../dist/human.js'); // eslint-disable-line no-restricted-globals
// import Human from '../dist/human.esm.js';
self.importScripts('../dist/human.js');
let busy = false;
// eslint-disable-next-line new-cap, no-undef
// @ts-ignore // Human is registered as global namespace using IIFE script
// eslint-disable-next-line no-undef, new-cap
const human = new Human.default();
onmessage = async (msg) => { // receive message from main thread
function log(...msg) {
const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
// eslint-disable-next-line no-console
if (msg) console.log(ts, 'Human:', ...msg);
}
onmessage = async (msg) => {
if (busy) return;
busy = true;
// received from index.js using:
// worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
const image = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
let result = {};
try {
result = await human.detect(image, msg.data.userConfig);
result.tensors = human.tf.engine().state.numTensors; // append to result object so main thread gets info
result.backend = human.tf.getBackend(); // append to result object so main thread gets info
} catch (err) {
result.error = err.message;
log('worker thread error:', err.message);
}
if (result.canvas) { // convert canvas to imageData and send it by reference
const canvas = new OffscreenCanvas(result.canvas.width, result.canvas.height);
const ctx = canvas.getContext('2d');
@ -29,9 +37,9 @@ onmessage = async (msg) => { // receive message from main thread
const img = ctx ? ctx.getImageData(0, 0, result.canvas.width, result.canvas.height) : null;
result.canvas = null; // must strip original canvas from return value as it cannot be transferred from worker thread
if (img) postMessage({ result, image: img.data.buffer, width: msg.data.width, height: msg.data.height }, [img.data.buffer]);
else postMessage({ result }); // send message back to main thread with canvas
else postMessage({ result });
} else {
postMessage({ result }); // send message back to main thread without canvas
postMessage({ result });
}
busy = false;
};
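The worker above receives raw pixel data because DOM objects cannot cross the worker boundary. For context, a sketch of the main-thread side mirroring the comment in the worker; the `canvas`, `video` and `userConfig` names are assumed to exist in the caller:

```js
// main-thread sketch: send a frame to the worker as a transferable buffer
const worker = new Worker('index-worker.js');
const ctx = canvas.getContext('2d');
ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
const image = ctx.getImageData(0, 0, canvas.width, canvas.height);
// transfer ownership of the pixel buffer instead of copying it
worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, userConfig }, [image.data.buffer]);
worker.addEventListener('message', (msg) => {
  console.log('worker result:', msg.data.result); // result mirrors human.detect() output
});
```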

View File

@ -67,7 +67,7 @@
.hint { opacity: 0; transition-duration: 0.5s; transition-property: opacity; font-style: italic; position: fixed; top: 5rem; padding: 8px; margin: 8px; box-shadow: 0 0 2px 2px #303030; }
.input-file { align-self: center; width: 5rem; }
.results { position: absolute; left: 0; top: 5rem; background: #303030; width: 20rem; height: 90%; font-size: 0.8rem; overflow-y: auto; display: none }
.results { position: absolute; left: 0; top: 6rem; background: #303030; width: 20rem; height: 90%; font-size: 0.8rem; overflow-y: auto; display: none }
.results::-webkit-scrollbar { background-color: #303030; }
.results::-webkit-scrollbar-thumb { background: black; border-radius: 10px; }
.json-line { margin: 4px 0; display: flex; justify-content: flex-start; }
@ -89,9 +89,9 @@
<body>
<div id="play" class="play icon-play"></div>
<div id="background">
<div class="wave one"></div>
<div class="wave two"></div>
<div class="wave three"></div>
<div class='wave one'></div>
<div class='wave two'></div>
<div class='wave three'></div>
</div>
<div id="loader" class="loader"></div>
<div id="status" class="status"></div>

View File

@ -18,12 +18,11 @@
* ui={}: contains all variables exposed in the UI
*/
// WARNING!!!
// This demo is written in an older code style with a lot of manual setup
// Newer versions of Human have richer functionality, allowing for much cleaner & easier usage
// It is recommended to use other demos such as `demo/typescript` for usage examples
// test url <https://human.local/?worker=false&async=false&bench=false&draw=true&warmup=full&backend=humangl>
import { Human } from '../dist/human.esm.js'; // equivalent of @vladmandic/human
// @ts-nocheck // typescript checks disabled as this is pure javascript
import Human from '../dist/human.esm.js'; // equivalent of @vladmandic/human
import Menu from './helpers/menu.js';
import GLBench from './helpers/gl-bench.js';
import webRTC from './helpers/webrtc.js';
@ -32,36 +31,31 @@ import jsonView from './helpers/jsonview.js';
let human;
let userConfig = {
// face: { enabled: false },
// body: { enabled: false },
// hand: { enabled: false },
/*
warmup: 'none',
backend: 'webgl',
debug: true,
backend: 'humangl',
/*
wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
async: false,
cacheSensitivity: 0.75,
filter: { enabled: false, flip: false },
filter: {
enabled: false,
flip: false,
},
face: { enabled: false,
detector: { return: false, rotation: true },
mesh: { enabled: false },
iris: { enabled: false },
mesh: { enabled: true },
iris: { enabled: true },
description: { enabled: false },
emotion: { enabled: false },
},
object: { enabled: false },
gesture: { enabled: true },
hand: { enabled: true, maxDetected: 1, minConfidence: 0.5, detector: { modelPath: 'handtrack.json' } },
hand: { enabled: true },
body: { enabled: false },
// body: { enabled: true, modelPath: 'movenet-multipose.json' },
// body: { enabled: true, modelPath: 'posenet.json' },
segmentation: { enabled: false },
*/
/*
face: { iris: { enabled: false }, emotion: { enabled: false } },
hand: { enabled: false },
body: { enabled: false },
gesture: { enabled: false },
*/
};
@ -70,12 +64,8 @@ const drawOptions = {
drawBoxes: true,
drawGaze: true,
drawLabels: true,
drawGestures: true,
drawPolygons: true,
drawPoints: false,
fillPolygons: false,
useCurves: false,
useDepth: true,
};
// ui options
@ -86,10 +76,10 @@ const ui = {
facing: true, // camera facing front or back
baseBackground: 'rgba(50, 50, 50, 1)', // 'grey'
columns: 2, // when processing sample images create this many columns
useWorker: false, // use web workers for processing
useWorker: true, // use web workers for processing
worker: 'index-worker.js',
maxFPSframes: 10, // keep fps history for how many frames
modelsPreload: false, // preload human models on startup
modelsPreload: true, // preload human models on startup
modelsWarmup: false, // warmup human models on startup
buffered: true, // should output be buffered between frames
interpolated: true, // should output be interpolated for smoothness between frames
@ -97,7 +87,6 @@ const ui = {
autoPlay: false, // start webcam & detection on load
// internal variables
exceptionHandler: true, // should capture all unhandled exceptions
busy: false, // internal camera busy flag
menuWidth: 0, // internal
menuHeight: 0, // internal
@ -114,7 +103,7 @@ const ui = {
results: false, // show results tree
lastFrame: 0, // time of last frame processing
viewportSet: false, // internal, has custom viewport been set
transferCanvas: null, // canvas used to transfer data to and from worker
background: null, // holds instance of segmentation background image
// webrtc
useWebRTC: false, // use webrtc as camera source instead of local webcam
@ -152,10 +141,6 @@ let worker;
let bench;
let lastDetectedResult = {};
// helper function: async pause
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
const delay = (ms) => new Promise((resolve) => { setTimeout(resolve, ms); });
// helper function: translates json to human readable string
function str(...msg) {
if (!Array.isArray(msg)) return msg;
@ -171,93 +156,79 @@ function str(...msg) {
function log(...msg) {
const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
if (ui.console) console.log(ts, ...msg); // eslint-disable-line no-console
// eslint-disable-next-line no-console
if (ui.console) console.log(ts, ...msg);
}
let prevStatus = '';
function status(msg) {
const div = document.getElementById('status');
if (div && msg && msg !== prevStatus && msg.length > 0) {
if (div && msg && msg.length > 0) {
log('status', msg);
document.getElementById('play').style.display = 'none';
document.getElementById('loader').style.display = 'block';
div.innerText = msg;
prevStatus = msg;
} else {
const video = document.getElementById('video');
const playing = isLive(video) && !video.paused; // eslint-disable-line no-use-before-define
const playing = (video.srcObject !== null) && !video.paused;
document.getElementById('play').style.display = playing ? 'none' : 'block';
document.getElementById('loader').style.display = 'none';
div.innerText = '';
}
}
async function videoPlay(videoElement = document.getElementById('video')) {
document.getElementById('btnStartText').innerHTML = 'pause video';
await videoElement.play();
}
async function videoPause() {
document.getElementById('btnStartText').innerHTML = 'start video';
await document.getElementById('video').pause();
status('paused');
document.getElementById('play').style.display = 'block';
document.getElementById('loader').style.display = 'none';
}
const compare = { enabled: false, original: null };
async function calcSimmilarity(result) {
document.getElementById('compare-container').onclick = () => {
log('resetting face compare baseline:');
compare.original = null;
};
document.getElementById('compare-container').style.display = compare.enabled ? 'block' : 'none';
if (!compare.enabled) {
compare.original = null;
return;
}
if (!compare.enabled) return;
if (!result || !result.face || !result.face[0] || !result.face[0].embedding) return;
if (!(result.face.length > 0) || (result.face[0].embedding.length <= 64)) return;
if (!compare.original) {
compare.original = result;
log('setting face compare baseline:', result.face[0]);
if (result.face[0].tensor) {
const enhanced = human.enhance(result.face[0]);
if (enhanced) {
const c = document.getElementById('orig');
human.draw.tensor(result.face[0].tensor, c);
const squeeze = human.tf.squeeze(enhanced);
const norm = human.tf.div(squeeze, 255);
human.tf.browser.toPixels(norm, c);
human.tf.dispose(enhanced);
human.tf.dispose(squeeze);
human.tf.dispose(norm);
}
} else {
document.getElementById('compare-canvas').getContext('2d').drawImage(compare.original.canvas, 0, 0, 200, 200);
}
}
const similarity = human.match.similarity(compare.original.face[0].embedding, result.face[0].embedding);
const similarity = human.similarity(compare.original.face[0].embedding, result.face[0].embedding);
document.getElementById('similarity').innerText = `similarity: ${Math.trunc(1000 * similarity) / 10}%`;
}
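Note the API location moved between releases: older builds expose `human.similarity()`, newer ones `human.match.similarity()` (both appear in the diff above). A hedged sketch of the comparison itself, where `resultA` and `resultB` are hypothetical outputs of `human.detect()`:

```js
// sketch: compare face descriptors from two detection results
const simFn = human.match?.similarity || human.similarity; // API location differs by release
const score = simFn(resultA.face[0].embedding, resultB.face[0].embedding);
console.log(`similarity: ${Math.trunc(1000 * score) / 10}%`); // score is 0..1
```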
const isLive = (input) => {
const isCamera = input.srcObject?.getVideoTracks()[0] && input.srcObject?.getVideoTracks()[0].enabled;
const isVideoLive = input.readyState > 2;
const isCameraLive = input.srcObject?.getVideoTracks()[0].readyState === 'live';
let live = isCamera ? isCameraLive : isVideoLive;
live = live && !input.paused;
const videoLive = input.readyState > 2;
const cameraLive = input.srcObject?.getVideoTracks()[0].readyState === 'live';
const live = (videoLive || cameraLive) && (!input.paused);
return live;
};
// draws processed results and starts processing of a next frame
let lastDraw = 0;
let lastDraw = performance.now();
async function drawResults(input) {
// await delay(25);
const result = lastDetectedResult;
const canvas = document.getElementById('canvas');
// update draw fps data
ui.drawFPS.push(1000 / (human.now() - lastDraw));
ui.drawFPS.push(1000 / (performance.now() - lastDraw));
if (ui.drawFPS.length > ui.maxFPSframes) ui.drawFPS.shift();
lastDraw = human.now();
lastDraw = performance.now();
// draw fps chart
await menu.process.updateChart('FPS', ui.detectFPS);
if (!result.canvas || ui.buffered) { // refresh with input if using buffered output or if missing canvas
const image = await human.image(input, false);
if (userConfig.segmentation.enabled && ui.buffered) { // refresh segmentation if using buffered output
result.canvas = await human.segmentation(input, ui.background, userConfig);
} else if (!result.canvas || ui.buffered) { // refresh with input if using buffered output or if missing canvas
const image = await human.image(input);
result.canvas = image.canvas;
human.tf.dispose(image.tensor);
}
@ -275,10 +246,12 @@ async function drawResults(input) {
}
// draw all results using interpolated results
let interpolated;
if (ui.interpolated) interpolated = human.next(result);
else interpolated = result;
if (ui.interpolated) {
const interpolated = human.next(result);
human.draw.all(canvas, interpolated, drawOptions);
} else {
human.draw.all(canvas, result, drawOptions);
}
// show tree with results
if (ui.results) {
@ -300,36 +273,37 @@ async function drawResults(input) {
// update log
const engine = human.tf.engine();
const gpu = engine.backendInstance ? `gpu: ${(engine.backendInstance.numBytesInGPU ? engine.backendInstance.numBytesInGPU : 0).toLocaleString()} bytes` : '';
const memory = `system: ${engine.state.numBytes.toLocaleString()} bytes ${gpu} | tensors: ${engine.state.numTensors.toLocaleString()}`;
const processing = result.canvas ? `processing: ${result.canvas.width} x ${result.canvas.height}` : '';
const avgDetect = ui.detectFPS.length > 0 ? Math.trunc(10 * ui.detectFPS.reduce((a, b) => a + b, 0) / ui.detectFPS.length) / 10 : 0;
const avgDraw = ui.drawFPS.length > 0 ? Math.trunc(10 * ui.drawFPS.reduce((a, b) => a + b, 0) / ui.drawFPS.length) / 10 : 0;
const warning = (ui.detectFPS.length > 5) && (avgDetect < 2) ? '<font color="lightcoral">warning: your performance is low: try switching to higher performance backend, lowering resolution or disabling some models</font>' : '';
const fps = avgDetect > 0 ? `FPS process:${avgDetect} refresh:${avgDraw}` : '';
const backend = result.backend || human.tf.getBackend();
const gpu = engine.backendInstance ? `gpu: ${(engine.backendInstance.numBytesInGPU ? engine.backendInstance.numBytesInGPU : 0).toLocaleString()} bytes` : '';
const memory = result.tensors ? `tensors: ${result.tensors.toLocaleString()} in worker` : `system: ${engine.state.numBytes.toLocaleString()} bytes ${gpu} | tensors: ${engine.state.numTensors.toLocaleString()}`;
const backend = engine.state.numTensors > 0 ? `backend: ${human.tf.getBackend()} | ${memory}` : 'running in web worker';
document.getElementById('log').innerHTML = `
video: ${ui.camera.name} | facing: ${ui.camera.facing} | screen: ${window.innerWidth} x ${window.innerHeight} camera: ${ui.camera.width} x ${ui.camera.height} ${processing}<br>
backend: ${backend} | ${memory}<br>
performance: ${str(interpolated.performance)}ms ${fps}<br>
backend: ${backend}<br>
performance: ${str(lastDetectedResult.performance)}ms ${fps}<br>
${warning}<br>
`;
ui.framesDraw++;
ui.lastFrame = human.now();
ui.lastFrame = performance.now();
// if buffered, loop immediately but limit the frame rate; it will still run slower since JS is single-threaded
if (ui.buffered) {
if (isLive(input)) {
// ui.drawThread = requestAnimationFrame(() => drawResults(input));
ui.drawThread = setTimeout(() => drawResults(input), 25);
ui.drawThread = requestAnimationFrame(() => drawResults(input));
} else {
cancelAnimationFrame(ui.drawThread);
videoPause();
ui.drawThread = null;
}
} else if (ui.drawThread) {
} else {
if (ui.drawThread) {
log('stopping buffered refresh');
cancelAnimationFrame(ui.drawThread);
ui.drawThread = null;
}
}
}
// setup webcam
@ -348,7 +322,7 @@ async function setupCamera() {
} catch (err) {
log(err);
} finally {
// status();
status();
}
return '';
}
@ -377,11 +351,10 @@ async function setupCamera() {
},
};
// enumerate devices for diag purposes
const devices = await navigator.mediaDevices.enumerateDevices();
if (initialCameraAccess) log('enumerated input devices:', devices);
// to select specific camera add deviceid from enumerated devices to camera constraints
// constraints.video.deviceId = '6794499e046cf4aebf41cfeb7d1ef48a17bd65f72bafb55f3c0b06405d3d487b';
if (initialCameraAccess) log('camera constraints', constraints);
if (initialCameraAccess) {
navigator.mediaDevices.enumerateDevices().then((devices) => log('enumerated input devices:', devices));
log('camera constraints', constraints);
}
try {
stream = await navigator.mediaDevices.getUserMedia(constraints);
} catch (err) {
@ -403,24 +376,31 @@ async function setupCamera() {
}
const track = stream.getVideoTracks()[0];
const settings = track.getSettings();
if (initialCameraAccess) log('selected video source:', track, settings);
ui.camera = { name: track.label.toLowerCase(), width: settings.width, height: settings.height, facing: settings.facingMode === 'user' ? 'front' : 'back' };
if (initialCameraAccess) log('selected video source:', track, settings); // log('selected camera:', track.label, 'id:', settings.deviceId);
ui.camera = { name: track.label.toLowerCase(), width: video.videoWidth, height: video.videoHeight, facing: settings.facingMode === 'user' ? 'front' : 'back' };
initialCameraAccess = false;
if (!stream) return 'camera stream empty';
const ready = new Promise((resolve) => { (video.onloadeddata = () => resolve(true)); });
video.srcObject = stream;
await ready;
const promise = !stream || new Promise((resolve) => {
video.onloadeddata = () => {
if (settings.width > settings.height) canvas.style.width = '100vw';
else canvas.style.height = '100vh';
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
ui.menuWidth.input.setAttribute('value', video.videoWidth);
ui.menuHeight.input.setAttribute('value', video.videoHeight);
if (live || ui.autoPlay) await videoPlay();
if ((live || ui.autoPlay) && !ui.detectThread) runHumanDetect(video, canvas); // eslint-disable-line no-use-before-define
return 'camera stream ready';
if (live || ui.autoPlay) video.play();
// eslint-disable-next-line no-use-before-define
if ((live || ui.autoPlay) && !ui.detectThread) runHumanDetect(video, canvas);
ui.busy = false;
resolve();
};
});
// attach input to video element
if (stream) {
video.srcObject = stream;
return promise;
}
ui.busy = false;
return 'camera stream empty';
}
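The commented hint above notes that a specific camera can be pinned by adding a `deviceId` from the enumerated devices to the constraints. A minimal sketch using only standard MediaDevices APIs; the label match is an arbitrary example:

```js
// sketch: select a specific camera by label and pass its deviceId as an exact constraint
async function pickCamera(labelFragment) {
  // note: device labels are empty until the page has been granted camera permission
  const devices = await navigator.mediaDevices.enumerateDevices();
  const cam = devices.find((d) => d.kind === 'videoinput' && d.label.toLowerCase().includes(labelFragment));
  const constraints = { audio: false, video: cam ? { deviceId: { exact: cam.deviceId } } : true };
  return navigator.mediaDevices.getUserMedia(constraints);
}

// usage: pickCamera('back').then((stream) => { video.srcObject = stream; });
```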
function initPerfMonitor() {
@ -457,23 +437,20 @@ function webWorker(input, image, canvas, timestamp) {
if (document.getElementById('gl-bench')) document.getElementById('gl-bench').style.display = ui.bench ? 'block' : 'none';
lastDetectedResult = msg.data.result;
if (msg.data.image) { // we dont really need canvas since we draw from video
/*
if (!lastDetectedResult.canvas || lastDetectedResult.canvas.width !== msg.data.width || lastDetectedResult.canvas.height !== msg.data.height) {
if (msg.data.image) {
lastDetectedResult.canvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(msg.data.width, msg.data.height) : document.createElement('canvas');
lastDetectedResult.canvas.width = msg.data.width;
lastDetectedResult.canvas.height = msg.data.height;
}
const ctx = lastDetectedResult.canvas.getContext('2d');
const imageData = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
ctx.putImageData(imageData, 0, 0);
*/
}
ui.framesDetect++;
if (!ui.drawThread) drawResults(input);
if (isLive(input)) {
ui.detectThread = requestAnimationFrame((now) => runHumanDetect(input, canvas, now)); // eslint-disable-line no-use-before-define
// eslint-disable-next-line no-use-before-define
ui.detectThread = requestAnimationFrame((now) => runHumanDetect(input, canvas, now));
}
});
}
@ -497,21 +474,18 @@ function runHumanDetect(input, canvas, timestamp) {
return;
}
if (ui.hintsThread) clearInterval(ui.hintsThread);
if (ui.useWorker && human.env.offscreen) {
if (ui.useWorker) {
// get image data from video as we cannot send html objects to webworker
if (!ui.transferCanvas || ui.transferCanvas.width !== canvas.width || ui.transferCanvas.height !== canvas.height) {
ui.transferCanvas = document.createElement('canvas');
ui.transferCanvas.width = canvas.width;
ui.transferCanvas.height = canvas.height;
}
const ctx = ui.transferCanvas.getContext('2d');
const offscreen = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(canvas.width, canvas.height) : document.createElement('canvas');
offscreen.width = canvas.width;
offscreen.height = canvas.height;
const ctx = offscreen.getContext('2d');
ctx.drawImage(input, 0, 0, canvas.width, canvas.height);
const data = ctx.getImageData(0, 0, canvas.width, canvas.height);
// perform detection in worker
webWorker(input, data, canvas, timestamp);
} else {
human.detect(input, userConfig)
.then((result) => {
human.detect(input, userConfig).then((result) => {
status();
if (result.performance && result.performance.total) ui.detectFPS.push(1000 / result.performance.total);
if (ui.detectFPS.length > ui.maxFPSframes) ui.detectFPS.shift();
@ -529,9 +503,7 @@ function runHumanDetect(input, canvas, timestamp) {
ui.framesDetect++;
ui.detectThread = requestAnimationFrame((now) => runHumanDetect(input, canvas, now));
}
return result;
})
.catch(() => log('human detect error'));
});
}
}
@ -578,7 +550,8 @@ async function processImage(input, title) {
// copy to clipboard on click
if (typeof ClipboardItem !== 'undefined' && navigator.clipboard) {
evt.target.toBlob((blob) => {
const item = new ClipboardItem({ 'image/png': blob }); // eslint-disable-line no-undef
// eslint-disable-next-line no-undef
const item = new ClipboardItem({ 'image/png': blob });
navigator.clipboard.write([item]);
log('copied image to clipboard');
});
@ -589,7 +562,6 @@ async function processImage(input, title) {
const prev = document.getElementsByClassName('thumbnail');
if (prev && prev.length > 0) document.getElementById('samples-container').insertBefore(thumb, prev[0]);
else document.getElementById('samples-container').appendChild(thumb);
document.getElementById('samples-container').style.display = 'block';
// finish up
status();
@ -606,17 +578,22 @@ async function processImage(input, title) {
async function processVideo(input, title) {
status(`processing video: ${title}`);
const video = document.getElementById('video');
const video = document.createElement('video');
const canvas = document.getElementById('canvas');
video.id = 'video-file';
video.controls = true;
video.loop = true;
// video.onerror = async () => status(`video loading error: ${video.error.message}`);
video.addEventListener('error', () => status(`video loading error: ${video.error.message}`));
video.addEventListener('canplay', async () => {
for (const m of Object.values(menu)) m.hide();
document.getElementById('samples-container').style.display = 'none';
document.getElementById('play').style.display = 'none';
canvas.style.display = 'block';
await videoPlay();
runHumanDetect(video, canvas);
document.getElementById('btnStartText').innerHTML = 'pause video';
await video.play();
if (!ui.detectThread) runHumanDetect(video, canvas);
});
video.srcObject = null;
video.src = input;
}
@ -627,14 +604,18 @@ async function detectVideo() {
const canvas = document.getElementById('canvas');
canvas.style.display = 'block';
cancelAnimationFrame(ui.detectThread);
if (isLive(video) && !video.paused) {
await videoPause();
if ((video.srcObject !== null) && !video.paused) {
document.getElementById('btnStartText').innerHTML = 'start video';
status('paused');
await video.pause();
// if (ui.drawThread) cancelAnimationFrame(ui.drawThread);
} else {
const cameraError = await setupCamera();
if (!cameraError) {
status('starting detection');
for (const m of Object.values(menu)) m.hide();
await videoPlay();
document.getElementById('btnStartText').innerHTML = 'pause video';
await video.play();
runHumanDetect(video, canvas);
} else {
status(cameraError);
@ -675,17 +656,16 @@ function setupMenu() {
setupCamera();
});
menu.display.addHTML('<hr style="border-style: inset; border-color: dimgray">');
menu.display.addBool('use depth', drawOptions, 'useDepth');
menu.display.addBool('use curves', drawOptions, 'useCurves');
menu.display.addBool('print labels', drawOptions, 'drawLabels');
menu.display.addBool('draw points', drawOptions, 'drawPoints');
menu.display.addBool('draw boxes', drawOptions, 'drawBoxes');
menu.display.addBool('draw polygons', drawOptions, 'drawPolygons');
menu.display.addBool('fill polygons', drawOptions, 'fillPolygons');
menu.display.addBool('use depth', human.draw.options, 'useDepth');
menu.display.addBool('use curves', human.draw.options, 'useCurves');
menu.display.addBool('print labels', human.draw.options, 'drawLabels');
menu.display.addBool('draw points', human.draw.options, 'drawPoints');
menu.display.addBool('draw boxes', human.draw.options, 'drawBoxes');
menu.display.addBool('draw polygons', human.draw.options, 'drawPolygons');
menu.display.addBool('fill polygons', human.draw.options, 'fillPolygons');
menu.image = new Menu(document.body, '', { top, left: x[1] });
menu.image.addBool('enabled', userConfig.filter, 'enabled', (val) => userConfig.filter.enabled = val);
menu.image.addBool('histogram equalization', userConfig.filter, 'equalization', (val) => userConfig.filter.equalization = val);
ui.menuWidth = menu.image.addRange('image width', userConfig.filter, 'width', 0, 3840, 10, (val) => userConfig.filter.width = parseInt(val));
ui.menuHeight = menu.image.addRange('image height', userConfig.filter, 'height', 0, 2160, 10, (val) => userConfig.filter.height = parseInt(val));
menu.image.addHTML('<hr style="border-style: inset; border-color: dimgray">');
@ -704,6 +684,7 @@ function setupMenu() {
menu.image.addBool('technicolor', userConfig.filter, 'technicolor', (val) => userConfig.filter.technicolor = val);
menu.image.addBool('polaroid', userConfig.filter, 'polaroid', (val) => userConfig.filter.polaroid = val);
menu.image.addHTML('<input type="file" id="file-input" class="input-file"></input> &nbsp input');
menu.image.addHTML('<input type="file" id="file-background" class="input-file"></input> &nbsp background');
menu.process = new Menu(document.body, '', { top, left: x[2] });
menu.process.addList('backend', ['cpu', 'webgl', 'wasm', 'humangl'], userConfig.backend, (val) => userConfig.backend = val);
@ -751,6 +732,8 @@ function setupMenu() {
menu.models.addHTML('<hr style="border-style: inset; border-color: dimgray">');
menu.models.addBool('gestures', userConfig.gesture, 'enabled', (val) => userConfig.gesture.enabled = val);
menu.models.addHTML('<hr style="border-style: inset; border-color: dimgray">');
menu.models.addBool('body segmentation', userConfig.segmentation, 'enabled', (val) => userConfig.segmentation.enabled = val);
menu.models.addHTML('<hr style="border-style: inset; border-color: dimgray">');
menu.models.addBool('object detection', userConfig.object, 'enabled', (val) => userConfig.object.enabled = val);
menu.models.addHTML('<hr style="border-style: inset; border-color: dimgray">');
menu.models.addBool('face compare', compare, 'enabled', (val) => {
@ -770,7 +753,6 @@ function setupMenu() {
async function resize() {
window.onresize = null;
log('resize');
// best setting for mobile, ignored for desktop
// can set dynamic value such as Math.min(1, Math.round(100 * window.innerWidth / 960) / 100);
const viewportScale = 0.7;
@ -819,12 +801,42 @@ async function processDataURL(f, action) {
if (e.target.result.startsWith('data:video')) await processVideo(e.target.result, f.name);
document.getElementById('canvas').style.display = 'none';
}
if (action === 'background') {
const image = new Image();
image.onerror = async () => status('image loading error');
image.onload = async () => {
ui.background = image;
if (document.getElementById('canvas').style.display === 'block') { // replace canvas used for video
const canvas = document.getElementById('canvas');
const ctx = canvas.getContext('2d');
const overlaid = await human.segmentation(canvas, ui.background, userConfig);
if (overlaid) ctx.drawImage(overlaid, 0, 0);
} else {
const canvases = document.getElementById('samples-container').children; // replace loaded images
for (const canvas of canvases) {
const ctx = canvas.getContext('2d');
const overlaid = await human.segmentation(canvas, ui.background, userConfig);
if (overlaid) ctx.drawImage(overlaid, 0, 0);
}
}
};
image.src = e.target.result;
}
resolve(true);
};
reader.readAsDataURL(f);
});
}
async function runSegmentation() {
document.getElementById('file-background').onchange = async (evt) => {
userConfig.segmentation.enabled = true;
evt.preventDefault();
if (evt.target.files.length < 2) ui.columns = 1;
for (const f of evt.target.files) await processDataURL(f, 'background');
};
}
async function dragAndDrop() {
document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
@ -862,10 +874,10 @@ async function pwaRegister() {
const regs = await navigator.serviceWorker.getRegistrations();
for (const reg of regs) {
log('pwa found:', reg.scope);
if (reg.scope.startsWith(window.location.origin)) found = reg;
if (reg.scope.startsWith(location.origin)) found = reg;
}
if (!found) {
const reg = await navigator.serviceWorker.register(pwa.scriptFile, { scope: window.location.pathname });
const reg = await navigator.serviceWorker.register(pwa.scriptFile, { scope: location.pathname });
found = reg;
log('pwa registered:', reg.scope);
}
@ -892,17 +904,13 @@ async function pwaRegister() {
}
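Stripped of the re-registration bookkeeping above, service worker registration reduces to a feature check plus a scoped `register()` call. A minimal sketch, with the script path as a placeholder:

```js
// minimal registration sketch; script path is a placeholder
if ('serviceWorker' in navigator) {
  navigator.serviceWorker.register('pwa-serviceworker.js', { scope: location.pathname })
    .then((reg) => log('pwa registered:', reg.scope))
    .catch((err) => log('pwa registration error:', err));
}
```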
async function main() {
if (ui.exceptionHandler) {
window.addEventListener('unhandledrejection', (evt) => {
if (ui.detectThread) cancelAnimationFrame(ui.detectThread);
if (ui.drawThread) cancelAnimationFrame(ui.drawThread);
const msg = evt.reason.message || evt.reason || evt;
console.error(msg); // eslint-disable-line no-console
document.getElementById('log').innerHTML = msg;
status(`exception: ${msg}`);
// eslint-disable-next-line no-console
console.error(evt.reason || evt);
document.getElementById('log').innerHTML = evt.reason.message || evt.reason || evt;
status('exception error');
evt.preventDefault();
});
}
log('demo starting ...');
@ -913,14 +921,14 @@ async function main() {
// sanity check for webworker compatibility
if (typeof Worker === 'undefined' || typeof OffscreenCanvas === 'undefined') {
ui.useWorker = false;
log('webworker functionality is disabled due to missing browser functionality');
log('workers are disabled due to missing browser functionality');
}
// register PWA ServiceWorker
await pwaRegister();
// parse url search params
const params = new URLSearchParams(window.location.search);
const params = new URLSearchParams(location.search);
log('url options:', params.toString());
if (params.has('worker')) {
ui.useWorker = JSON.parse(params.get('worker'));
@ -957,14 +965,13 @@ async function main() {
// create instance of human
human = new Human(userConfig);
// human.env.perfadd = true;
log('human version:', human.version);
// we've merged human defaults with user config and now lets store it back so it can be accessed by methods such as menu
userConfig = human.config;
userConfig = { ...human.config, ...userConfig };
if (typeof tf !== 'undefined') {
log('TensorFlow external version:', tf.version); // eslint-disable-line no-undef
human.tf = tf; // eslint-disable-line no-undef
// eslint-disable-next-line no-undef
log('TensorFlow external version:', tf.version);
// eslint-disable-next-line no-undef
human.tf = tf; // use externally loaded version of tfjs
}
log('tfjs version:', human.tf.version.tfjs);
@ -977,9 +984,8 @@ async function main() {
if (ui.modelsPreload && !ui.useWorker) {
status('loading');
await human.load(userConfig); // this is not required, just pre-loads all models
log('demo loaded models:', human.models.loaded());
} else {
await human.init();
const loaded = Object.keys(human.models).filter((a) => human.models[a]);
log('demo loaded models:', loaded);
}
// warmup models
@ -999,6 +1005,9 @@ async function main() {
// init drag & drop
await dragAndDrop();
// init segmentation
await runSegmentation();
if (params.has('image')) {
try {
const image = JSON.parse(params.get('image'));
@ -1019,7 +1028,6 @@ async function main() {
}
if (human.config.debug) log('environment:', human.env);
if (human.config.backend === 'webgl' && human.config.debug) log('backend:', human.gl);
}
window.onload = main;

View File

@ -1,71 +0,0 @@
# Human Multithreading Demos
- **Browser** demo `multithread` & `worker`
Runs each `human` module in a separate web worker for highest possible performance
- **NodeJS** demo `node-multiprocess` & `node-multiprocess-worker`
Runs multiple `human` instances in parallel by dispatching work to a pool of pre-created worker processes
<br><hr><br>
## NodeJS Multi-process Demo
`nodejs/node-multiprocess.js` and `nodejs/node-multiprocess-worker.js`: Demo using NodeJS with CommonJS module
Demo that starts n child worker processes for parallel execution
```shell
node demo/nodejs/node-multiprocess.js
```
<!-- eslint-skip -->
```json
2021-06-01 08:54:19 INFO: @vladmandic/human version 2.0.0
2021-06-01 08:54:19 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0
2021-06-01 08:54:19 INFO: Human multi-process test
2021-06-01 08:54:19 STATE: Enumerated images: ./assets 15
2021-06-01 08:54:19 STATE: Main: started worker: 130362
2021-06-01 08:54:19 STATE: Main: started worker: 130363
2021-06-01 08:54:19 STATE: Main: started worker: 130369
2021-06-01 08:54:19 STATE: Main: started worker: 130370
2021-06-01 08:54:20 STATE: Worker: PID: 130370 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
2021-06-01 08:54:20 STATE: Worker: PID: 130362 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
2021-06-01 08:54:20 STATE: Worker: PID: 130369 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
2021-06-01 08:54:20 STATE: Worker: PID: 130363 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130370
2021-06-01 08:54:21 INFO: Latency: worker initialization: 1348 message round trip: 0
2021-06-01 08:54:21 DATA: Worker received message: 130370 { test: true }
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130362
2021-06-01 08:54:21 DATA: Worker received message: 130362 { image: 'samples/ai-face.jpg' }
2021-06-01 08:54:21 DATA: Worker received message: 130370 { image: 'samples/ai-body.jpg' }
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130369
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130363
2021-06-01 08:54:21 DATA: Worker received message: 130369 { image: 'assets/human-sample-upper.jpg' }
2021-06-01 08:54:21 DATA: Worker received message: 130363 { image: 'assets/sample-me.jpg' }
2021-06-01 08:54:24 DATA: Main: worker finished: 130362 detected faces: 1 bodies: 1 hands: 0 objects: 1
2021-06-01 08:54:24 STATE: Main: dispatching to worker: 130362
2021-06-01 08:54:24 DATA: Worker received message: 130362 { image: 'assets/sample1.jpg' }
2021-06-01 08:54:25 DATA: Main: worker finished: 130369 detected faces: 1 bodies: 1 hands: 0 objects: 1
2021-06-01 08:54:25 STATE: Main: dispatching to worker: 130369
2021-06-01 08:54:25 DATA: Main: worker finished: 130370 detected faces: 1 bodies: 1 hands: 0 objects: 1
2021-06-01 08:54:25 STATE: Main: dispatching to worker: 130370
2021-06-01 08:54:25 DATA: Worker received message: 130369 { image: 'assets/sample2.jpg' }
2021-06-01 08:54:25 DATA: Main: worker finished: 130363 detected faces: 1 bodies: 1 hands: 0 objects: 2
2021-06-01 08:54:25 STATE: Main: dispatching to worker: 130363
2021-06-01 08:54:25 DATA: Worker received message: 130370 { image: 'assets/sample3.jpg' }
2021-06-01 08:54:25 DATA: Worker received message: 130363 { image: 'assets/sample4.jpg' }
2021-06-01 08:54:30 DATA: Main: worker finished: 130362 detected faces: 3 bodies: 1 hands: 0 objects: 7
2021-06-01 08:54:30 STATE: Main: dispatching to worker: 130362
2021-06-01 08:54:30 DATA: Worker received message: 130362 { image: 'assets/sample5.jpg' }
2021-06-01 08:54:31 DATA: Main: worker finished: 130369 detected faces: 3 bodies: 1 hands: 0 objects: 5
2021-06-01 08:54:31 STATE: Main: dispatching to worker: 130369
2021-06-01 08:54:31 DATA: Worker received message: 130369 { image: 'assets/sample6.jpg' }
2021-06-01 08:54:31 DATA: Main: worker finished: 130363 detected faces: 4 bodies: 1 hands: 2 objects: 2
2021-06-01 08:54:31 STATE: Main: dispatching to worker: 130363
2021-06-01 08:54:39 STATE: Main: worker exit: 130370 0
2021-06-01 08:54:39 DATA: Main: worker finished: 130362 detected faces: 1 bodies: 1 hands: 0 objects: 1
2021-06-01 08:54:39 DATA: Main: worker finished: 130369 detected faces: 1 bodies: 1 hands: 1 objects: 3
2021-06-01 08:54:39 STATE: Main: worker exit: 130362 0
2021-06-01 08:54:39 STATE: Main: worker exit: 130369 0
2021-06-01 08:54:41 DATA: Main: worker finished: 130363 detected faces: 9 bodies: 1 hands: 0 objects: 10
2021-06-01 08:54:41 STATE: Main: worker exit: 130363 0
2021-06-01 08:54:41 INFO: Processed: 15 images in total: 22006 ms working: 20658 ms average: 1377 ms
```
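The pattern behind the log above is a simple fork-based worker pool: the main process forks n workers, hands each an image, and dispatches the next queue item whenever a worker reports back. A minimal sketch, assuming the worker sends a message when it is ready for work; the message shapes here are illustrative, not the demo's actual wire format:

```js
// minimal fork-based worker-pool sketch; message protocol is illustrative
const { fork } = require('child_process');

const queue = ['samples/ai-face.jpg', 'samples/ai-body.jpg', 'assets/sample1.jpg']; // work items
const numWorkers = 4;

for (let i = 0; i < numWorkers; i++) {
  const worker = fork('demo/nodejs/node-multiprocess-worker.js');
  worker.on('message', (msg) => {
    if (msg.result) console.log('worker finished:', worker.pid, msg.result); // collect results
    if (queue.length > 0) worker.send({ image: queue.shift() }); // dispatch next item
    else worker.kill(); // no work left for this worker
  });
}
```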

View File

@ -9,10 +9,10 @@
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../../manifest.webmanifest">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="../multithread/index.js" type="module"></script>
<script src="./index.js" type="module"></script>
<style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }

View File

@ -4,16 +4,17 @@
* @description Demo app that enables all Human modules and runs them in separate worker threads
*
*/
// @ts-nocheck // typescript checks disabled as this is pure javascript
import { Human } from '../../dist/human.esm.js'; // equivalent of @vladmandic/human
import Human from '../../dist/human.esm.js'; // equivalent of @vladmandic/human
import GLBench from '../helpers/gl-bench.js';
const workerJS = '../multithread/worker.js';
const workerJS = './worker.js';
const config = {
main: { // processes input and runs gesture analysis
warmup: 'none',
backend: 'webgl',
backend: 'humangl',
modelBasePath: '../../models/',
async: false,
filter: { enabled: true },
@ -26,7 +27,7 @@ const config = {
},
face: { // runs all face models
warmup: 'none',
backend: 'webgl',
backend: 'humangl',
modelBasePath: '../../models/',
async: false,
filter: { enabled: false },
@ -39,7 +40,7 @@ const config = {
},
body: { // runs body model
warmup: 'none',
backend: 'webgl',
backend: 'humangl',
modelBasePath: '../../models/',
async: false,
filter: { enabled: false },
@ -52,7 +53,7 @@ const config = {
},
hand: { // runs hands model
warmup: 'none',
backend: 'webgl',
backend: 'humangl',
modelBasePath: '../../models/',
async: false,
filter: { enabled: false },
@ -65,7 +66,7 @@ const config = {
},
object: { // runs object model
warmup: 'none',
backend: 'webgl',
backend: 'humangl',
modelBasePath: '../../models/',
async: false,
filter: { enabled: false },
@ -91,13 +92,9 @@ const busy = {
};
const workers = {
/** @type {Worker | null} */
face: null,
/** @type {Worker | null} */
body: null,
/** @type {Worker | null} */
hand: null,
/** @type {Worker | null} */
object: null,
};
@ -130,58 +127,60 @@ const result = { // initialize empty result object which will be partially fille
function log(...msg) {
const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
console.log(ts, ...msg); // eslint-disable-line no-console
// eslint-disable-next-line no-console
console.log(ts, ...msg);
}
async function drawResults() {
start.draw = human.now();
start.draw = performance.now();
const interpolated = human.next(result);
await human.draw.all(canvas, interpolated);
time.draw = Math.round(1 + human.now() - start.draw);
time.draw = Math.round(1 + performance.now() - start.draw);
const fps = Math.round(10 * 1000 / time.main) / 10;
const draw = Math.round(10 * 1000 / time.draw) / 10;
const div = document.getElementById('log');
if (div) div.innerText = `Human: version ${human.version} | Performance: Main ${time.main}ms Face: ${time.face}ms Body: ${time.body}ms Hand: ${time.hand}ms Object ${time.object}ms | FPS: ${fps} / ${draw}`;
document.getElementById('log').innerText = `Human: version ${human.version} | Performance: Main ${time.main}ms Face: ${time.face}ms Body: ${time.body}ms Hand: ${time.hand}ms Object ${time.object}ms | FPS: ${fps} / ${draw}`;
requestAnimationFrame(drawResults);
}
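Note the structure: detection results arrive asynchronously from the workers, while this loop redraws at display refresh rate and uses `human.next()` to interpolate between detections, so rendering never stalls on model latency. Reduced to its core, the pattern looks like this, with `lastResult` assumed to be updated by the worker message handler:

```js
// decoupled render-loop sketch: draw at display rate, detect at model rate
async function drawLoop() {
  const interpolated = human.next(lastResult); // temporally smoothed copy of the last detection
  await human.draw.all(canvas, interpolated);
  requestAnimationFrame(drawLoop); // never blocked by detection latency
}
```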
async function receiveMessage(msg) {
result[msg.data.type] = msg.data.result;
busy[msg.data.type] = false;
time[msg.data.type] = Math.round(human.now() - start[msg.data.type]);
time[msg.data.type] = Math.round(performance.now() - start[msg.data.type]);
}
async function runDetection() {
start.main = human.now();
start.main = performance.now();
if (!bench) {
bench = new GLBench(null, { trackGPU: false, chartHz: 20, chartLen: 20 });
bench.begin('human');
bench.begin();
}
const ctx = canvas.getContext('2d');
// const image = await human.image(video);
// ctx.drawImage(image.canvas, 0, 0, canvas.width, canvas.height);
ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
if (!busy.face) {
busy.face = true;
start.face = human.now();
if (workers.face) workers.face.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.face, type: 'face' }, [imageData.data.buffer.slice(0)]);
start.face = performance.now();
workers.face.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.face, type: 'face' }, [imageData.data.buffer.slice(0)]);
}
if (!busy.body) {
busy.body = true;
start.body = human.now();
if (workers.body) workers.body.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.body, type: 'body' }, [imageData.data.buffer.slice(0)]);
start.body = performance.now();
workers.body.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.body, type: 'body' }, [imageData.data.buffer.slice(0)]);
}
if (!busy.hand) {
busy.hand = true;
start.hand = human.now();
if (workers.hand) workers.hand.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.hand, type: 'hand' }, [imageData.data.buffer.slice(0)]);
start.hand = performance.now();
workers.hand.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.hand, type: 'hand' }, [imageData.data.buffer.slice(0)]);
}
if (!busy.object) {
busy.object = true;
start.object = human.now();
if (workers.object) workers.object.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.object, type: 'object' }, [imageData.data.buffer.slice(0)]);
start.object = performance.now();
workers.object.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.object, type: 'object' }, [imageData.data.buffer.slice(0)]);
}
time.main = Math.round(human.now() - start.main);
time.main = Math.round(performance.now() - start.main);
bench.nextFrame();
requestAnimationFrame(runDetection);
@ -198,40 +197,37 @@ async function setupCamera() {
facingMode: 'user',
resizeMode: 'crop-and-scale',
width: { ideal: document.body.clientWidth },
// height: { ideal: document.body.clientHeight }, // not set as we're using aspectRation to get height instead
aspectRatio: document.body.clientWidth / document.body.clientHeight,
},
};
// enumerate devices for diag purposes
navigator.mediaDevices.enumerateDevices()
.then((devices) => log('enumerated devices:', devices))
.catch(() => log('mediaDevices error'));
navigator.mediaDevices.enumerateDevices().then((devices) => log('enumerated devices:', devices));
log('camera constraints', constraints);
try {
stream = await navigator.mediaDevices.getUserMedia(constraints);
} catch (err) {
if (output) output.innerText += `\n${err.name}: ${err.message}`;
output.innerText += `\n${err.name}: ${err.message}`;
status(err.name);
log('camera error:', err);
}
if (stream) {
const tracks = stream.getVideoTracks();
log('enumerated viable tracks:', tracks);
const track = stream.getVideoTracks()[0];
const settings = track.getSettings();
log('selected video source:', track, settings);
} else {
log('missing video stream');
}
const promise = !stream || new Promise((resolve) => {
video.onloadeddata = () => {
canvas.style.height = '100vh';
if (settings.width > settings.height) canvas.style.width = '100vw';
else canvas.style.height = '100vh';
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
video.play();
resolve(true);
resolve();
};
});
// attach input to video element
if (stream && video) video.srcObject = stream;
if (stream) video.srcObject = stream;
return promise;
}
@ -247,13 +243,21 @@ async function startWorkers() {
}
async function main() {
window.addEventListener('unhandledrejection', (evt) => {
// eslint-disable-next-line no-console
console.error(evt.reason || evt);
document.getElementById('log').innerHTML = evt.reason.message || evt.reason || evt;
status('exception error');
evt.preventDefault();
});
if (typeof Worker === 'undefined' || typeof OffscreenCanvas === 'undefined') {
status('workers are not supported');
return;
}
human = new Human(config.main);
const div = document.getElementById('log');
if (div) div.innerText = `Human: version ${human.version}`;
document.getElementById('log').innerText = `Human: version ${human.version}`;
await startWorkers();
await setupCamera();

View File

@ -1,7 +1,9 @@
// load Human using IIFE script as Chrome Mobile does not support Modules as Workers
/// <reference lib="webworker" />
// load Human using IIFE script as Chrome Mobile does not support Modules as Workers
self.importScripts('../../dist/human.js'); // eslint-disable-line no-restricted-globals
// import Human from '../dist/human.esm.js';
self.importScripts('../../dist/human.js');
let human;
@ -9,8 +11,9 @@ onmessage = async (msg) => {
// received from index.js using:
// worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
// Human is registered as global namespace using IIFE script
if (!human) human = new Human.default(msg.data.config); // eslint-disable-line no-undef, new-cap
// @ts-ignore // Human is registered as global namespace using IIFE script
// eslint-disable-next-line no-undef, new-cap
if (!human) human = new Human.default(msg.data.config);
const image = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
let result = {};
result = await human.detect(image, msg.data.config);

View File

@ -1,121 +0,0 @@
# Human Demos for NodeJS
- `node`: Process images from files, folders or URLs
uses native methods for image loading and decoding without external dependencies
- `node-canvas`: Process image from file or URL and draw results to a new image file using `node-canvas`
uses `node-canvas` library to load and decode images from files, draw detection results and write output to a new image file
- `node-video`: Processing of video input using `ffmpeg`
uses `ffmpeg` to decode video input (can be a file, stream or device such as a webcam) and
pipe out raw frames that are captured by the demo app and processed by the `Human` library
- `node-webcam`: Processing of webcam screenshots using `fswebcam`
uses `fswebcam` to connect to a webcam and take screenshots at a regular interval, which are then processed by the `Human` library
- `node-event`: Showcases usage of `Human` eventing to get notifications on processing
- `node-similarity`: Compares two input images for similarity of detected faces
- `process-folder`: Processing all images in an input folder and creating output images
internally used to generate the samples gallery (see the sketch below)
<br>
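For reference, the `process-folder` pattern reduces to enumerating files and feeding each through `human.detect()`. A minimal sketch using `node-canvas`; paths and config are illustrative:

```js
// sketch: run detection over every image in a folder using node-canvas
const fs = require('fs');
const path = require('path');
const canvas = require('canvas');
require('@tensorflow/tfjs-node'); // tfjs-node must load before human in nodejs
const Human = require('@vladmandic/human');

async function main() {
  globalThis.Canvas = canvas.Canvas; // patch global namespace with canvas library
  globalThis.ImageData = canvas.ImageData;
  const human = new Human.Human({ debug: false });
  const dir = process.argv[2] || 'samples/in';
  for (const file of fs.readdirSync(dir).filter((f) => f.toLowerCase().endsWith('.jpg'))) {
    const image = await canvas.loadImage(path.join(dir, file));
    const c = new canvas.Canvas(image.width, image.height);
    c.getContext('2d').drawImage(image, 0, 0);
    const result = await human.detect(c);
    console.log(file, 'faces:', result.face.length, 'bodies:', result.body.length, 'hands:', result.hand.length);
  }
}
main();
```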
## Main Demo
`nodejs/node.js`: Demo using NodeJS with CommonJS module
Simple demo that can process any input image
Note that you can run the demo as-is and it will perform detection on the provided sample images,
or you can pass a path to an image to analyze, either on the local filesystem or via URL
```shell
node demo/nodejs/node.js
```
<!-- eslint-skip -->
```js
2021-06-01 08:52:15 INFO: @vladmandic/human version 2.0.0
2021-06-01 08:52:15 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0
2021-06-01 08:52:15 INFO: Current folder: /home/vlado/dev/human
2021-06-01 08:52:15 INFO: Human: 2.0.0
2021-06-01 08:52:15 INFO: Active Configuration {
backend: 'tensorflow',
modelBasePath: 'file://models/',
wasmPath: '../node_modules/@tensorflow/tfjs-backend-wasm/dist/',
debug: true,
async: false,
warmup: 'full',
cacheSensitivity: 0.75,
filter: {
enabled: true,
width: 0,
height: 0,
flip: true,
return: true,
brightness: 0,
contrast: 0,
sharpness: 0,
blur: 0,
saturation: 0,
hue: 0,
negative: false,
sepia: false,
vintage: false,
kodachrome: false,
technicolor: false,
polaroid: false,
pixelate: 0
},
gesture: { enabled: true },
face: {
enabled: true,
detector: { modelPath: 'blazeface.json', rotation: false, maxDetected: 10, skipFrames: 15, minConfidence: 0.2, iouThreshold: 0.1, return: false, enabled: true },
mesh: { enabled: true, modelPath: 'facemesh.json' },
iris: { enabled: true, modelPath: 'iris.json' },
description: { enabled: true, modelPath: 'faceres.json', skipFrames: 16, minConfidence: 0.1 },
emotion: { enabled: true, minConfidence: 0.1, skipFrames: 17, modelPath: 'emotion.json' }
},
body: { enabled: true, modelPath: 'movenet-lightning.json', maxDetected: 1, minConfidence: 0.2 },
hand: {
enabled: true,
rotation: true,
skipFrames: 18,
minConfidence: 0.1,
iouThreshold: 0.1,
maxDetected: 2,
landmarks: true,
detector: { modelPath: 'handdetect.json' },
skeleton: { modelPath: 'handskeleton.json' }
},
object: { enabled: true, modelPath: 'centernet.json', minConfidence: 0.2, iouThreshold: 0.4, maxDetected: 10, skipFrames: 19 }
}
08:52:15.673 Human: version: 2.0.0
08:52:15.674 Human: tfjs version: 3.6.0
08:52:15.674 Human: platform: linux x64
08:52:15.674 Human: agent: NodeJS v16.0.0
08:52:15.674 Human: setting backend: tensorflow
08:52:15.710 Human: load model: file://models/blazeface.json
08:52:15.743 Human: load model: file://models/facemesh.json
08:52:15.744 Human: load model: file://models/iris.json
08:52:15.760 Human: load model: file://models/emotion.json
08:52:15.847 Human: load model: file://models/handdetect.json
08:52:15.847 Human: load model: file://models/handskeleton.json
08:52:15.914 Human: load model: file://models/movenet-lightning.json
08:52:15.957 Human: load model: file://models/centernet.json
08:52:16.015 Human: load model: file://models/faceres.json
08:52:16.015 Human: tf engine state: 50796152 bytes 1318 tensors
2021-06-01 08:52:16 INFO: Loaded: [ 'face', 'movenet', 'handpose', 'emotion', 'centernet', 'faceres', [length]: 6 ]
2021-06-01 08:52:16 INFO: Memory state: { unreliable: true, numTensors: 1318, numDataBuffers: 1318, numBytes: 50796152 }
2021-06-01 08:52:16 INFO: Loading image: private/daz3d/daz3d-kiaria-02.jpg
2021-06-01 08:52:16 STATE: Processing: [ 1, 1300, 1000, 3, [length]: 4 ]
2021-06-01 08:52:17 DATA: Results:
2021-06-01 08:52:17 DATA: Face: #0 boxScore:0.88 faceScore:1 age:16.3 genderScore:0.97 gender:female emotionScore:0.85 emotion:happy iris:61.05
2021-06-01 08:52:17 DATA: Body: #0 score:0.82 keypoints:17
2021-06-01 08:52:17 DATA: Hand: #0 score:0.89
2021-06-01 08:52:17 DATA: Hand: #1 score:0.97
2021-06-01 08:52:17 DATA: Gesture: face#0 gesture:facing left
2021-06-01 08:52:17 DATA: Gesture: body#0 gesture:leaning right
2021-06-01 08:52:17 DATA: Gesture: hand#0 gesture:pinky forward middlefinger up
2021-06-01 08:52:17 DATA: Gesture: hand#1 gesture:pinky forward middlefinger up
2021-06-01 08:52:17 DATA: Gesture: iris#0 gesture:looking left
2021-06-01 08:52:17 DATA: Object: #0 score:0.55 label:person
2021-06-01 08:52:17 DATA: Object: #1 score:0.23 label:bottle
2021-06-01 08:52:17 DATA: Persons:
2021-06-01 08:52:17 DATA: #0: Face:score:1 age:16.3 gender:female iris:61.05 Body:score:0.82 keypoints:17 LeftHand:no RightHand:yes Gestures:4
```

View File

@ -1,66 +0,0 @@
/**
* Human simple demo for NodeJS
*/
const childProcess = require('child_process'); // eslint-disable-line camelcase
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
const config = {
cacheSensitivity: 0.01,
wasmPlatformFetch: true,
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
};
const count = 10;
async function loadImage(input) {
const inputImage = await canvas.loadImage(input);
const inputCanvas = new canvas.Canvas(inputImage.width, inputImage.height);
const inputCtx = inputCanvas.getContext('2d');
inputCtx.drawImage(inputImage, 0, 0);
const imageData = inputCtx.getImageData(0, 0, inputCanvas.width, inputCanvas.height);
process.send({ input, resolution: [inputImage.width, inputImage.height] });
return imageData;
}
async function runHuman(module, backend) {
if (backend === 'wasm') require('@tensorflow/tfjs-backend-wasm'); // eslint-disable-line node/no-unpublished-require, global-require
const Human = require('../../dist/' + module); // eslint-disable-line global-require, import/no-dynamic-require
config.backend = backend;
const human = new Human.Human(config);
human.env.Canvas = canvas.Canvas;
human.env.Image = canvas.Image;
human.env.ImageData = canvas.ImageData;
process.send({ human: human.version, module });
await human.init();
process.send({ desired: human.config.backend, wasm: human.env.wasm, tfjs: human.tf.version.tfjs, tensorflow: human.env.tensorflow });
const imageData = await loadImage('samples/in/ai-body.jpg');
const t0 = human.now();
await human.load();
const t1 = human.now();
await human.warmup();
const t2 = human.now();
for (let i = 0; i < count; i++) await human.detect(imageData);
const t3 = human.now();
process.send({ backend: human.tf.getBackend(), load: Math.round(t1 - t0), warmup: Math.round(t2 - t1), detect: Math.round(t3 - t2), count, memory: human.tf.memory().numBytes });
}
async function executeWorker(args) {
return new Promise((resolve) => {
const worker = childProcess.fork(process.argv[1], args);
worker.on('message', (msg) => log.data(msg));
worker.on('exit', () => resolve(true));
});
}
async function main() {
if (process.argv[2]) {
await runHuman(process.argv[2], process.argv[3]);
} else {
await executeWorker(['human.node.js', 'tensorflow']);
await executeWorker(['human.node-gpu.js', 'tensorflow']);
await executeWorker(['human.node-wasm.js', 'wasm']);
}
}
main();

View File

@ -1,22 +1,18 @@
/**
* Human demo for NodeJS using Canvas library
*
* Requires [canvas](https://www.npmjs.com/package/canvas) to provide Canvas functionality in NodeJS environment
*/
const fs = require('fs');
const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
const log = require('@vladmandic/pilogger');
const canvas = require('canvas');
require('@tensorflow/tfjs-node'); // for nodejs, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
const Human = require('../../dist/human.node.js'); // or require('../../dist/human.node-gpu.js') for the GPU build
const config = { // just enable all and leave default settings
debug: false,
face: { enabled: true, detector: { maxDetected: 10 } }, // includes mesh, iris, emotion, descriptor
hand: { enabled: true, maxDetected: 20, minConfidence: 0.5, detector: { modelPath: 'handtrack.json' } }, // use alternative hand model
face: { enabled: true }, // includes mesh, iris, emotion, descriptor
hand: { enabled: true },
body: { enabled: true },
object: { enabled: true },
gestures: { enabled: true },
@ -25,23 +21,18 @@ const config = { // just enable all and leave default settings
async function main() {
log.header();
globalThis.Canvas = canvas.Canvas; // patch global namespace with canvas library
globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library
// human.env.Canvas = canvas.Canvas; // alternatively monkey-patch human to use external canvas library
// human.env.ImageData = canvas.ImageData; // alternatively monkey-patch human to use external canvas library
// init
const human = new Human.Human(config); // create instance of human
log.info('Human:', human.version, 'TF:', tf.version_core);
log.info('Human:', human.version);
// @ts-ignore
human.env.Canvas = canvas.Canvas; // monkey-patch human to use external canvas library
await human.load(); // pre-load models
log.info('Loaded models:', human.models.loaded());
log.info('Loaded models:', Object.keys(human.models).filter((a) => human.models[a]));
log.info('Memory state:', human.tf.engine().memory());
// parse cmdline
const input = process.argv[2];
let output = process.argv[3];
if (!output.toLowerCase().endsWith('.jpg')) output += '.jpg';
const output = process.argv[3];
if (process.argv.length !== 4) log.error('Parameters: <input-image> <output-image> missing');
else if (!fs.existsSync(input) && !input.startsWith('http')) log.error(`File not found: ${process.argv[2]}`);
else {
@ -49,12 +40,11 @@ async function main() {
const inputImage = await canvas.loadImage(input); // load image using canvas library
log.info('Loaded image', input, inputImage.width, inputImage.height);
const inputCanvas = new canvas.Canvas(inputImage.width, inputImage.height); // create canvas
const inputCtx = inputCanvas.getContext('2d');
inputCtx.drawImage(inputImage, 0, 0); // draw input image onto canvas
const imageData = inputCtx.getImageData(0, 0, inputCanvas.width, inputCanvas.height);
const ctx = inputCanvas.getContext('2d');
ctx.drawImage(inputImage, 0, 0); // draw input image onto canvas
// run detection
const result = await human.detect(imageData);
const result = await human.detect(inputCanvas);
// print results summary
const persons = result.persons; // invoke persons getter, only used to print summary on console
@ -62,19 +52,16 @@ async function main() {
const face = persons[i].face;
const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.iris}` : null;
const body = persons[i].body;
const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints.length}` : null;
const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints?.length}` : null;
log.data(`Detected: #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`);
}
// draw detected results onto canvas and save it to a file
const outputCanvas = new canvas.Canvas(inputImage.width, inputImage.height); // create canvas
const outputCtx = outputCanvas.getContext('2d');
outputCtx.drawImage(result.canvas || inputImage, 0, 0); // draw input image onto canvas
human.draw.all(outputCanvas, result); // use human built-in method to draw results as overlays on canvas
human.draw.all(inputCanvas, result); // use human built-in method to draw results as overlays on canvas
const outFile = fs.createWriteStream(output); // write canvas to new image file
outFile.on('finish', () => log.state('Output image:', output, outputCanvas.width, outputCanvas.height));
outFile.on('finish', () => log.state('Output image:', output, inputCanvas.width, inputCanvas.height));
outFile.on('error', (err) => log.error('Output error:', output, err));
const stream = outputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
const stream = inputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
stream.pipe(outFile);
}
}

21
demo/nodejs/node-env.js Normal file
View File

@ -0,0 +1,21 @@
const log = require('@vladmandic/pilogger');
const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
const config = {
debug: false,
};
async function main() {
const human = new Human(config);
await human.tf.ready();
log.info('Human:', human.version);
log.data('Environment', human.env);
await human.load();
const models = Object.keys(human.models).map((model) => ({ name: model, loaded: (human.models[model] !== null) }));
log.data('Models:', models);
log.info('Memory state:', human.tf.engine().memory());
// log.data('Config', human.config);
log.info('TFJS flags:', human.tf.ENV.flags);
}
main();

View File

@ -2,18 +2,23 @@
* Human demo for NodeJS
*/
const log = require('@vladmandic/pilogger');
const fs = require('fs');
const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
let fetch; // fetch is dynamically imported later
// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
// load specific version of Human library that matches TensorFlow mode
const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
let human = null;
const myConfig = {
backend: 'tensorflow',
modelBasePath: 'file://models/',
debug: false,
async: true,
@ -36,17 +41,29 @@ async function detect(input) {
let buffer;
log.info('Loading image:', input);
if (input.startsWith('http:') || input.startsWith('https:')) {
fetch = (await import('node-fetch')).default;
const res = await fetch(input);
if (res && res.ok) buffer = Buffer.from(await res.arrayBuffer());
if (res && res.ok) buffer = await res.buffer();
else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type'));
} else {
buffer = fs.readFileSync(input);
}
log.data('Image bytes:', buffer?.length, 'buffer:', buffer?.slice(0, 32));
// decode image using tfjs-node so we don't need external dependencies
if (!buffer) return;
const tensor = human.tf.node.decodeImage(buffer, 3);
const tensor = human.tf.tidy(() => {
const decode = human.tf.node.decodeImage(buffer, 3);
let expand;
if (decode.shape[2] === 4) { // input is in rgba format, need to convert to rgb
const channels = human.tf.split(decode, 4, 2); // split rgba to channels
const rgb = human.tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb and ignore alpha
expand = human.tf.reshape(rgb, [1, decode.shape[0], decode.shape[1], 3]); // move extra dim from the end of tensor and use it as batch number instead
} else {
expand = human.tf.expandDims(decode, 0);
}
const cast = human.tf.cast(expand, 'float32');
return cast;
});
// run detection
await human.detect(tensor, myConfig);
@ -56,16 +73,15 @@ async function detect(input) {
async function main() {
log.header();
human = new Human.Human(myConfig);
log.info('Human:', human.version, 'TF:', tf.version_core);
human = new Human(myConfig);
if (human.events) {
human.events.addEventListener('warmup', () => {
log.info('Event Warmup');
});
human.events.addEventListener('load', () => {
log.info('Event Loaded:', human.models.loaded(), human.tf.engine().memory());
const loaded = Object.keys(human.models).filter((a) => human.models[a]);
log.info('Event Loaded:', loaded, human.tf.engine().memory());
});
human.events.addEventListener('image', () => {
@ -77,13 +93,12 @@ async function main() {
const persons = human.result.persons;
for (let i = 0; i < persons.length; i++) {
const face = persons[i].face;
const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.distance}` : null;
const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.iris}` : null;
const body = persons[i].body;
const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints?.length}` : null;
log.data(` #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`);
}
});
}
await human.tf.ready(); // wait until tf is ready

View File

@ -1,30 +0,0 @@
/**
* Human demo for NodeJS using http fetch to get image file
*
* Requires [node-fetch](https://www.npmjs.com/package/node-fetch) to provide `fetch` functionality in NodeJS environment
*/
const fs = require('fs');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
const humanConfig = {
modelBasePath: 'https://vladmandic.github.io/human/models/',
};
async function main(inputFile) {
global.fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-unpublished-import, import/no-unresolved, node/no-missing-import, node/no-extraneous-import
const human = new Human.Human(humanConfig); // create instance of human using default configuration
log.info('Human:', human.version, 'TF:', tf.version_core);
await human.load(); // optional as models would be loaded on-demand first time they are required
await human.warmup(); // optional as model warmup is performed on-demand first time its executed
const buffer = fs.readFileSync(inputFile); // read file data into buffer
const tensor = human.tf.node.decodeImage(buffer); // decode jpg data
const result = await human.detect(tensor); // run detection; will initialize backend and on-demand load models
log.data(result.gesture);
}
main('samples/in/ai-body.jpg');

View File

@ -6,16 +6,17 @@
*/
const fs = require('fs');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const log = require('@vladmandic/pilogger');
// workers actually import tfjs and human modules
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// workers actually import tfjs and faceapi modules
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
const tf = require('@tensorflow/tfjs-node');
const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
let human = null;
const myConfig = {
// backend: 'tensorflow',
backend: 'tensorflow',
modelBasePath: 'file://models/',
debug: false,
async: true,
@ -35,7 +36,7 @@ const myConfig = {
object: { enabled: true },
};
// read image from a file and create tensor to be used by human
// read image from a file and create tensor to be used by faceapi
// this way we don't need any monkey patches
// you can add any pre-processing here such as resizing, etc.
async function image(img) {
@ -44,7 +45,7 @@ async function image(img) {
return tensor;
}
// actual human detection
// actual faceapi detection
async function detect(img) {
const tensor = await image(img);
const result = await human.detect(tensor);
@ -63,9 +64,11 @@ async function main() {
// on worker start first initialize message handler so we don't miss any messages
process.on('message', (msg) => {
// if main told worker to exit
if (msg.exit && process.exit) process.exit(); // eslint-disable-line no-process-exit
// @ts-ignore
if (msg.exit && process.exit) process.exit(); // if main told worker to exit
// @ts-ignore
if (msg.test && process.send) process.send({ test: true });
// @ts-ignore
if (msg.image) detect(msg.image); // if main told worker to process image
log.data('Worker received message:', process.pid, msg); // generic log
});
@ -75,7 +78,7 @@ async function main() {
// wait until tf is ready
await human.tf.ready();
// pre-load models
log.state('Worker: PID:', process.pid, `TensorFlow/JS ${human.tf.version['tfjs-core']} Human ${human.version} Backend: ${human.tf.getBackend()}`);
log.state('Worker: PID:', process.pid, `TensorFlow/JS ${human.tf.version_core} Human ${human.version} Backend: ${human.tf.getBackend()}`);
await human.load();
// now we're ready, so send message back to main that it knows it can use this worker

View File

@ -8,12 +8,13 @@
const fs = require('fs');
const path = require('path');
const childProcess = require('child_process'); // eslint-disable-line camelcase
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require
const log = require('@vladmandic/pilogger'); // this is my simple logger with a few extra features
const child_process = require('child_process');
// note that main process does not import human or tfjs at all, it's all done from worker process
const workerFile = 'demo/multithread/node-multiprocess-worker.js';
const imgPathRoot = './samples/in'; // modify to include your sample images
const workerFile = 'demo/nodejs/node-multiprocess-worker.js';
const imgPathRoot = './assets'; // modify to include your sample images
const numWorkers = 4; // how many workers will be started
const workers = []; // this holds worker processes
const images = []; // this holds queue of enumerated images
@ -22,7 +23,7 @@ let numImages;
// triggered by main when worker sends ready message
// if image pool is empty, signal worker to exit; otherwise dispatch image to worker and remove it from queue
async function submitDetect(worker) {
async function detect(worker) {
if (!t[2]) t[2] = process.hrtime.bigint(); // on first call take a timestamp so we can measure initial latency
if (images.length === numImages) worker.send({ test: true }); // for first image in queue just measure latency
if (images.length === 0) worker.send({ exit: true }); // nothing left in queue
@ -57,7 +58,7 @@ async function main() {
});
log.header();
log.info('Human multi-process test');
log.info('FaceAPI multi-process test');
// enumerate all images into queue
const dir = fs.readdirSync(imgPathRoot);
@ -73,13 +74,13 @@ async function main() {
// manage worker processes
for (let i = 0; i < numWorkers; i++) {
// create worker process
workers[i] = await childProcess.fork(workerFile, ['special']);
workers[i] = await child_process.fork(workerFile, ['special']);
// parse message that worker process sends back to main
// if message is ready, dispatch next image in queue
// if message is processing result, just print how many faces were detected
// otherwise it's an unknown message
workers[i].on('message', (msg) => {
if (msg.ready) submitDetect(workers[i]);
if (msg.ready) detect(workers[i]);
else if (msg.image) log.data('Main: worker finished:', workers[i].pid, 'detected faces:', msg.detected.face?.length, 'bodies:', msg.detected.body?.length, 'hands:', msg.detected.hand?.length, 'objects:', msg.detected.object?.length);
else if (msg.test) measureLatency();
else log.data('Main: worker message:', workers[i].pid, msg);

View File

@ -1,64 +0,0 @@
/**
* Human Person Similarity test for NodeJS
*/
const fs = require('fs');
const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
let human = null;
const myConfig = {
modelBasePath: 'file://models/',
debug: true,
face: { emotion: { enabled: false } },
body: { enabled: false },
hand: { enabled: false },
gesture: { enabled: false },
};
async function init() {
human = new Human.Human(myConfig);
await human.tf.ready();
log.info('Human:', human.version, 'TF:', tf.version_core);
await human.load();
log.info('Loaded:', human.models.loaded());
log.info('Memory state:', human.tf.engine().memory());
}
async function detect(input) {
if (!fs.existsSync(input)) {
throw new Error(`Cannot load image: ${input}`);
}
const buffer = fs.readFileSync(input);
const tensor = human.tf.node.decodeImage(buffer, 3);
log.state('Loaded image:', input, tensor.shape);
const result = await human.detect(tensor, myConfig);
human.tf.dispose(tensor);
log.state('Detected faces:', result.face.length);
return result;
}
async function main() {
log.configure({ inspect: { breakLength: 265 } });
log.header();
if (process.argv.length !== 4) {
log.error('Parameters: <first image> <second image> missing');
return;
}
await init();
const res1 = await detect(process.argv[2]);
const res2 = await detect(process.argv[3]);
if (!res1 || !res1.face || res1.face.length === 0 || !res2 || !res2.face || res2.face.length === 0) {
throw new Error('Could not detect face descriptors');
}
const similarity = human.match.similarity(res1.face[0].embedding, res2.face[0].embedding, { order: 2 });
log.data('Similarity: ', similarity);
}
main();
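
For illustration, the `human.match.similarity` call above can be wrapped into a simple verdict. A minimal sketch; the `samePerson` helper and the 0.5 threshold are hypothetical, not part of the demo:

```js
// minimal sketch: same similarity call as above, wrapped in a pass/fail verdict
// res1/res2 are Human detection results with at least one face each
// threshold 0.5 is a hypothetical example value; tune it for your models and data
function samePerson(res1, res2, threshold = 0.5) {
  const similarity = human.match.similarity(res1.face[0].embedding, res2.face[0].embedding, { order: 2 });
  return { similarity, match: similarity > threshold };
}
```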

View File

@ -1,32 +0,0 @@
/**
* Human simple demo for NodeJS
*/
const fs = require('fs');
const process = require('process');
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
const humanConfig = {
// add any custom config here
debug: true,
body: { enabled: false },
};
async function detect(inputFile) {
const human = new Human.Human(humanConfig); // create instance of human using default configuration
console.log('Human:', human.version, 'TF:', tf.version_core); // eslint-disable-line no-console
await human.load(); // optional as models would be loaded on-demand first time they are required
await human.warmup(); // optional as model warmup is performed on-demand first time its executed
const buffer = fs.readFileSync(inputFile); // read file data into buffer
const tensor = human.tf.node.decodeImage(buffer); // decode jpg data
console.log('loaded input file:', inputFile, 'resolution:', tensor.shape); // eslint-disable-line no-console
const result = await human.detect(tensor); // run detection; will initialize backend and on-demand load models
console.log(result); // eslint-disable-line no-console
}
if (process.argv.length === 3) detect(process.argv[2]); // if input file is provided as cmdline parameter use it
else detect('samples/in/ai-body.jpg'); // else use built-in test input file

View File

@ -7,26 +7,27 @@
 * If you want to process at specific intervals, set output fps to some value
* If you want to process an input stream, set real-time flag and set input as required
*
* Note that [pipe2jpeg](https://www.npmjs.com/package/pipe2jpeg) is not part of Human dependencies and should be installed manually
* Working version of `ffmpeg` must be present on the system
* Note that pipe2jpeg is not part of Human dependencies and should be installed manually
* Working version of ffmpeg must be present on the system
*/
const process = require('process');
const spawn = require('child_process').spawn;
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
// const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Pipe2Jpeg = require('pipe2jpeg'); // eslint-disable-line node/no-missing-require, import/no-unresolved
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
const log = require('@vladmandic/pilogger');
// @ts-ignore pipe2jpeg is not installed by default
// eslint-disable-next-line node/no-missing-require
const Pipe2Jpeg = require('pipe2jpeg');
// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
// load specific version of Human library that matches TensorFlow mode
const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
let count = 0; // counter
let busy = false; // busy flag
let inputFile = './test.mp4';
if (process.argv.length === 3) inputFile = process.argv[2];
const inputFile = './test.mp4';
const humanConfig = {
backend: 'tensorflow',
modelBasePath: 'file://models/',
debug: false,
async: true,
@ -44,7 +45,7 @@ const humanConfig = {
object: { enabled: false },
};
const human = new Human.Human(humanConfig);
const human = new Human(humanConfig);
const pipe2jpeg = new Pipe2Jpeg();
const ffmpegParams = [
@ -61,16 +62,18 @@ const ffmpegParams = [
'pipe:1', // output to unix pipe that is then captured by pipe2jpeg
];
async function detect(jpegBuffer) {
async function process(jpegBuffer) {
if (busy) return; // skip processing if busy
busy = true;
const tensor = human.tf.node.decodeJpeg(jpegBuffer, 3); // decode jpeg buffer to raw tensor
const decoded = tf.node.decodeJpeg(jpegBuffer, 3); // decode jpeg buffer to raw tensor
const tensor = tf.expandDims(decoded, 0); // almost all tf models use first dimension as batch number so we add it
tf.dispose(decoded);
log.state('input frame:', ++count, 'size:', jpegBuffer.length, 'decoded shape:', tensor.shape);
const res = await human.detect(tensor);
human.tf.dispose(tensor); // must dispose tensor
// start custom processing here
log.data('frame', { frame: ++count, size: jpegBuffer.length, shape: tensor.shape, face: res?.face?.length, body: res?.body?.length, hand: res?.hand?.length, gesture: res?.gesture?.length });
if (res?.face?.[0]) log.data('person', { score: [res.face[0].boxScore, res.face[0].faceScore], age: res.face[0].age || 0, gender: [res.face[0].genderScore || 0, res.face[0].gender], emotion: res.face[0].emotion?.[0] });
// at the end of processing, mark loop as not busy so it can process the next frame
log.data('gesture', JSON.stringify(res.gesture));
// do processing here
tf.dispose(tensor); // must dispose tensor
busy = false;
}
@ -78,9 +81,8 @@ async function main() {
log.header();
await human.tf.ready();
// pre-load models
log.info({ human: human.version, tf: human.tf.version_core });
log.info({ input: inputFile });
pipe2jpeg.on('data', (jpegBuffer) => detect(jpegBuffer));
log.info('human:', human.version);
pipe2jpeg.on('jpeg', (jpegBuffer) => process(jpegBuffer));
const ffmpeg = spawn('ffmpeg', ffmpegParams, { stdio: ['ignore', 'pipe', 'ignore'] });
ffmpeg.on('error', (error) => log.error('ffmpeg error:', error));

View File

@ -2,18 +2,20 @@
* Human demo for NodeJS
* Unsupported sample of using external utility fswebcam to capture screenshot from attached webcam in regular intervals and process it using Human
*
* Note that [node-webcam](https://www.npmjs.com/package/node-webcam) is not part of Human dependencies and should be installed manually
* Working version of `fswebcam` must be present on the system
* Note that node-webcam is not part of Human dependencies and should be installed manually
* Working version of fswebcam must be present on the system
*/
let initial = true; // remember if this is the first run to print additional details
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const nodeWebCam = require('node-webcam'); // eslint-disable-line import/no-unresolved, node/no-missing-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
const log = require('@vladmandic/pilogger');
// @ts-ignore node-webcam is not installed by default
// eslint-disable-next-line node/no-missing-require
const nodeWebCam = require('node-webcam');
// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
// load specific version of Human library that matches TensorFlow mode
const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
// options for node-webcam
const tempFile = 'webcam-snap'; // node-webcam requires writing snapshot to a file, recommended to use tmpfs to avoid excessive disk writes
@ -25,10 +27,10 @@ const camera = nodeWebCam.create(optionsCamera);
// options for human
const optionsHuman = {
backend: 'tensorflow',
modelBasePath: 'file://models/',
};
const human = new Human.Human(optionsHuman);
const human = new Human(optionsHuman);
function buffer2tensor(buffer) {
return human.tf.tidy(() => {
@ -60,20 +62,18 @@ async function detect() {
} else {
const tensor = buffer2tensor(data); // create tensor from image buffer
if (initial) log.data('input tensor:', tensor.shape);
human.detect(tensor) // eslint-disable-line promise/no-promise-in-callback
.then((result) => {
// eslint-disable-next-line promise/no-promise-in-callback
human.detect(tensor).then((result) => {
if (result && result.face && result.face.length > 0) {
for (let i = 0; i < result.face.length; i++) {
const face = result.face[i];
const emotion = face.emotion?.reduce((prev, curr) => (prev.score > curr.score ? prev : curr));
log.data(`detected face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion?.score} emotion:${emotion?.emotion} iris:${face.iris}`);
const emotion = face.emotion.reduce((prev, curr) => (prev.score > curr.score ? prev : curr));
log.data(`detected face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} iris:${face.iris}`);
}
} else {
log.data(' Face: N/A');
}
return result;
})
.catch(() => log.error('human detect error'));
});
}
initial = false;
});
@ -82,7 +82,6 @@ async function detect() {
}
async function main() {
log.info('human:', human.version, 'tf:', tf.version_core);
camera.list((list) => {
log.data('detected camera:', list);
});

View File

@ -1,21 +1,25 @@
/**
* Human demo for NodeJS
*/
*/
const log = require('@vladmandic/pilogger');
const fs = require('fs');
const path = require('path');
const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
let fetch; // fetch is dynamically imported later
// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
// load specific version of Human library that matches TensorFlow mode
const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
let human = null;
const myConfig = {
// backend: 'tensorflow',
backend: 'tensorflow',
modelBasePath: 'file://models/',
debug: true,
async: false,
@ -41,17 +45,16 @@ const myConfig = {
async function init() {
// create instance of human
human = new Human.Human(myConfig);
human = new Human(myConfig);
// wait until tf is ready
await human.tf.ready();
log.info('human:', human.version, 'tf:', tf.version_core);
// pre-load models
log.info('Human:', human.version);
// log.info('Active Configuration', human.config);
await human.load();
log.info('Loaded:', human.models.loaded());
// log.info('Memory state:', human.tf.engine().memory());
log.data(tf.backend().binding ? tf.backend().binding.TF_Version : null);
const loaded = Object.keys(human.models).filter((a) => human.models[a]);
log.info('Loaded:', loaded);
log.info('Memory state:', human.tf.engine().memory());
}
async function detect(input) {
@ -60,12 +63,11 @@ async function detect(input) {
log.info('Loading image:', input);
if (input.startsWith('http:') || input.startsWith('https:')) {
const res = await fetch(input);
if (res && res.ok) buffer = Buffer.from(await res.arrayBuffer());
if (res && res.ok) buffer = await res.buffer();
else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type'));
} else {
buffer = fs.readFileSync(input);
}
log.data('Image bytes:', buffer?.length, 'buffer:', buffer?.slice(0, 32));
// decode image using tfjs-node so we don't need external dependencies
// can also be done using canvas.js or some other 3rd party image library
@ -85,14 +87,14 @@ async function detect(input) {
});
// image shape contains image dimensions and depth
log.state('Processing:', tensor.shape);
log.state('Processing:', tensor['shape']);
// run actual detection
let result;
try {
result = await human.detect(tensor, myConfig);
} catch (err) {
log.error('caught', err);
log.error('caught');
}
// dispose image tensor as we no longer need it
@ -104,7 +106,7 @@ async function detect(input) {
for (let i = 0; i < result.face.length; i++) {
const face = result.face[i];
const emotion = face.emotion.reduce((prev, curr) => (prev.score > curr.score ? prev : curr));
log.data(` Face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} distance:${face.distance}`);
log.data(` Face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} iris:${face.iris}`);
}
} else {
log.data(' Face: N/A');
@ -133,7 +135,6 @@ async function detect(input) {
} else {
log.data(' Gesture: N/A');
}
if (result && result.object && result.object.length > 0) {
for (let i = 0; i < result.object.length; i++) {
const object = result.object[i];
@ -188,6 +189,7 @@ async function main() {
log.configure({ inspect: { breakLength: 265 } });
log.header();
log.info('Current folder:', process.env.PWD);
fetch = (await import('node-fetch')).default;
await init();
const f = process.argv[2];
if (process.argv.length !== 3) {
@ -195,7 +197,8 @@ async function main() {
await test();
} else if (!fs.existsSync(f) && !f.startsWith('http')) {
log.error(`File not found: ${process.argv[2]}`);
} else if (fs.existsSync(f)) {
} else {
if (fs.existsSync(f)) {
const stat = fs.statSync(f);
if (stat.isDirectory()) {
const dir = fs.readdirSync(f);
@ -208,6 +211,7 @@ async function main() {
} else {
await detect(f);
}
}
}
main();

View File

@ -1,119 +0,0 @@
/**
* Human demo for NodeJS
*
 * Takes input and output folder names as parameters and processes all images
* found in input folder and creates annotated images in output folder
*
* Requires [canvas](https://www.npmjs.com/package/canvas) to provide Canvas functionality in NodeJS environment
*/
const fs = require('fs');
const path = require('path');
const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
// for nodejs, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
const tf = require('@tensorflow/tfjs-node-gpu'); // eslint-disable-line node/no-unpublished-require
const Human = require('../../dist/human.node-gpu.js'); // or require('../../dist/human.node.js') when not using GPU
const config = { // just enable all and leave default settings
modelBasePath: 'file://models',
debug: true,
softwareKernels: true, // slower but enhanced precision since face rotation can work in software mode in nodejs environments
cacheSensitivity: 0.01,
face: { enabled: true, detector: { maxDetected: 100, minConfidence: 0.1 } },
object: { enabled: true, maxDetected: 100, minConfidence: 0.1 },
gesture: { enabled: true },
hand: { enabled: true, maxDetected: 100, minConfidence: 0.2 },
body: { enabled: true, maxDetected: 100, minConfidence: 0.1, modelPath: 'https://vladmandic.github.io/human-models/models/movenet-multipose.json' },
};
const poolSize = 4;
const human = new Human.Human(config); // create instance of human
async function saveFile(shape, buffer, result, outFile) {
return new Promise(async (resolve, reject) => { // eslint-disable-line no-async-promise-executor
const outputCanvas = new canvas.Canvas(shape[2], shape[1]); // create canvas
const outputCtx = outputCanvas.getContext('2d');
const inputImage = await canvas.loadImage(buffer); // load image using canvas library
outputCtx.drawImage(inputImage, 0, 0); // draw input image onto canvas
human.draw.all(outputCanvas, result); // use human built-in method to draw results as overlays on canvas
const outStream = fs.createWriteStream(outFile); // write canvas to new image file
outStream.on('finish', () => {
log.data('Output image:', outFile, outputCanvas.width, outputCanvas.height);
resolve();
});
outStream.on('error', (err) => {
log.error('Output error:', outFile, err);
reject();
});
const stream = outputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
stream.pipe(outStream);
});
}
async function processFile(image, inFile, outFile) {
const buffer = fs.readFileSync(inFile);
const tensor = tf.tidy(() => {
const decode = tf.node.decodeImage(buffer, 3);
const expand = tf.expandDims(decode, 0);
const cast = tf.cast(expand, 'float32');
return cast;
});
log.state('Loaded image:', inFile, tensor.shape);
const result = await human.detect(tensor);
human.tf.dispose(tensor);
log.data(`Detected: ${image}:`, 'Face:', result.face.length, 'Body:', result.body.length, 'Hand:', result.hand.length, 'Objects:', result.object.length, 'Gestures:', result.gesture.length);
if (outFile) await saveFile(tensor.shape, buffer, result, outFile);
}
async function main() {
log.header();
globalThis.Canvas = canvas.Canvas; // patch global namespace with canvas library
globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library
log.info('Human:', human.version, 'TF:', tf.version_core);
const configErrors = await human.validate();
if (configErrors.length > 0) log.error('Configuration errors:', configErrors);
await human.load(); // pre-load models
log.info('Loaded models:', human.models.loaded());
const inDir = process.argv[2];
const outDir = process.argv[3];
if (!inDir) {
log.error('Parameters: <input-directory> missing');
return;
}
if (inDir && (!fs.existsSync(inDir) || !fs.statSync(inDir).isDirectory())) {
log.error('Invalid input directory:', fs.existsSync(inDir) && fs.statSync(inDir).isDirectory());
return;
}
if (!outDir) {
log.info('Parameters: <output-directory> missing, images will not be saved');
}
if (outDir && (!fs.existsSync(outDir) || !fs.statSync(outDir).isDirectory())) {
log.error('Invalid output directory:', fs.existsSync(outDir) && fs.statSync(outDir).isDirectory());
return;
}
const dir = fs.readdirSync(inDir);
const images = dir.filter((f) => fs.statSync(path.join(inDir, f)).isFile() && (f.toLocaleLowerCase().endsWith('.jpg') || f.toLocaleLowerCase().endsWith('.jpeg')));
log.info(`Processing folder: ${inDir} entries:`, dir.length, 'images', images.length);
const t0 = performance.now();
const promises = [];
for (let i = 0; i < images.length; i++) {
const inFile = path.join(inDir, images[i]);
const outFile = outDir ? path.join(outDir, images[i]) : null;
promises.push(processFile(images[i], inFile, outFile));
if (i % poolSize === 0) await Promise.all(promises);
}
await Promise.all(promises);
const t1 = performance.now();
log.info(`Processed ${images.length} images in ${Math.round(t1 - t0)} ms`);
}
main();
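
The `poolSize` logic above throttles concurrency by awaiting the accumulated promise list every fourth image. A minimal alternative sketch that runs discrete batches instead, reusing `processFile` and `path` from the demo; `processInBatches` and `batchSize` are hypothetical names:

```js
// hypothetical batch runner: same throttling effect as the poolSize counter above,
// but each batch tracks only its own promises
async function processInBatches(images, inDir, outDir, batchSize = 4) {
  for (let i = 0; i < images.length; i += batchSize) {
    const batch = images
      .slice(i, i + batchSize)
      .map((image) => processFile(image, path.join(inDir, image), outDir ? path.join(outDir, image) : null));
    await Promise.all(batch); // finish the whole batch before starting the next
  }
}
```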

View File

@ -24,13 +24,13 @@
a:hover { color: lightskyblue; text-decoration: none; }
.row { width: 90vw; margin: auto; margin-top: 100px; text-align: center; }
</style>
</head>
<body>
</head>
<body>
<div class="row text-center">
<h1>
<a href="/">Human: Offline</a><br>
<img alt="icon" src="../assets/icon.png">
</h1>
</div>
</body>
</body>
</html>

View File

@ -1,61 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Human Demo</title>
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
<meta name="mobile-web-app-capable" content="yes">
<meta name="application-name" content="Human Demo">
<meta name="keywords" content="Human Demo">
<meta name="description" content="Human Demo; Author: Vladimir Mandic <mandic00@live.com>">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../favicon.ico" type="image/x-icon">
<link rel="icon" sizes="256x256" href="../assets/icons/dash-256.png">
<link rel="apple-touch-icon" href="../assets/icons/dash-256.png">
<link rel="apple-touch-startup-image" href="../assets/icons/dash-256.png">
<style>
@font-face { font-family: 'CenturyGothic'; font-display: swap; font-style: normal; font-weight: 400; src: local('CenturyGothic'), url('../assets/century-gothic.ttf') format('truetype'); }
html { font-size: 18px; }
body { font-size: 1rem; font-family: "CenturyGothic", "Segoe UI", sans-serif; font-variant: small-caps; width: -webkit-fill-available; height: 100%; background: black; color: white; overflow: hidden; margin: 0; }
select { font-size: 1rem; font-family: "CenturyGothic", "Segoe UI", sans-serif; font-variant: small-caps; background: gray; color: white; border: none; }
</style>
<script src="../segmentation/index.js" type="module"></script>
</head>
<body>
<noscript><h1>javascript is required</h1></noscript>
<nav>
<div id="nav" class="nav"></div>
</nav>
<header>
<div id="header" class="header" style="position: fixed; top: 0; right: 0; padding: 4px; margin: 16px; background: rgba(0, 0, 0, 0.5); z-index: 10; line-height: 2rem;">
<label for="mode">mode</label>
<select id="mode" name="mode">
<option value="default">remove background</option>
<option value="alpha">draw alpha channel</option>
<option value="foreground">full foreground</option>
<option value="state">recurrent state</option>
</select><br>
<label for="composite">composite</label>
<select id="composite" name="composite"></select><br>
<label for="ratio">downsample ratio</label>
<input type="range" name="ratio" id="ratio" min="0.1" max="1" value="0.5" step="0.05">
<div id="fps" style="margin-top: 8px"></div>
</div>
</header>
<main>
<div id="main" class="main">
<video id="webcam" style="position: fixed; top: 0; left: 0; width: 50vw; height: 50vh"></video>
<img id="background" alt="background" style="position: fixed; top: 0; right: 0; width: 50vw; height: 50vh" controls></img>
<canvas id="output" style="position: fixed; bottom: 0; left: 0; height: 50vh"></canvas>
<canvas id="merge" style="position: fixed; bottom: 0; right: 0; height: 50vh"></canvas>
</div>
</main>
<footer>
<div id="footer" class="footer"></div>
</footer>
<aside>
<div id="aside" class="aside"></div>
</aside>
</body>
</html>

View File

@ -1,99 +0,0 @@
/**
* Human demo for browsers
* @default Human Library
* @summary <https://github.com/vladmandic/human>
* @author <https://github.com/vladmandic>
* @copyright <https://github.com/vladmandic>
* @license MIT
*/
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
const humanConfig = { // user configuration for human, used to fine-tune behavior
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
filter: { enabled: true, equalization: false, flip: false },
face: { enabled: false },
body: { enabled: false },
hand: { enabled: false },
object: { enabled: false },
gesture: { enabled: false },
segmentation: {
enabled: true,
modelPath: 'rvm.json', // can use rvm, selfie or meet
ratio: 0.5,
mode: 'default',
},
};
const backgroundImage = '../../samples/in/background.jpg';
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
const log = (...msg) => console.log(...msg); // eslint-disable-line no-console
async function main() {
// gather dom elements
const dom = {
background: document.getElementById('background'),
webcam: document.getElementById('webcam'),
output: document.getElementById('output'),
merge: document.getElementById('merge'),
mode: document.getElementById('mode'),
composite: document.getElementById('composite'),
ratio: document.getElementById('ratio'),
fps: document.getElementById('fps'),
};
// set defaults
dom.fps.innerText = 'initializing';
dom.ratio.valueAsNumber = human.config.segmentation.ratio;
dom.background.src = backgroundImage;
dom.composite.innerHTML = ['source-atop', 'color', 'color-burn', 'color-dodge', 'copy', 'darken', 'destination-atop', 'destination-in', 'destination-out', 'destination-over', 'difference', 'exclusion', 'hard-light', 'hue', 'lighten', 'lighter', 'luminosity', 'multiply', 'overlay', 'saturation', 'screen', 'soft-light', 'source-in', 'source-out', 'source-over', 'xor'].map((gco) => `<option value="${gco}">${gco}</option>`).join(''); // eslint-disable-line max-len
const ctxMerge = dom.merge.getContext('2d');
log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
log('platform:', human.env.platform, '| agent:', human.env.agent);
await human.load(); // preload all models
log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
log('models stats:', human.models.stats());
log('models loaded:', human.models.loaded());
await human.warmup(); // warmup function to initialize backend for future faster detection
const numTensors = human.tf.engine().state.numTensors;
// initialize webcam
dom.webcam.onplay = () => { // start processing on video play
log('start processing');
dom.output.width = human.webcam.width;
dom.output.height = human.webcam.height;
dom.merge.width = human.webcam.width;
dom.merge.height = human.webcam.height;
loop(); // eslint-disable-line no-use-before-define
};
await human.webcam.start({ element: dom.webcam, crop: true, width: window.innerWidth / 2, height: window.innerHeight / 2 }); // use human webcam helper methods and associate webcam stream with a dom element
if (!human.webcam.track) dom.fps.innerText = 'webcam error';
// processing loop
async function loop() {
if (!human.webcam.element || human.webcam.paused) return; // check if webcam is valid and playing
human.config.segmentation.mode = dom.mode.value; // get segmentation mode from ui
human.config.segmentation.ratio = dom.ratio.valueAsNumber; // get segmentation downsample ratio from ui
const t0 = Date.now();
const rgba = await human.segmentation(human.webcam.element, human.config); // run model and process results
const t1 = Date.now();
if (!rgba) {
dom.fps.innerText = 'error';
return;
}
dom.fps.innerText = `fps: ${Math.round(10000 / (t1 - t0)) / 10}`; // mark performance
human.draw.tensor(rgba, dom.output); // draw raw output
human.tf.dispose(rgba); // dispose tensors
ctxMerge.globalCompositeOperation = 'source-over';
ctxMerge.drawImage(dom.background, 0, 0); // draw original video to first stacked canvas
ctxMerge.globalCompositeOperation = dom.composite.value;
ctxMerge.drawImage(dom.output, 0, 0); // draw processed output to second stacked canvas
if (numTensors !== human.tf.engine().state.numTensors) log({ leak: human.tf.engine().state.numTensors - numTensors }); // check for memory leaks
requestAnimationFrame(loop);
}
}
window.onload = main;
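
The same segmentation calls used in the loop above also work one-shot against a static image. A minimal sketch reusing the demo's `human` instance; `imgElement` and `outputCanvas` are assumed DOM elements:

```js
// minimal one-shot sketch of the segmentation calls from the loop above
async function segmentOnce(imgElement, outputCanvas) {
  const rgba = await human.segmentation(imgElement, human.config); // run model and process results
  if (!rgba) return; // null result indicates a segmentation error
  human.draw.tensor(rgba, outputCanvas); // draw raw output
  human.tf.dispose(rgba); // dispose tensor once drawn
}
```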

25
demo/simple/index.html Normal file
View File

@ -0,0 +1,25 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<meta name="viewport" content="width=device-width" id="viewport">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./index.js" type="module"></script>
<style>
body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; text-align: center; }
body::-webkit-scrollbar { display: none; }
</style>
</head>
<body>
<canvas id="canvas" style="margin: 0 auto"></canvas>
<video id="video" playsinline style="display: none"></video>
</body>
</html>

86
demo/simple/index.js Normal file
View File

@ -0,0 +1,86 @@
/**
* Human demo for browsers
*
* @description Simple Human demo for browsers using WebCam or WebRTC
*
* @configuration
* config={}: contains all model configuration used by human
*/
import Human from '../../dist/human.esm.js'; // equivalent of @vladmandic/human
import webRTC from '../helpers/webrtc.js'; // handles webrtc handshake and connects to webrtc stream
const config = { // use default values
modelBasePath: '../../models',
};
const human = new Human(config);
const webrtc = {
enabled: false, // use webrtc if enabled, otherwise fall back to webcam
server: 'http://human.local:8002',
stream: 'reowhite',
};
// eslint-disable-next-line no-console
const log = (...msg) => console.log(...msg);
/** @type {HTMLVideoElement} */
// @ts-ignore
const video = document.getElementById('video') || document.createElement('video'); // used as input
/** @type {HTMLCanvasElement} */
// @ts-ignore
const canvas = document.getElementById('canvas') || document.createElement('canvas'); // used as output
async function webCam() {
const constraints = { audio: false, video: { facingMode: 'user', resizeMode: 'none', width: { ideal: document.body.clientWidth } } }; // set preferred camera options
const stream = await navigator.mediaDevices.getUserMedia(constraints); // get webcam stream that matches constraints
const ready = new Promise((resolve) => { video.onloadeddata = () => resolve(true); }); // resolve when stream is ready
video.srcObject = stream; // assign stream to video element
video.play(); // start stream
await ready; // wait until stream is ready
canvas.width = video.videoWidth; // resize output canvas to match input
canvas.height = video.videoHeight;
log('video stream:', video.srcObject, 'track state:', video.srcObject.getVideoTracks()[0].readyState, 'stream state:', video.readyState);
}
// eslint-disable-next-line no-unused-vars
let result;
async function detectionLoop() {
result = await human.detect(video); // updates result every time detection completes
// eslint-disable-next-line @typescript-eslint/no-unused-vars
requestAnimationFrame(detectionLoop); // run in loop
}
// eslint-disable-next-line no-unused-vars
async function drawLoop() {
const interpolated = await human.next(result); // interpolates results based on last known results
await human.draw.canvas(video, canvas); // draw input video to output canvas
await human.draw.all(canvas, interpolated); // draw results as overlay on output canvas
// eslint-disable-next-line @typescript-eslint/no-unused-vars
requestAnimationFrame(drawLoop); // run in loop
}
// eslint-disable-next-line no-unused-vars
async function singleLoop() {
result = await human.detect(video); // updates result every time detection completes
await human.draw.canvas(video, canvas); // draw input video to output canvas
await human.draw.all(canvas, result); // draw results as overlay on output canvas
// eslint-disable-next-line @typescript-eslint/no-unused-vars
requestAnimationFrame(singleLoop); // run in loop
}
async function main() {
await human.load(); // not required, pre-loads all models
await human.warmup(); // not required, warms up all models
if (webrtc.enabled) await webRTC(webrtc.server, webrtc.stream, video); // setup webrtc as input stream, uses helper implementation in ../helpers/webrtc.js
else await webCam(); // setup webcam as input stream
// preferred: run in two loops, one for actual detection and one that draws interpolated results on screen so results appear much smoother
await detectionLoop();
await drawLoop();
// alternative run in single loop where we run detection and then draw results
// await singleLoop();
}
window.onload = main;

View File

@ -1,28 +0,0 @@
## Tracker
### Based on
<https://github.com/opendatacam/node-moving-things-tracker>
### Build
- remove reference to `lodash`:
> `isEqual` in <tracker.js>
- replace external lib:
> curl https://raw.githubusercontent.com/ubilabs/kd-tree-javascript/master/kdTree.js -o lib/kdTree-min.js
- build with `esbuild`:
> node_modules/.bin/esbuild --bundle tracker.js --format=esm --platform=browser --target=esnext --keep-names --tree-shaking=false --analyze --outfile=/home/vlado/dev/human/demo/tracker/tracker.js --banner:js="/* eslint-disable */"
### Usage
computeDistance(item1, item2)
disableKeepInMemory()
enableKeepInMemory()
getAllTrackedItems()
getJSONDebugOfTrackedItems(roundInt = true)
getJSONOfAllTrackedItems()
getJSONOfTrackedItems(roundInt = true)
getTrackedItemsInMOTFormat(frameNb)
reset()
setParams(newParams)
updateTrackedItemsWithNewFrame(detectionsOfThisFrame, frameNb)
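
A minimal usage sketch of the API listed above; the per-frame flow and the detection shape follow upstream node-moving-things-tracker conventions and are assumptions, not verified against this bundle (`frames` is a hypothetical array of per-frame detection lists):

```js
import tracker from './tracker.js'; // the esbuild bundle produced above

tracker.reset();
tracker.setParams({ unMatchedFramesTolerance: 60, iouLimit: 0.1, distanceLimit: 0.1, matchingAlgorithm: 'kdTree' });
frames.forEach((detections, frameNb) => tracker.updateTrackedItemsWithNewFrame(detections, frameNb));
console.log(tracker.getJSONOfTrackedItems(true)); // tracked items with rounded coordinates
```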

View File

@ -1,65 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<meta name="viewport" content="width=device-width" id="viewport">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./index.js" type="module"></script>
<style>
html { font-family: 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
body::-webkit-scrollbar { display: none; }
input[type="file"] { font-family: 'Segoe UI'; font-size: 14px; font-variant: small-caps; }
::-webkit-file-upload-button { background: #333333; color: white; border: 0; border-radius: 0; padding: 6px 16px; box-shadow: 4px 4px 4px #222222; font-family: 'Segoe UI'; font-size: 14px; font-variant: small-caps; }
</style>
</head>
<body>
<div style="display: flex">
<video id="video" playsinline style="width: 25vw" controls controlslist="nofullscreen nodownload noremoteplayback" disablepictureinpicture loop></video>
<canvas id="canvas" style="width: 75vw"></canvas>
</div>
<div class="uploader" style="padding: 8px">
<input type="file" name="inputvideo" id="inputvideo" accept="video/*"></input>
<input type="checkbox" id="interpolation" name="interpolation"></input>
<label for="tracker">interpolation</label>
</div>
<form id="config" style="padding: 8px; line-height: 1.6rem;">
tracker |
<input type="checkbox" id="tracker" name="tracker" checked></input>
<label for="tracker">enabled</label> |
<input type="checkbox" id="keepInMemory" name="keepInMemory"></input>
<label for="keepInMemory">keepInMemory</label> |
<br>
tracker source |
<input type="radio" id="box-face" name="box" value="face" checked>
<label for="box-face">face</label> |
<input type="radio" id="box-body" name="box" value="body">
<label for="box-face">body</label> |
<input type="radio" id="box-object" name="box" value="object">
<label for="box-face">object</label> |
<br>
tracker config |
<input type="range" id="unMatchedFramesTolerance" name="unMatchedFramesTolerance" min="0" max="300" step="1", value="60"></input>
<label for="unMatchedFramesTolerance">unMatchedFramesTolerance</label> |
<input type="range" id="iouLimit" name="unMatchedFramesTolerance" min="0" max="1" step="0.01", value="0.1"></input>
<label for="iouLimit">iouLimit</label> |
<input type="range" id="distanceLimit" name="unMatchedFramesTolerance" min="0" max="1" step="0.01", value="0.1"></input>
<label for="distanceLimit">distanceLimit</label> |
<input type="radio" id="matchingAlgorithm-kdTree" name="matchingAlgorithm" value="kdTree" checked>
<label for="matchingAlgorithm-kdTree">kdTree</label> |
<input type="radio" id="matchingAlgorithm-munkres" name="matchingAlgorithm" value="munkres">
<label for="matchingAlgorithm-kdTree">munkres</label> |
</form>
<pre id="status" style="position: absolute; top: 12px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
<pre id="log" style="padding: 8px"></pre>
<div id="performance" style="position: absolute; bottom: 0; width: 100%; padding: 8px; font-size: 0.8rem;"></div>
</body>
</html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -1,208 +0,0 @@
/**
* Human demo for browsers
* @default Human Library
* @summary <https://github.com/vladmandic/human>
* @author <https://github.com/vladmandic>
* @copyright <https://github.com/vladmandic>
* @license MIT
*/
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
import tracker from './tracker.js';
const humanConfig: Partial<H.Config> = { // user configuration for human, used to fine-tune behavior
debug: true,
backend: 'webgl',
// cacheSensitivity: 0,
// cacheModels: false,
// warmup: 'none',
modelBasePath: 'https://vladmandic.github.io/human-models/models',
filter: { enabled: true, equalization: false, flip: false },
face: {
enabled: true,
detector: { rotation: false, maxDetected: 10, minConfidence: 0.3 },
mesh: { enabled: true },
attention: { enabled: false },
iris: { enabled: false },
description: { enabled: false },
emotion: { enabled: false },
antispoof: { enabled: false },
liveness: { enabled: false },
},
body: { enabled: false, maxDetected: 6, modelPath: 'movenet-multipose.json' },
hand: { enabled: false },
object: { enabled: false, maxDetected: 10 },
segmentation: { enabled: false },
gesture: { enabled: false },
};
interface TrackerConfig {
unMatchedFramesTolerance: number, // number of frames an object can stay unmatched before it is considered gone; ignored if fastDelete is set
iouLimit: number, // exclude candidates from matching if their IOU is less than this; 1 means total overlap, 0 means no overlap
fastDelete: boolean, // remove new objects immediately if they could not be matched in the next frames; if set, ignores unMatchedFramesTolerance
distanceLimit: number, // distance limit for matching; to exclude values from matching, set their distance to something greater than this limit
matchingAlgorithm: 'kdTree' | 'munkres', // algorithm used to match tracks with new detections
}
interface TrackerResult {
id: number,
confidence: number,
bearing: number,
isZombie: boolean,
name: string,
x: number,
y: number,
w: number,
h: number,
}
const trackerConfig: TrackerConfig = {
unMatchedFramesTolerance: 100,
iouLimit: 0.05,
fastDelete: false,
distanceLimit: 1e4,
matchingAlgorithm: 'kdTree',
};
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
const dom = { // grab instances of dom objects so we dont have to look them up later
video: document.getElementById('video') as HTMLVideoElement,
canvas: document.getElementById('canvas') as HTMLCanvasElement,
log: document.getElementById('log') as HTMLPreElement,
fps: document.getElementById('status') as HTMLPreElement,
tracker: document.getElementById('tracker') as HTMLInputElement,
interpolation: document.getElementById('interpolation') as HTMLInputElement,
config: document.getElementById('config') as HTMLFormElement,
ctx: (document.getElementById('canvas') as HTMLCanvasElement).getContext('2d') as CanvasRenderingContext2D,
};
const timestamp = { detect: 0, draw: 0, tensors: 0, start: 0 }; // holds information used to calculate performance and possible memory leaks
const fps = { detectFPS: 0, drawFPS: 0, frames: 0, averageMs: 0 }; // holds calculated fps information for both detect and screen refresh
const log = (...msg) => { // helper method to output messages
dom.log.innerText += msg.join(' ') + '\n';
console.log(...msg); // eslint-disable-line no-console
};
const status = (msg) => dom.fps.innerText = msg; // print status element
async function detectionLoop() { // main detection loop
if (!dom.video.paused && dom.video.readyState >= 2) {
if (timestamp.start === 0) timestamp.start = human.now();
// log('profiling data:', await human.profile(dom.video));
await human.detect(dom.video, humanConfig); // actual detection; we're not capturing output in a local variable as it can also be reached via human.result
const tensors = human.tf.memory().numTensors; // check current tensor usage for memory leaks
if (tensors - timestamp.tensors !== 0) log('allocated tensors:', tensors - timestamp.tensors); // printed on start and each time there is a tensor leak
timestamp.tensors = tensors;
fps.detectFPS = Math.round(1000 * 1000 / (human.now() - timestamp.detect)) / 1000;
fps.frames++;
fps.averageMs = Math.round(1000 * (human.now() - timestamp.start) / fps.frames) / 1000;
}
timestamp.detect = human.now();
requestAnimationFrame(detectionLoop); // start new frame immediately
}
function drawLoop() { // main screen refresh loop
if (!dom.video.paused && dom.video.readyState >= 2) {
const res: H.Result = dom.interpolation.checked ? human.next(human.result) : human.result; // interpolate results if enabled
let tracking: H.FaceResult[] | H.BodyResult[] | H.ObjectResult[] = [];
if (human.config.face.enabled) tracking = res.face;
else if (human.config.body.enabled) tracking = res.body;
else if (human.config.object.enabled) tracking = res.object;
else log('unknown object type');
let data: TrackerResult[] = [];
if (dom.tracker.checked) {
const items = tracking.map((obj) => ({
x: obj.box[0] + obj.box[2] / 2,
y: obj.box[1] + obj.box[3] / 2,
w: obj.box[2],
h: obj.box[3],
name: obj.label || (human.config.face.enabled ? 'face' : 'body'),
confidence: obj.score,
}));
tracker.updateTrackedItemsWithNewFrame(items, fps.frames);
data = tracker.getJSONOfTrackedItems(true) as TrackerResult[];
}
human.draw.canvas(dom.video, dom.canvas); // copy input video frame to output canvas
for (let i = 0; i < tracking.length; i++) {
// @ts-ignore
const name = tracking[i].label || (human.config.face.enabled ? 'face' : 'body');
dom.ctx.strokeRect(tracking[i].box[0], tracking[i].box[1], tracking[i].box[2], tracking[i].box[3]); // box is [x, y, width, height]
dom.ctx.fillText(`id: ${tracking[i].id} ${Math.round(100 * tracking[i].score)}% ${name}`, tracking[i].box[0] + 4, tracking[i].box[1] + 16);
if (data[i]) {
dom.ctx.fillText(`t: ${data[i].id} ${Math.round(100 * data[i].confidence)}% ${data[i].name} ${data[i].isZombie ? 'zombie' : ''}`, tracking[i].box[0] + 4, tracking[i].box[1] + 34);
}
}
}
const now = human.now();
fps.drawFPS = Math.round(1000 * 1000 / (now - timestamp.draw)) / 1000;
timestamp.draw = now;
status(dom.video.paused ? 'paused' : `fps: ${fps.detectFPS.toFixed(1).padStart(5, ' ')} detect | ${fps.drawFPS.toFixed(1).padStart(5, ' ')} draw`); // write status
setTimeout(drawLoop, 30); // use to slow down refresh from max refresh rate to target of 30 fps
}
async function handleVideo(file: File) {
const url = URL.createObjectURL(file);
dom.video.src = url;
await dom.video.play();
log('loaded video:', file.name, 'resolution:', [dom.video.videoWidth, dom.video.videoHeight], 'duration:', dom.video.duration);
dom.canvas.width = dom.video.videoWidth;
dom.canvas.height = dom.video.videoHeight;
dom.ctx.strokeStyle = 'white';
dom.ctx.fillStyle = 'white';
dom.ctx.font = '16px Segoe UI';
dom.video.playbackRate = 0.25;
}
function initInput() {
document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
document.body.addEventListener('dragover', (evt) => evt.preventDefault());
document.body.addEventListener('drop', async (evt) => {
evt.preventDefault();
if (evt.dataTransfer) evt.dataTransfer.dropEffect = 'copy';
const file = evt.dataTransfer?.files?.[0];
if (file) await handleVideo(file);
log('video ready state:', dom.video.readyState);
});
(document.getElementById('inputvideo') as HTMLInputElement).onchange = async (evt) => {
evt.preventDefault();
const file = evt.target?.['files']?.[0];
if (file) await handleVideo(file);
};
dom.config.onchange = () => {
trackerConfig.distanceLimit = (document.getElementById('distanceLimit') as HTMLInputElement).valueAsNumber;
trackerConfig.iouLimit = (document.getElementById('iouLimit') as HTMLInputElement).valueAsNumber;
trackerConfig.unMatchedFramesTolerance = (document.getElementById('unMatchedFramesTolerance') as HTMLInputElement).valueAsNumber;
trackerConfig.matchingAlgorithm = (document.getElementById('matchingAlgorithm-kdTree') as HTMLInputElement).checked ? 'kdTree' : 'munkres';
tracker.setParams(trackerConfig);
if ((document.getElementById('keepInMemory') as HTMLInputElement).checked) tracker.enableKeepInMemory();
else tracker.disableKeepInMemory();
tracker.reset();
log('tracker config change', JSON.stringify(trackerConfig));
humanConfig.face!.enabled = (document.getElementById('box-face') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
humanConfig.body!.enabled = (document.getElementById('box-body') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
humanConfig.object!.enabled = (document.getElementById('box-object') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
};
dom.tracker.onchange = (evt) => {
log('tracker', (evt.target as HTMLInputElement).checked ? 'enabled' : 'disabled');
tracker.setParams(trackerConfig);
tracker.reset();
};
}
async function main() { // main entry point
log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
log('platform:', human.env.platform, '| agent:', human.env.agent);
status('loading...');
await human.load(); // preload all models
log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
log('models loaded:', human.models.loaded());
status('initializing...');
await human.warmup(); // warmup function to initialize backend for future faster detection
initInput(); // initialize input
await detectionLoop(); // start detection loop
drawLoop(); // start draw loop
}
window.onload = main;
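The tracker module's surface, as exercised by this demo, distills to a short sketch. The item shape is inferred from the mapping code in drawLoop above; values and the frame number are illustrative, not an authoritative interface definition:

import tracker from './tracker.js';
tracker.setParams(trackerConfig); // apply TrackerConfig values before processing frames
const items = [{ x: 320, y: 240, w: 64, h: 64, name: 'face', confidence: 0.9 }]; // centroid-based boxes, as built in drawLoop
tracker.updateTrackedItemsWithNewFrame(items, 1); // second argument is the current frame number (fps.frames in the demo)
const tracked = tracker.getJSONOfTrackedItems(true) as TrackerResult[]; // read back matched tracks, including zombies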

File diff suppressed because it is too large

View File

@ -1,5 +0,0 @@
# Human Demo in TypeScript for Browsers
Simple demo app that can be used as a quick-start guide for using `Human` in browser environments (a minimal sketch follows below)
- `index.ts` is compiled to `index.js` which is loaded from `index.html`
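A minimal sketch of the pattern `index.ts` follows, using only API calls shown in the demos in this repo; paths and config values are illustrative, so see the full source for details:

```js
import * as H from '../../dist/human.esm.js';

const human = new H.Human({ modelBasePath: '../../models' }); // models can also be loaded from a cdn
const canvas = document.getElementById('canvas');

async function drawLoop() {
  const interpolated = human.next(); // smoothed last-known results
  human.draw.canvas(human.webcam.element, canvas); // mirror the webcam frame to the output canvas
  await human.draw.all(canvas, interpolated); // overlay detection results
  setTimeout(drawLoop, 30); // ~30 fps refresh
}

async function main() {
  await human.load(); // preload models
  await human.warmup(); // initialize backend
  await human.webcam.start({ crop: true }); // find and start the webcam
  human.video(human.webcam.element); // detect video frames continuously in the background
  canvas.width = human.webcam.width; // match canvas to webcam native resolution
  canvas.height = human.webcam.height;
  await drawLoop();
}

window.onload = main;
```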

View File

@ -1,9 +0,0 @@
/*
Human
homepage: <https://github.com/vladmandic/human>
author: <https://github.com/vladmandic>
*/
import*as m from"../../dist/human.esm.js";var v=1920,b={debug:!0,backend:"webgl",modelBasePath:"https://vladmandic.github.io/human-models/models/",filter:{enabled:!0,equalization:!1,flip:!1},face:{enabled:!0,detector:{rotation:!1},mesh:{enabled:!0},attention:{enabled:!1},iris:{enabled:!0},description:{enabled:!0},emotion:{enabled:!0},antispoof:{enabled:!0},liveness:{enabled:!0}},body:{enabled:!1},hand:{enabled:!1},object:{enabled:!1},segmentation:{enabled:!1},gesture:{enabled:!0}},e=new m.Human(b);e.env.perfadd=!1;e.draw.options.font='small-caps 18px "Lato"';e.draw.options.lineHeight=20;e.draw.options.drawPoints=!0;var a={video:document.getElementById("video"),canvas:document.getElementById("canvas"),log:document.getElementById("log"),fps:document.getElementById("status"),perf:document.getElementById("performance")},n={detect:0,draw:0,tensors:0,start:0},s={detectFPS:0,drawFPS:0,frames:0,averageMs:0},o=(...t)=>{a.log.innerText+=t.join(" ")+`
`,console.log(...t)},i=t=>a.fps.innerText=t,g=t=>a.perf.innerText="tensors:"+e.tf.memory().numTensors.toString()+" | performance: "+JSON.stringify(t).replace(/"|{|}/g,"").replace(/,/g," | ");async function f(){if(!a.video.paused){n.start===0&&(n.start=e.now()),await e.detect(a.video);let t=e.tf.memory().numTensors;t-n.tensors!==0&&o("allocated tensors:",t-n.tensors),n.tensors=t,s.detectFPS=Math.round(1e3*1e3/(e.now()-n.detect))/1e3,s.frames++,s.averageMs=Math.round(1e3*(e.now()-n.start)/s.frames)/1e3,s.frames%100===0&&!a.video.paused&&o("performance",{...s,tensors:n.tensors})}n.detect=e.now(),requestAnimationFrame(f)}async function u(){var d,r,c;if(!a.video.paused){let l=e.next(e.result),w=await e.image(a.video);e.draw.canvas(w.canvas,a.canvas);let p={bodyLabels:`person confidence [score] and ${(c=(r=(d=e.result)==null?void 0:d.body)==null?void 0:r[0])==null?void 0:c.keypoints.length} keypoints`};await e.draw.all(a.canvas,l,p),g(l.performance)}let t=e.now();s.drawFPS=Math.round(1e3*1e3/(t-n.draw))/1e3,n.draw=t,i(a.video.paused?"paused":`fps: ${s.detectFPS.toFixed(1).padStart(5," ")} detect | ${s.drawFPS.toFixed(1).padStart(5," ")} draw`),setTimeout(u,30)}async function h(){let d=(await e.webcam.enumerate())[0].deviceId,r=await e.webcam.start({element:a.video,crop:!1,width:v,id:d});o(r),a.canvas.width=e.webcam.width,a.canvas.height=e.webcam.height,a.canvas.onclick=async()=>{e.webcam.paused?await e.webcam.play():e.webcam.pause()}}async function y(){o("human version:",e.version,"| tfjs version:",e.tf.version["tfjs-core"]),o("platform:",e.env.platform,"| agent:",e.env.agent),i("loading..."),await e.load(),o("backend:",e.tf.getBackend(),"| available:",e.env.backends),o("models stats:",e.models.stats()),o("models loaded:",e.models.loaded()),o("environment",e.env),i("initializing..."),await e.warmup(),await h(),await f(),await u()}window.onload=y;
//# sourceMappingURL=index.js.map

File diff suppressed because one or more lines are too long

View File

@ -1,119 +0,0 @@
/**
* Human demo for browsers
* @default Human Library
* @summary <https://github.com/vladmandic/human>
* @author <https://github.com/vladmandic>
* @copyright <https://github.com/vladmandic>
* @license MIT
*/
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
const width = 1920; // used by webcam config as well as human's maximum resolution // can be anything, but resolutions higher than 4k will disable internal optimizations
const humanConfig: Partial<H.Config> = { // user configuration for human, used to fine-tune behavior
debug: true,
backend: 'webgl',
// cacheSensitivity: 0,
// cacheModels: false,
// warmup: 'none',
// modelBasePath: '../../models',
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
filter: { enabled: true, equalization: false, flip: false },
face: { enabled: true, detector: { rotation: false }, mesh: { enabled: true }, attention: { enabled: false }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true }, antispoof: { enabled: true }, liveness: { enabled: true } },
body: { enabled: false },
hand: { enabled: false },
object: { enabled: false },
segmentation: { enabled: false },
gesture: { enabled: true },
};
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
human.env.perfadd = false; // is performance data showing instant or total values
human.draw.options.font = 'small-caps 18px "Lato"'; // set font used to draw labels when using draw methods
human.draw.options.lineHeight = 20;
human.draw.options.drawPoints = true; // draw points on face mesh
// human.draw.options.fillPolygons = true;
const dom = { // grab instances of dom objects so we dont have to look them up later
video: document.getElementById('video') as HTMLVideoElement,
canvas: document.getElementById('canvas') as HTMLCanvasElement,
log: document.getElementById('log') as HTMLPreElement,
fps: document.getElementById('status') as HTMLPreElement,
perf: document.getElementById('performance') as HTMLDivElement,
};
const timestamp = { detect: 0, draw: 0, tensors: 0, start: 0 }; // holds information used to calculate performance and possible memory leaks
const fps = { detectFPS: 0, drawFPS: 0, frames: 0, averageMs: 0 }; // holds calculated fps information for both detect and screen refresh
const log = (...msg) => { // helper method to output messages
dom.log.innerText += msg.join(' ') + '\n';
console.log(...msg); // eslint-disable-line no-console
};
const status = (msg) => dom.fps.innerText = msg; // print status element
const perf = (msg) => dom.perf.innerText = 'tensors:' + human.tf.memory().numTensors.toString() + ' | performance: ' + JSON.stringify(msg).replace(/"|{|}/g, '').replace(/,/g, ' | '); // print performance element
async function detectionLoop() { // main detection loop
if (!dom.video.paused) {
if (timestamp.start === 0) timestamp.start = human.now();
// log('profiling data:', await human.profile(dom.video));
await human.detect(dom.video); // actual detection; we're not capturing output in a local variable as it can also be reached via human.result
const tensors = human.tf.memory().numTensors; // check current tensor usage for memory leaks
if (tensors - timestamp.tensors !== 0) log('allocated tensors:', tensors - timestamp.tensors); // printed on start and each time there is a tensor leak
timestamp.tensors = tensors;
fps.detectFPS = Math.round(1000 * 1000 / (human.now() - timestamp.detect)) / 1000;
fps.frames++;
fps.averageMs = Math.round(1000 * (human.now() - timestamp.start) / fps.frames) / 1000;
if (fps.frames % 100 === 0 && !dom.video.paused) log('performance', { ...fps, tensors: timestamp.tensors });
}
timestamp.detect = human.now();
requestAnimationFrame(detectionLoop); // start new frame immediately
}
async function drawLoop() { // main screen refresh loop
if (!dom.video.paused) {
const interpolated = human.next(human.result); // smoothen result using last-known results
const processed = await human.image(dom.video); // get current video frame, but enhanced with human.filters
human.draw.canvas(processed.canvas as HTMLCanvasElement, dom.canvas);
const opt: Partial<H.DrawOptions> = { bodyLabels: `person confidence [score] and ${human.result?.body?.[0]?.keypoints.length} keypoints` };
await human.draw.all(dom.canvas, interpolated, opt); // draw labels, boxes, lines, etc.
perf(interpolated.performance); // write performance data
}
const now = human.now();
fps.drawFPS = Math.round(1000 * 1000 / (now - timestamp.draw)) / 1000;
timestamp.draw = now;
status(dom.video.paused ? 'paused' : `fps: ${fps.detectFPS.toFixed(1).padStart(5, ' ')} detect | ${fps.drawFPS.toFixed(1).padStart(5, ' ')} draw`); // write status
setTimeout(drawLoop, 30); // use to slow down refresh from max refresh rate to target of 30 fps
}
async function webCam() {
const devices = await human.webcam.enumerate();
const id = devices[0].deviceId; // use first available video source
const webcamStatus = await human.webcam.start({ element: dom.video, crop: false, width, id }); // use human webcam helper methods and associate webcam stream with a dom element
log(webcamStatus);
dom.canvas.width = human.webcam.width;
dom.canvas.height = human.webcam.height;
dom.canvas.onclick = async () => { // pause when clicked on screen and resume on next click
if (human.webcam.paused) await human.webcam.play();
else human.webcam.pause();
};
}
async function main() { // main entry point
log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
log('platform:', human.env.platform, '| agent:', human.env.agent);
status('loading...');
await human.load(); // preload all models
log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
log('models stats:', human.models.stats());
log('models loaded:', human.models.loaded());
log('environment', human.env);
status('initializing...');
await human.warmup(); // warmup function to initialize backend for future faster detection
await webCam(); // start webcam
await detectionLoop(); // start detection loop
await drawLoop(); // start draw loop
}
window.onload = main;

View File

@ -1,58 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<meta name="viewport" content="width=device-width" id="viewport">
<meta name="keywords" content="Human">
<meta name="description" content="Human: Demo; Author: Vladimir Mandic <https://github.com/vladmandic>">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
body { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; margin: 0; background: black; color: white; overflow: hidden; width: 100vw; height: 100vh; }
</style>
</head>
<body>
<canvas id="canvas" style="margin: 0 auto; width: 100%"></canvas>
<pre id="log" style="padding: 8px; position: fixed; bottom: 0"></pre>
<script type="module">
import * as H from '../../dist/human.esm.js'; // equivalent of import @vladmandic/Human
const humanConfig = { // user configuration for human, used to fine-tune behavior
modelBasePath: '../../models', // models can be loaded directly from cdn as well
filter: { enabled: true, equalization: true, flip: false },
face: { enabled: true, detector: { rotation: false }, mesh: { enabled: true }, attention: { enabled: false }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true } },
body: { enabled: true },
hand: { enabled: true },
gesture: { enabled: true },
object: { enabled: false },
segmentation: { enabled: false },
};
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
const canvas = document.getElementById('canvas'); // output canvas to draw both webcam and detection results
async function drawLoop() { // main screen refresh loop
const interpolated = human.next(); // get smoothed result using last-known results which are continuously updated based on input webcam video
human.draw.canvas(human.webcam.element, canvas); // draw webcam video to screen canvas // better than using the processed image as this loop runs faster than the processing loop
await human.draw.all(canvas, interpolated); // draw labels, boxes, lines, etc.
setTimeout(drawLoop, 30); // use to slow down refresh from max refresh rate to target of 1000/30 ~ 30 fps
}
async function main() { // main entry point
document.getElementById('log').innerHTML = `human version: ${human.version} | tfjs version: ${human.tf.version['tfjs-core']}<br>platform: ${human.env.platform} | agent ${human.env.agent}`;
await human.webcam.start({ crop: true }); // find webcam and start it
human.video(human.webcam.element); // instruct human to continuously detect video frames
canvas.width = human.webcam.width; // set canvas resolution to input webcam native resolution
canvas.height = human.webcam.height;
canvas.onclick = async () => { // pause when clicked on screen and resume on next click
if (human.webcam.paused) await human.webcam.play();
else human.webcam.pause();
};
await drawLoop(); // start draw loop
}
window.onload = main;
</script>
</body>
</html>

View File

@ -18,13 +18,16 @@
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
body::-webkit-scrollbar { display: none; }
.status { position: absolute; width: 100vw; bottom: 10%; text-align: center; font-size: 3rem; font-weight: 100; text-shadow: 2px 2px #303030; }
.log { position: absolute; bottom: 0; margin: 0.4rem 0.4rem 0 0.4rem; font-size: 0.9rem; }
.video { display: none; }
.canvas { margin: 0 auto; }
</style>
</head>
<body>
<canvas id="canvas" style="margin: 0 auto; width: 100vw"></canvas>
<video id="video" playsinline style="display: none"></video>
<pre id="status" style="position: absolute; top: 12px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
<pre id="log" style="padding: 8px"></pre>
<div id="performance" style="position: absolute; bottom: 0; width: 100%; padding: 8px; font-size: 0.8rem;"></div>
<div id="status" class="status"></div>
<canvas id="canvas" class="canvas"></canvas>
<video id="video" playsinline class="video"></video>
<div id="log" class="log"></div>
</body>
</html>

277
demo/webgpu/index.js Normal file
View File

@ -0,0 +1,277 @@
/**
* Human demo for browsers
*
* @description Experimental Demo app for Human using WebGPU
*
*/
// @ts-nocheck // typescript checks disabled as this is pure javascript
import Human from '../../dist/human.esm.js';
import GLBench from '../helpers/gl-bench.js';
const workerJS = './worker.js';
const backend = 'webgpu';
const config = {
main: { // processes input and runs gesture analysis
warmup: 'none',
backend,
modelBasePath: '../../models/',
async: false,
filter: { enabled: true },
face: { enabled: false },
object: { enabled: false },
gesture: { enabled: true },
hand: { enabled: false },
body: { enabled: false },
segmentation: { enabled: false },
},
face: { // runs all face models
warmup: 'none',
backend,
modelBasePath: '../../models/',
async: false,
filter: { enabled: false },
face: { enabled: true,
detector: { return: false, rotation: false },
mesh: { enabled: true },
iris: { enabled: false },
description: { enabled: true },
emotion: { enabled: false },
},
object: { enabled: false },
gesture: { enabled: false },
hand: { enabled: false },
body: { enabled: false },
segmentation: { enabled: false },
},
body: { // runs body model
warmup: 'none',
backend,
modelBasePath: '../../models/',
async: false,
filter: { enabled: false },
face: { enabled: false },
object: { enabled: false },
gesture: { enabled: false },
hand: { enabled: false },
body: { enabled: true },
segmentation: { enabled: false },
},
hand: { // runs hands model
warmup: 'none',
backend,
modelBasePath: '../../models/',
async: false,
filter: { enabled: false },
face: { enabled: false },
object: { enabled: false },
gesture: { enabled: false },
hand: { enabled: true, rotation: false },
body: { enabled: false },
segmentation: { enabled: false },
},
object: { // runs object model
warmup: 'none',
backend,
modelBasePath: '../../models/',
async: false,
filter: { enabled: false },
face: { enabled: false },
object: { enabled: false },
gesture: { enabled: false },
hand: { enabled: false },
body: { enabled: false },
segmentation: { enabled: false },
},
};
let human;
let canvas;
let video;
let bench;
const busy = {
face: false,
hand: false,
body: false,
object: false,
};
const workers = {
face: null,
body: null,
hand: null,
object: null,
};
const time = {
main: 0,
draw: 0,
face: '[warmup]',
body: '[warmup]',
hand: '[warmup]',
object: '[warmup]',
};
const start = {
main: 0,
draw: 0,
face: 0,
body: 0,
hand: 0,
object: 0,
};
const result = { // initialize empty result object which will be partially filled with results from each thread
performance: {},
hand: [],
body: [],
face: [],
object: [],
};
function log(...msg) {
  const dt = new Date();
  const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
  // eslint-disable-next-line no-console
  console.log(ts, ...msg);
}
function status(msg) { // write status message to the status element; referenced below but missing from the original demo
  const div = document.getElementById('status');
  if (div) div.innerText = msg;
}
async function drawResults() {
start.draw = performance.now();
const interpolated = human.next(result);
await human.draw.all(canvas, interpolated);
time.draw = Math.round(1 + performance.now() - start.draw);
const fps = Math.round(10 * 1000 / time.main) / 10;
const draw = Math.round(10 * 1000 / time.draw) / 10;
document.getElementById('log').innerText = `Human: version ${human.version} | Performance: Main ${time.main}ms Face: ${time.face}ms Body: ${time.body}ms Hand: ${time.hand}ms Object ${time.object}ms | FPS: ${fps} / ${draw}`;
requestAnimationFrame(drawResults);
}
async function receiveMessage(msg) {
result[msg.data.type] = msg.data.result;
busy[msg.data.type] = false;
time[msg.data.type] = Math.round(performance.now() - start[msg.data.type]);
}
async function runDetection() {
start.main = performance.now();
if (!bench) {
bench = new GLBench(null, { trackGPU: false, chartHz: 20, chartLen: 20 });
bench.begin();
}
const ctx = canvas.getContext('2d');
// const image = await human.image(video);
// ctx.drawImage(image.canvas, 0, 0, canvas.width, canvas.height);
ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
if (!busy.face) {
  busy.face = true;
  start.face = performance.now();
  const faceBuffer = imageData.data.buffer.slice(0); // copy pixel buffer so each worker receives its own transferable
  workers.face.postMessage({ image: faceBuffer, width: canvas.width, height: canvas.height, config: config.face, type: 'face' }, [faceBuffer]);
}
if (!busy.body) {
  busy.body = true;
  start.body = performance.now();
  const bodyBuffer = imageData.data.buffer.slice(0);
  workers.body.postMessage({ image: bodyBuffer, width: canvas.width, height: canvas.height, config: config.body, type: 'body' }, [bodyBuffer]);
}
if (!busy.hand) {
  busy.hand = true;
  start.hand = performance.now();
  const handBuffer = imageData.data.buffer.slice(0);
  workers.hand.postMessage({ image: handBuffer, width: canvas.width, height: canvas.height, config: config.hand, type: 'hand' }, [handBuffer]);
}
if (!busy.object) {
  busy.object = true;
  start.object = performance.now();
  const objectBuffer = imageData.data.buffer.slice(0);
  workers.object.postMessage({ image: objectBuffer, width: canvas.width, height: canvas.height, config: config.object, type: 'object' }, [objectBuffer]);
}
time.main = Math.round(performance.now() - start.main);
bench.nextFrame();
requestAnimationFrame(runDetection);
}
async function setupCamera() {
video = document.getElementById('video');
canvas = document.getElementById('canvas');
const output = document.getElementById('log');
let stream;
const constraints = {
audio: false,
video: {
facingMode: 'user',
resizeMode: 'crop-and-scale',
width: { ideal: document.body.clientWidth },
// height: { ideal: document.body.clientHeight }, // not set as we're using aspectRatio to get height instead
aspectRatio: document.body.clientWidth / document.body.clientHeight,
},
};
// enumerate devices for diag purposes
navigator.mediaDevices.enumerateDevices().then((devices) => log('enumerated devices:', devices));
log('camera constraints', constraints);
try {
stream = await navigator.mediaDevices.getUserMedia(constraints);
} catch (err) {
output.innerText += `\n${err.name}: ${err.message}`;
status(err.name);
log('camera error:', err);
return null; // cannot continue without a camera stream
}
const tracks = stream.getVideoTracks();
log('enumerated viable tracks:', tracks);
const track = tracks[0];
const settings = track.getSettings();
log('selected video source:', track, settings);
const promise = new Promise((resolve) => {
video.onloadeddata = () => {
if (settings.width > settings.height) canvas.style.width = '100vw';
else canvas.style.height = '100vh';
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
video.play();
resolve();
};
});
// attach input to video element
if (stream) video.srcObject = stream;
return promise;
}
async function startWorkers() {
if (!workers.face) workers.face = new Worker(workerJS);
if (!workers.body) workers.body = new Worker(workerJS);
if (!workers.hand) workers.hand = new Worker(workerJS);
if (!workers.object) workers.object = new Worker(workerJS);
workers.face.onmessage = receiveMessage;
workers.body.onmessage = receiveMessage;
workers.hand.onmessage = receiveMessage;
workers.object.onmessage = receiveMessage;
}
async function main() {
window.addEventListener('unhandledrejection', (evt) => {
// eslint-disable-next-line no-console
console.error(evt.reason || evt);
document.getElementById('log').innerHTML = evt.reason?.message || evt.reason || evt;
status('exception error');
evt.preventDefault();
});
if (typeof Worker === 'undefined' || typeof OffscreenCanvas === 'undefined') {
status('workers are not supported');
return;
}
human = new Human(config.main);
// human.tf.env().set('WEBGPU_USE_GLSL', false);
document.getElementById('log').innerText = `Human: version ${human.version}`;
await startWorkers();
await setupCamera();
runDetection();
drawResults();
}
window.onload = main;

21
demo/webgpu/worker.js Normal file
View File

@ -0,0 +1,21 @@
/// <reference lib="webworker" />
// import Human from '../../dist/human.esm'; // load Human using IIFE script as Chrome Mobile does not support Modules as Workers
self.importScripts('../../assets/tf.es2017.js');
self.importScripts('../../assets/tf-backend-webgpu.es2017.js');
self.importScripts('../../dist/human.js');
let human;
onmessage = async (msg) => {
// received from index.js using:
// worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
// @ts-ignore // Human is registered as global namespace using IIFE script
// eslint-disable-next-line no-undef, new-cap
if (!human) human = new Human.default(msg.data.config);
const image = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
let result = {};
result = await human.detect(image, msg.data.config);
postMessage({ result: result[msg.data.type], type: msg.data.type });
};
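// Note: for browsers that do support module workers, the same worker could be written
// as an ES module instead of using importScripts; a hedged sketch, not part of the original demo:
// main thread:
//   const worker = new Worker('./worker.js', { type: 'module' });
// worker side, as an ES module:
//   import Human from '../../dist/human.esm.js';
//   onmessage = async (msg) => { /* same message handling as above */ };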

Some files were not shown because too many files have changed in this diff