Compare commits


No commits in common. "2.3.2" and "main" have entirely different histories.
2.3.2 ... main

757 changed files with 86953 additions and 204947 deletions

.api-extractor.json (new file)

@ -0,0 +1,27 @@
{
"$schema": "https://developer.microsoft.com/json-schemas/api-extractor/v7/api-extractor.schema.json",
"mainEntryPointFilePath": "types/lib/src/human.d.ts",
"compiler": {
"skipLibCheck": true
},
"newlineKind": "lf",
"dtsRollup": {
"enabled": true,
"untrimmedFilePath": "types/human.d.ts"
},
"docModel": { "enabled": false },
"tsdocMetadata": { "enabled": false },
"apiReport": { "enabled": false },
"messages": {
"compilerMessageReporting": {
"default": { "logLevel": "warning" }
},
"extractorMessageReporting": {
"default": { "logLevel": "warning" },
"ae-missing-release-tag": { "logLevel": "none" }
},
"tsdocMessageReporting": {
"default": { "logLevel": "warning" }
}
}
}
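
For context, a config like this is consumed by Microsoft's API Extractor when rolling the generated `.d.ts` files into the single `types/human.d.ts` bundle; a sketch of invoking it programmatically (the invocation itself is illustrative, the paths match the config above):

```ts
// illustrative sketch: run API Extractor against the config above
import { Extractor, ExtractorConfig } from '@microsoft/api-extractor';

const config = ExtractorConfig.loadFileAndPrepare('.api-extractor.json');
const result = Extractor.invoke(config, { localBuild: true, showVerboseMessages: true });
if (!result.succeeded) console.error(`api-extractor finished with ${result.errorCount} errors`);
```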


@ -8,13 +8,14 @@
"profiles": { "profiles": {
"production": ["clean", "compile", "typings", "typedoc", "lint", "changelog"], "production": ["clean", "compile", "typings", "typedoc", "lint", "changelog"],
"development": ["serve", "watch", "compile"], "development": ["serve", "watch", "compile"],
"serve": ["serve"] "serve": ["serve"],
"clean": ["clean"]
}, },
"clean": { "clean": {
"locations": ["dist/*", "types/*", "typedoc/*"] "locations": ["dist/*", "types/*", "typedoc/*"]
}, },
"lint": { "lint": {
"locations": [ "*.json", "src/**/*.ts", "test/**/*.js", "demo/**/*.js" ], "locations": [ "**/*.json", "src/**/*.ts", "test/**/*.js", "demo/**/*.js", "**/*.md" ],
"rules": { } "rules": { }
}, },
"changelog": { "changelog": {
@ -23,8 +24,8 @@
"serve": { "serve": {
"sslKey": "node_modules/@vladmandic/build/cert/https.key", "sslKey": "node_modules/@vladmandic/build/cert/https.key",
"sslCrt": "node_modules/@vladmandic/build/cert/https.crt", "sslCrt": "node_modules/@vladmandic/build/cert/https.crt",
"httpPort": 10030, "httpPort": 8000,
"httpsPort": 10031, "httpsPort": 8001,
"documentRoot": ".", "documentRoot": ".",
"defaultFolder": "demo", "defaultFolder": "demo",
"defaultFile": "index.html" "defaultFile": "index.html"
@ -33,9 +34,18 @@
"global": { "global": {
"target": "es2018", "target": "es2018",
"sourcemap": false, "sourcemap": false,
"treeShaking": true,
"ignoreAnnotations": true,
"banner": { "js": "/*\n Human\n homepage: <https://github.com/vladmandic/human>\n author: <https://github.com/vladmandic>'\n*/\n" } "banner": { "js": "/*\n Human\n homepage: <https://github.com/vladmandic/human>\n author: <https://github.com/vladmandic>'\n*/\n" }
}, },
"targets": [ "targets": [
{
"name": "tfjs/browser/version",
"platform": "browser",
"format": "esm",
"input": "tfjs/tf-version.ts",
"output": "dist/tfjs.version.js"
},
{
"name": "tfjs/nodejs/cpu",
"platform": "node",
@ -74,6 +84,7 @@
"format": "cjs", "format": "cjs",
"input": "tfjs/tf-node-wasm.ts", "input": "tfjs/tf-node-wasm.ts",
"output": "dist/tfjs.esm.js", "output": "dist/tfjs.esm.js",
"minify": false,
"external": ["@tensorflow"] "external": ["@tensorflow"]
}, },
{ {
@ -84,21 +95,13 @@
"output": "dist/human.node-wasm.js", "output": "dist/human.node-wasm.js",
"external": ["@tensorflow"] "external": ["@tensorflow"]
}, },
{
"name": "tfjs/browser/version",
"platform": "browser",
"format": "esm",
"input": "tfjs/tf-version.ts",
"output": "dist/tfjs.version.js",
"external": ["fs", "os", "buffer", "util"]
},
{ {
"name": "tfjs/browser/esm/nobundle", "name": "tfjs/browser/esm/nobundle",
"platform": "browser", "platform": "browser",
"format": "esm", "format": "esm",
"input": "tfjs/tf-browser.ts", "input": "tfjs/tf-browser.ts",
"output": "dist/tfjs.esm.js", "output": "dist/tfjs.esm.js",
"external": ["@tensorflow", "fs", "os", "buffer", "util"] "external": ["@tensorflow"]
}, },
{
"name": "human/browser/esm/nobundle",
@ -106,8 +109,8 @@
"format": "esm", "format": "esm",
"input": "src/human.ts", "input": "src/human.ts",
"output": "dist/human.esm-nobundle.js", "output": "dist/human.esm-nobundle.js",
"sourcemap": true, "sourcemap": false,
"external": ["@tensorflow", "fs", "os", "buffer", "util"] "external": ["@tensorflow"]
}, },
{ {
"name": "tfjs/browser/esm/bundle", "name": "tfjs/browser/esm/bundle",
@ -115,8 +118,8 @@
"format": "esm", "format": "esm",
"input": "tfjs/tf-browser.ts", "input": "tfjs/tf-browser.ts",
"output": "dist/tfjs.esm.js", "output": "dist/tfjs.esm.js",
"sourcemap": true, "sourcemap": false,
"external": ["fs", "os", "buffer", "util"] "minify": true
}, },
{ {
"name": "human/browser/iife/bundle", "name": "human/browser/iife/bundle",
@ -126,7 +129,7 @@
"output": "dist/human.js", "output": "dist/human.js",
"minify": true, "minify": true,
"globalName": "Human", "globalName": "Human",
"external": ["fs", "os", "buffer", "util"] "external": ["@tensorflow"]
}, },
{ {
"name": "human/browser/esm/bundle", "name": "human/browser/esm/bundle",
@ -135,14 +138,42 @@
"input": "src/human.ts", "input": "src/human.ts",
"output": "dist/human.esm.js", "output": "dist/human.esm.js",
"sourcemap": true, "sourcemap": true,
"external": ["fs", "os", "buffer", "util"], "minify": false,
"typings": "types", "external": ["@tensorflow"],
"typings": "types/lib",
"typedoc": "typedoc" "typedoc": "typedoc"
},
{
"name": "demo/typescript",
"platform": "browser",
"format": "esm",
"input": "demo/typescript/index.ts",
"output": "demo/typescript/index.js",
"sourcemap": true,
"external": ["*/human.esm.js"]
},
{
"name": "demo/faceid",
"platform": "browser",
"format": "esm",
"input": "demo/faceid/index.ts",
"output": "demo/faceid/index.js",
"sourcemap": true,
"external": ["*/human.esm.js"]
},
{
"name": "demo/tracker",
"platform": "browser",
"format": "esm",
"input": "demo/tracker/index.ts",
"output": "demo/tracker/index.js",
"sourcemap": true,
"external": ["*/human.esm.js"]
}
]
},
"watch": {
-"locations": [ "src/**/*", "tfjs/**/*" ]
"locations": [ "src/**/*", "tfjs/**/*", "demo/**/*.ts" ]
},
"typescript": {
"allowJs": false "allowJs": false


@ -1,84 +1,221 @@
{
-"globals": {},
"globals": {
},
"rules": {
"@typescript-eslint/no-require-imports":"off"
},
"overrides": [
{
"files": ["**/*.ts"],
"parser": "@typescript-eslint/parser",
"parserOptions": { "ecmaVersion": "latest", "project": ["./tsconfig.json"] },
"plugins": ["@typescript-eslint"],
"env": {
"browser": true,
"commonjs": false,
"node": false,
"es2021": true
},
"extends": [
"airbnb-base",
"eslint:recommended",
"plugin:@typescript-eslint/eslint-recommended",
"plugin:@typescript-eslint/recommended",
"plugin:@typescript-eslint/recommended-requiring-type-checking",
"plugin:@typescript-eslint/strict",
"plugin:import/recommended",
"plugin:promise/recommended"
],
"rules": {
"@typescript-eslint/ban-ts-comment":"off",
"@typescript-eslint/dot-notation":"off",
"@typescript-eslint/no-empty-interface":"off",
"@typescript-eslint/no-inferrable-types":"off",
"@typescript-eslint/no-misused-promises":"off",
"@typescript-eslint/no-unnecessary-condition":"off",
"@typescript-eslint/no-unsafe-argument":"off",
"@typescript-eslint/no-unsafe-assignment":"off",
"@typescript-eslint/no-unsafe-call":"off",
"@typescript-eslint/no-unsafe-member-access":"off",
"@typescript-eslint/no-unsafe-return":"off",
"@typescript-eslint/no-require-imports":"off",
"@typescript-eslint/no-empty-object-type":"off",
"@typescript-eslint/non-nullable-type-assertion-style":"off",
"@typescript-eslint/prefer-for-of":"off",
"@typescript-eslint/prefer-nullish-coalescing":"off",
"@typescript-eslint/prefer-ts-expect-error":"off",
"@typescript-eslint/restrict-plus-operands":"off",
"@typescript-eslint/restrict-template-expressions":"off",
"dot-notation":"off",
"guard-for-in":"off",
"import/extensions": ["off", "always"],
"import/no-unresolved":"off",
"import/prefer-default-export":"off",
"lines-between-class-members":"off",
"max-len": [1, 275, 3],
"no-async-promise-executor":"off",
"no-await-in-loop":"off",
"no-bitwise":"off",
"no-continue":"off",
"no-lonely-if":"off",
"no-mixed-operators":"off",
"no-param-reassign":"off",
"no-plusplus":"off",
"no-regex-spaces":"off",
"no-restricted-syntax":"off",
"no-return-assign":"off",
"no-void":"off",
"object-curly-newline":"off",
"prefer-destructuring":"off",
"prefer-template":"off",
"radix":"off"
}
},
{
"files": ["**/*.d.ts"],
"parser": "@typescript-eslint/parser",
"parserOptions": { "ecmaVersion": "latest", "project": ["./tsconfig.json"] },
"plugins": ["@typescript-eslint"],
"env": {
"browser": true,
"commonjs": false,
"node": false,
"es2021": true
},
"extends": [
"airbnb-base",
"eslint:recommended",
"plugin:@typescript-eslint/eslint-recommended",
"plugin:@typescript-eslint/recommended",
"plugin:@typescript-eslint/recommended-requiring-type-checking",
"plugin:@typescript-eslint/strict",
"plugin:import/recommended",
"plugin:promise/recommended"
],
"rules": {
"@typescript-eslint/array-type":"off",
"@typescript-eslint/ban-types":"off",
"@typescript-eslint/consistent-indexed-object-style":"off",
"@typescript-eslint/consistent-type-definitions":"off",
"@typescript-eslint/no-empty-interface":"off",
"@typescript-eslint/no-explicit-any":"off",
"@typescript-eslint/no-invalid-void-type":"off",
"@typescript-eslint/no-unnecessary-type-arguments":"off",
"@typescript-eslint/no-unnecessary-type-constraint":"off",
"comma-dangle":"off",
"indent":"off",
"lines-between-class-members":"off",
"max-classes-per-file":"off",
"max-len":"off",
"no-multiple-empty-lines":"off",
"no-shadow":"off",
"no-use-before-define":"off",
"quotes":"off",
"semi":"off"
}
},
{
"files": ["**/*.js"],
"parserOptions": { "sourceType": "module", "ecmaVersion": "latest" },
"plugins": [],
"env": { "env": {
"browser": true, "browser": true,
"commonjs": true, "commonjs": true,
"node": true, "node": true,
"es2021": true "es2021": true
}, },
"parser": "@typescript-eslint/parser",
"parserOptions": {
"ecmaVersion": 2021
},
"plugins": [
"@typescript-eslint"
],
"extends": [ "extends": [
"airbnb-base", "airbnb-base",
"eslint:recommended", "eslint:recommended",
"plugin:@typescript-eslint/eslint-recommended",
"plugin:@typescript-eslint/recommended",
"plugin:import/errors",
"plugin:import/warnings",
"plugin:json/recommended-with-comments",
"plugin:node/recommended", "plugin:node/recommended",
"plugin:promise/recommended" "plugin:promise/recommended"
], ],
"ignorePatterns": [
"assets",
"demo/helpers",
"dist",
"media",
"models",
"node_modules"
],
"rules": { "rules": {
"@typescript-eslint/ban-ts-comment": "off", "dot-notation":"off",
"@typescript-eslint/explicit-module-boundary-types": "off", "import/extensions": ["error", "always"],
"@typescript-eslint/no-shadow": "error", "import/no-extraneous-dependencies":"off",
"@typescript-eslint/no-var-requires": "off",
"@typescript-eslint/triple-slash-reference": "off",
"@typescript-eslint/no-inferrable-types": "off",
"camelcase": "off",
"dot-notation": "off",
"func-names": "off",
"guard-for-in": "off",
"import/extensions": "off",
"import/no-extraneous-dependencies": "off",
"import/no-named-as-default": "off",
"import/no-unresolved": "off",
"import/prefer-default-export": "off",
"lines-between-class-members": "off",
"max-len": [1, 275, 3], "max-len": [1, 275, 3],
"newline-per-chained-call": "off", "no-await-in-loop":"off",
"no-async-promise-executor": "off", "no-bitwise":"off",
"no-await-in-loop": "off", "no-continue":"off",
"no-bitwise": "off", "no-mixed-operators":"off",
"no-case-declarations":"off",
"no-continue": "off",
"no-lonely-if": "off",
"no-loop-func": "off",
"no-mixed-operators": "off",
"no-param-reassign":"off", "no-param-reassign":"off",
"no-plusplus": "off", "no-plusplus":"off",
"no-process-exit": "off", "no-regex-spaces":"off",
"no-regex-spaces": "off", "no-restricted-syntax":"off",
"no-restricted-globals": "off", "no-return-assign":"off",
"no-restricted-syntax": "off", "node/no-unsupported-features/es-syntax":"off",
"no-return-assign": "off", "object-curly-newline":"off",
"no-shadow": "off", "prefer-destructuring":"off",
"no-underscore-dangle": "off",
"node/no-missing-import": ["error", { "tryExtensions": [".js", ".json", ".ts"] }],
"node/no-unpublished-import": "off",
"node/no-unpublished-require": "off",
"node/no-unsupported-features/es-syntax": "off",
"node/shebang": "off",
"object-curly-newline": "off",
"prefer-destructuring": "off",
"prefer-template":"off", "prefer-template":"off",
"promise/always-return": "off", "radix":"off"
"promise/catch-or-return": "off",
"promise/no-nesting": "off",
"radix": "off"
} }
},
{
"files": ["**/*.json"],
"parserOptions": { "ecmaVersion": "latest" },
"plugins": ["json"],
"env": {
"browser": false,
"commonjs": false,
"node": false,
"es2021": false
},
"extends": []
},
{
"files": ["**/*.html"],
"parserOptions": { "sourceType": "module", "ecmaVersion": "latest" },
"parser": "@html-eslint/parser",
"plugins": ["html", "@html-eslint"],
"env": {
"browser": true,
"commonjs": false,
"node": false,
"es2021": false
},
"extends": ["plugin:@html-eslint/recommended"],
"rules": {
"@html-eslint/element-newline":"off",
"@html-eslint/attrs-newline":"off",
"@html-eslint/indent": ["error", 2]
}
},
{
"files": ["**/*.md"],
"plugins": ["markdown"],
"processor": "markdown/markdown",
"rules": {
"no-undef":"off"
}
},
{
"files": ["**/*.md/*.js"],
"rules": {
"@typescript-eslint/no-unused-vars":"off",
"@typescript-eslint/triple-slash-reference":"off",
"import/newline-after-import":"off",
"import/no-unresolved":"off",
"no-console":"off",
"no-global-assign":"off",
"no-multi-spaces":"off",
"no-restricted-globals":"off",
"no-undef":"off",
"no-unused-vars":"off",
"node/no-missing-import":"off",
"node/no-missing-require":"off",
"promise/catch-or-return":"off"
}
}
],
"ignorePatterns": [
"node_modules",
"assets",
"dist",
"demo/helpers/*.js",
"demo/typescript/*.js",
"demo/faceid/*.js",
"demo/tracker/*.js",
"typedoc"
]
}

.github/FUNDING.yml (new file)

@ -0,0 +1,11 @@
github: [vladmandic]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']

.gitignore

@ -1,4 +1,9 @@
-node_modules
node_modules/
types/lib
pnpm-lock.yaml
-assets/tf*
package-lock.json
*.swp
samples/**/*.mp4
samples/**/*.webm
temp
tmp


@ -5,7 +5,7 @@
"browserslist": [ "browserslist": [
"chrome >= 90", "chrome >= 90",
"edge >= 90", "edge >= 90",
"firefox >= 90", "firefox >= 100",
"android >= 90", "android >= 90",
"safari >= 15" "safari >= 15"
], ],


@ -1,6 +1,7 @@
{
"MD012": false,
"MD013": false,
"MD029": false,
"MD033": false,
"MD036": false,
"MD041": false


@ -4,5 +4,4 @@ samples
typedoc
test
wiki
-dist/tfjs.esm.js
types/lib
-dist/tfjs.esm.js.map

.npmrc

@ -1 +1,5 @@
-force = true
force=true
omit=dev
legacy-peer-deps=true
strict-peer-dependencies=false
node-options='--no-deprecation'

.vscode/settings.json (new file)

@ -0,0 +1,10 @@
{
"search.exclude": {
"dist/*": true,
"node_modules/*": true,
"types": true,
"typedoc": true,
},
"search.useGlobalIgnoreFiles": true,
"search.useParentIgnoreFiles": true
}


@ -1,23 +1,389 @@
-# packageJson
# @vladmandic/human
-Version: **undefined**
Version: **3.3.5**
-Description: **undefined**
Description: **Human: AI-powered 3D Face Detection & Rotation Tracking, Face Description & Recognition, Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction, Gesture Recognition**
-Author: **undefined**
Author: **Vladimir Mandic <mandic00@live.com>**
-License: **undefined**
License: **MIT**
Repository: **<https://github.com/vladmandic/human>**
## Changelog
-### **HEAD -> main** 2021/10/10 mandic00@live.com
### **3.3.5** 2025/02/05 mandic00@live.com
### **origin/main** 2024/10/24 mandic00@live.com
- add human.draw.tensor method
### **3.3.4** 2024/10/24 mandic00@live.com
### **3.3.3** 2024/10/14 mandic00@live.com
- add loaded property to model stats and mark models not loaded correctly.
- release build
### **3.3.2** 2024/09/11 mandic00@live.com
- full rebuild
### **3.3.1** 2024/09/11 mandic00@live.com
- add config.face.detector.square option
- human 3.3 alpha test run
- human 3.3 alpha with new build environment
- release rebuild
- fix flazeface tensor scale and update build platform
### **3.2.2** 2024/04/17 mandic00@live.com
### **release: 3.2.1** 2024/02/15 mandic00@live.com
### **3.2.1** 2024/02/15 mandic00@live.com
### **3.2.0** 2023/12/06 mandic00@live.com
- set browser false when navigator object is empty
- https://github.com/vladmandic/human/issues/402
### **release: 3.1.2** 2023/09/18 mandic00@live.com
- full rebuild
### **3.1.2** 2023/09/18 mandic00@live.com
- major toolkit upgrade
- full rebuild
- major toolkit upgrade
### **3.1.1** 2023/08/05 mandic00@live.com
- fixes plus tfjs upgrade for new release
### **3.0.7** 2023/06/12 mandic00@live.com
- full rebuild
- fix memory leak in histogramequalization
- initial work on tracker
### **3.0.6** 2023/03/21 mandic00@live.com
- add optional crop to multiple models
- fix movenet-multipose
- add electron detection
- fix gender-ssrnet-imdb
- add movenet-multipose workaround
- rebuild and publish
- add face.detector.minsize configurable setting
- add affectnet
### **3.0.5** 2023/02/02 mandic00@live.com
- add gear-e models
- detect react-native
- redo blazeface annotations
### **3.0.4** 2023/01/29 mandic00@live.com
- make naviator calls safe
- fix facedetector-only configs
### **3.0.3** 2023/01/07 mandic00@live.com
- full rebuild
### **3.0.2** 2023/01/06 mandic00@live.com
- default face.rotation disabled
### **release: 3.0.1** 2022/11/22 mandic00@live.com
### **3.0.1** 2022/11/22 mandic00@live.com
- support dynamic loads
- polish demos
- add facedetect demo and fix model async load
- enforce markdown linting
- cleanup git history
- default empty result
- refactor draw and models namespaces
- refactor distance
- add basic anthropometry
- added webcam id specification
- include external typedefs
- prepare external typedefs
- rebuild all
- include project files for types
- architectural improvements
- refresh dependencies
- add named exports
- add draw label templates
- reduce dev dependencies
- tensor rank strong typechecks
- rebuild dependencies
### **2.11.1** 2022/10/09 mandic00@live.com
- add rvm segmentation model
- add human.webcam methods
- create funding.yml
- fix rotation interpolation
### **2.10.3** 2022/09/21 mandic00@live.com
- add human.video method
### **2.10.2** 2022/09/11 mandic00@live.com
- add node.js esm compatibility (#292)
- release
### **2.10.1** 2022/09/07 mandic00@live.com
- release candidate
- add config flags
- test update
- release preview
- optimize startup sequence
- reorder backend init code
- test embedding
- embedding test
- add browser iife tests
- minor bug fixes and increased test coverage
- extend release tests
- add model load exception handling
- add softwarekernels config option
- expand type safety
- full eslint rule rewrite
### **2.9.4** 2022/08/20 mandic00@live.com
- add browser test
- add tensorflow library detection
- fix wasm detection
- enumerate additional models
- release refresh
### **2.9.3** 2022/08/10 mandic00@live.com
- rehault testing framework
- release refresh
- add insightface
### **2.9.2** 2022/08/08 mandic00@live.com
- release rebuild
### **2.9.1** 2022/07/25 mandic00@live.com
- full rebuild
- release cleanup
- tflite experiments
- add load monitor test
- beta for upcoming major release
- swtich to release version of tfjs
- placeholder for face contours
- improve face compare in main demo
- add webview support
- fix(gear): ensure gear.modelpath is used for loadmodel()
- npm default install should be prod only
- fix npm v7 compatibility
- add getmodelstats method
- rebuild
- release build
### **2.8.1** 2022/06/08 mandic00@live.com
- webgpu and wasm optimizations
- add faceboxes prototype
- full rebuild
### **2.7.4** 2022/05/24 mandic00@live.com
### **2.7.3** 2022/05/24 mandic00@live.com
- add face.mesh.keepinvalid config flag
- initial work for new facemesh model
### **2.7.2** 2022/05/12 mandic00@live.com
- fix demo when used with video files
- major release
### **2.7.1** 2022/05/09 mandic00@live.com
- support 4k input
- add attention draw methods
- fix coloring function
- enable precompile as part of warmup
- prepare release beta
- change default face crop
- beta release 2.7
- refactor draw methods
- implement face attention model
- add electronjs demo
- rebuild
### **2.6.5** 2022/04/01 mandic00@live.com
- bundle offscreencanvas types
- prototype precompile pass
- fix changelog generation
- fix indexdb config check
### **2.6.4** 2022/02/27 mandic00@live.com
- fix types typo
- refresh
- add config option wasmplatformfetch
### **2.6.3** 2022/02/10 mandic00@live.com
- rebuild
### **2.6.2** 2022/02/07 mandic00@live.com
- release rebuild
### **2.6.1** 2022/01/20 mandic00@live.com
- implement model caching using indexdb
- prototype global fetch handler
- fix face box and hand tracking when in front of face
### **2.5.8** 2022/01/14 mandic00@live.com
- fix samples
- fix(src): typo
- change on how face box is calculated
### **2.5.7** 2021/12/27 mandic00@live.com
- fix posenet
- release refresh
### **2.5.6** 2021/12/15 mandic00@live.com
- strong type for string enums
- rebuild
- fix node detection in electron environment
### **2.5.5** 2021/12/01 mandic00@live.com
- added human-motion
- add offscreencanvas typedefs
- release preview
- fix face box scaling on detection
- cleanup
### **2.5.4** 2021/11/22 mandic00@live.com
- prototype blazepose detector
- minor fixes
- add body 3d interpolation
- edit blazepose keypoints
- new build process
### **2.5.3** 2021/11/18 mandic00@live.com
- create typedef rollup
- optimize centernet
- cache frequent tf constants
- add extra face rotation prior to mesh
- release 2.5.2
- improve error handling
### **2.5.2** 2021/11/14 mandic00@live.com
- fix mobilefacenet module
- fix gear and ssrnet modules
- fix for face crop when mesh is disabled
- implement optional face masking
- add similarity score range normalization
- add faceid demo
- documentation overhaul
- auto tensor shape and channels handling
- disable use of path2d in node
- add liveness module and facerecognition demo
- initial version of facerecognition demo
- rebuild
- add type defs when working with relative path imports
- disable humangl backend if webgl 1.0 is detected
- add additional hand gestures
### **2.5.1** 2021/11/08 mandic00@live.com
- new human.compare api
- added links to release notes
- new frame change detection algorithm
- add histogram equalization
- implement wasm missing ops
- performance and memory optimizations
- fix react compatibility issues
- improve box rescaling for all modules
- improve precision using wasm backend
- refactor predict with execute
- patch tfjs type defs
- start 2.5 major version
- build and docs cleanup
- fix firefox bug
### **2.4.3** 2021/10/28 mandic00@live.com
- additional human.performance counters
### **2.4.2** 2021/10/27 mandic00@live.com
- add ts demo
- switch from es2018 to es2020 for main build
- switch to custom tfjs for demos
- release 2.4
### **2.4.1** 2021/10/25 mandic00@live.com
- refactoring plus jsdoc comments
- increase face similarity match resolution
- time based caching
- turn on minification
- initial work on skiptime
- added generic types
- enhanced typing exports
- add optional autodetected custom wasm path
### **2.3.6** 2021/10/21 mandic00@live.com
- fix for human.draw labels and typedefs
- refactor human.env to a class type
- add human.custom.esm using custom tfjs build
### **2.3.5** 2021/10/19 mandic00@live.com
- removed direct usage of performance.now
### **2.3.4** 2021/10/19 mandic00@live.com
- minor blazepose optimizations
- compress samples
- remove posenet from default package
- enhanced movenet postprocessing
- use transferrable buffer for worker messages
- add optional anti-spoofing module
- add node-match advanced example using worker thread pool
- package updates
- optimize image preprocessing
- set webgpu optimized flags
- major precision improvements to movenet and handtrack
- image processing fixes
- redesign body and hand caching and interpolation
- demo default config cleanup
- improve gaze and face angle visualizations in draw
- release 2.3.1
### **release 2.3.1** 2021/10/06 mandic00@live.com
### **2.3.1** 2021/10/06 mandic00@live.com
@ -27,7 +393,6 @@
- fix backend order initialization
- added docker notes
- breaking change: new similarity and match methods
-- release candidate
- tweaked default values
- enable handtrack as default model
- redesign face processing
@ -57,9 +422,7 @@
### **2.2.2** 2021/09/17 mandic00@live.com
- experimental webgl status monitoring
-- major release
-### **release: 2.2.1** 2021/09/16 mandic00@live.com
### **2.2.1** 2021/09/16 mandic00@live.com
@ -88,8 +451,6 @@
- implement event emitters
- fix iife loader
- simplify dependencies
-- fix file permissions
-- remove old build server
- change build process
- add benchmark info
- simplify canvas handling in nodejs
@ -132,7 +493,6 @@
### **2.1.1** 2021/07/29 mandic00@live.com
-- proposal #141
- add note on manually disping tensor
- modularize model loading
@ -146,9 +506,7 @@
- reorganize demos
- fix centernet box width & height
- add body segmentation sample
-- add release notes
-### **release: 2.0.1** 2021/06/08 mandic00@live.com
- release 2.0
### **2.0.1** 2021/06/08 mandic00@live.com
@ -177,7 +535,6 @@
- implemented human.next global interpolation method
- finished draw buffering and smoothing and enabled by default
- implemented service worker
-- quantized centernet
- release candidate
- added usage restrictions
- quantize handdetect model
@ -211,8 +568,6 @@
### **1.9.1** 2021/05/21 mandic00@live.com
- caching improvements
-- sanitize server input
-- remove nanodet weights from default distribution
- add experimental mb3-centernet object detection
- individual model skipframes values still max high threshold for caching
- config.videooptimized has been removed and config.cachesensitivity has been added instead
@ -234,9 +589,7 @@
### **1.8.2** 2021/05/04 mandic00@live.com
-- release 1.8 with major changes and tfjs 3.6.0
-### **release 1.8 with major changes and tfjs 3.6.0** 2021/04/30 mandic00@live.com
### **1.8.1** 2021/04/30 mandic00@live.com
@ -270,7 +623,6 @@
- added filter.flip feature
- added demo load image from http
- mobile demo optimization and iris gestures
-- full test run
- full rebuild
- new look
- added benchmarks
@ -380,7 +732,6 @@
- add experimental nanodet object detection
- full models signature
-- cleanup
### **1.1.7** 2021/03/16 mandic00@live.com
@ -426,7 +777,6 @@
### **1.0.3** 2021/03/10 mandic00@live.com
- strong typing for public classes and hide private classes
-- re-added blazeface-front
- enhanced age, gender, emotion detection
- full rebuild
@ -435,151 +785,73 @@
- remove blazeface-front, blazepose-upper, faceboxes
- remove blazeface-front and faceboxes
-### **release: 1.0.1** 2021/03/09 mandic00@live.com
### **1.0.1** 2021/03/09 mandic00@live.com
- fix for face detector when mesh is disabled
- optimize for npm
- 0.40.9
-### **0.40.9** 2021/03/08 mandic00@live.com
- fix performance issue when running with low confidence
- 0.40.8
-### **0.40.8** 2021/03/08 mandic00@live.com
- 0.40.7
-### **0.40.7** 2021/03/06 mandic00@live.com
- implemented 3d face angle calculations
- 0.40.6
-### **0.40.6** 2021/03/06 mandic00@live.com
- add curve draw output
- 0.40.5
-### **0.40.5** 2021/03/05 mandic00@live.com
- fix human.draw
- 0.40.4
-### **0.40.4** 2021/03/05 mandic00@live.com
- cleanup blazepose code
- fix demo
- 0.40.3
-### **0.40.3** 2021/03/05 mandic00@live.com
- 0.40.2
-### **0.40.2** 2021/03/05 mandic00@live.com
- added blazepose-upper
- 0.40.1
-### **0.40.1** 2021/03/04 mandic00@live.com
- implement blazepose and update demos
- add todo list
- 0.30.6
-### **0.30.6** 2021/03/03 mandic00@live.com
- fine tuning age and face models
- 0.30.5
-### **0.30.5** 2021/03/02 mandic00@live.com
- add debug logging flag
- 0.30.4
-### **0.30.4** 2021/03/01 mandic00@live.com
- added skipinitial flag
- 0.30.3
-### **0.30.3** 2021/02/28 mandic00@live.com
- typo
- 0.30.2
-### **0.30.2** 2021/02/26 mandic00@live.com
- rebuild
- fix typo
- 0.30.1
-### **0.30.1** 2021/02/25 mandic00@live.com
- 0.20.11
- 0.20.10
- 0.20.9
-### **0.20.11** 2021/02/24 mandic00@live.com
- 0.20.8
- 0.20.7
-### **0.20.10** 2021/02/22 mandic00@live.com
-### **0.20.9** 2021/02/21 mandic00@live.com
- remove extra items
- simmilarity fix
-### **0.20.8** 2021/02/21 mandic00@live.com
- embedding fix
-### **0.20.7** 2021/02/21 mandic00@live.com
- build fix
- 0.20.6
-### **0.20.6** 2021/02/21 mandic00@live.com
- embedding fix
- 0.20.5
-### **0.20.5** 2021/02/21 mandic00@live.com
- fix imagefx and add dev builds
-### **0.20.4** 2021/02/19 mandic00@live.com
- 0.20.4
- 0.20.3
-### **0.20.3** 2021/02/17 mandic00@live.com
- rebuild
- 0.20.2
-### **0.20.2** 2021/02/13 mandic00@live.com
- merge branch 'main' of https://github.com/vladmandic/human into main
- create codeql-analysis.yml
- create security.md
- add templates
- 0.20.1
-### **0.20.1** 2021/02/08 mandic00@live.com
- menu fixes
- convert to typescript
- 0.11.5
-### **0.11.5** 2021/02/06 mandic00@live.com
- added faceboxes alternative model
- 0.11.4
-### **0.11.4** 2021/02/06 mandic00@live.com
- 0.11.3
- 0.11.2
-### **0.11.3** 2021/02/02 mandic00@live.com
-### **0.11.2** 2021/01/30 mandic00@live.com
- added warmup for nodejs
- 0.11.1
-### **update for tfjs 3.0.0** 2021/01/29 mandic00@live.com
- 0.10.2
- 0.10.1
-### **0.11.1** 2021/01/29 mandic00@live.com
-### **0.10.2** 2021/01/22 mandic00@live.com
-### **0.10.1** 2021/01/20 mandic00@live.com
### **0.9.26** 2021/01/18 mandic00@live.com
- fix face detection when mesh is disabled
- added minification notes
- version bump
### **0.9.25** 2021/01/13 mandic00@live.com
@ -641,7 +913,6 @@
- conditional hand rotation
- staggered skipframes
-- fix permissions
### **0.9.13** 2020/12/08 mandic00@live.com
@ -693,9 +964,7 @@
### **0.9.3** 2020/11/16 mandic00@live.com
- switched to minified build
-- web worker fixes
-### **release: 1.2** 2020/11/15 mandic00@live.com
- full rebuild
### **0.9.2** 2020/11/14 mandic00@live.com
@ -752,7 +1021,6 @@
- optimized model loader
- merge branch 'main' of https://github.com/vladmandic/human into main
- created wiki
-- delete bug_report.md
- optimize font resizing
- fix nms sync call
@ -776,7 +1044,6 @@
- optimized camera and mobile layout
- fixed worker and filter compatibility
-- removed test code
### **0.7.2** 2020/11/04 mandic00@live.com
@ -853,7 +1120,6 @@
### **0.4.8** 2020/10/28 mandic00@live.com
- revert "updated menu handler"
-- fix webpack compatibility issue
### **0.4.7** 2020/10/27 mandic00@live.com
@ -941,7 +1207,6 @@
### **0.2.8** 2020/10/13 mandic00@live.com
-- added example image
### **0.2.7** 2020/10/13 mandic00@live.com
@ -957,7 +1222,6 @@
### **0.2.4** 2020/10/12 mandic00@live.com
-- removed extra files
### **0.2.3** 2020/10/12 mandic00@live.com
@ -965,9 +1229,6 @@
### **0.2.2** 2020/10/12 mandic00@live.com
-### **release: 1.0** 2020/10/12 mandic00@live.com
### **0.2.1** 2020/10/12 mandic00@live.com
- added sample image

README.md

@ -1,9 +1,9 @@
[![](https://img.shields.io/static/v1?label=Sponsor&message=%E2%9D%A4&logo=GitHub&color=%23fe8e86)](https://github.com/sponsors/vladmandic)
![Git Version](https://img.shields.io/github/package-json/v/vladmandic/human?style=flat-square&svg=true&label=git)
![NPM Version](https://img.shields.io/npm/v/@vladmandic/human.png?style=flat-square)
![Last Commit](https://img.shields.io/github/last-commit/vladmandic/human?style=flat-square&svg=true)
![License](https://img.shields.io/github/license/vladmandic/human?style=flat-square&svg=true)
![GitHub Status Checks](https://img.shields.io/github/checks-status/vladmandic/human/main?style=flat-square&svg=true)
-![Vulnerabilities](https://img.shields.io/snyk/vulnerabilities/github/vladmandic/human?style=flat-square&svg=true)
# Human Library
@ -13,43 +13,99 @@
<br>
-JavaScript module using TensorFlow/JS Machine Learning library
-- **Browser**:
-Compatible with both desktop and mobile platforms
-Compatible with *CPU*, *WebGL*, *WASM* backends
-Compatible with *WebWorker* execution
-- **NodeJS**:
-Compatible with both software *tfjs-node* and
-GPU accelerated backends *tfjs-node-gpu* using CUDA libraries
## Highlights
- Compatible with most server-side and client-side environments and frameworks
- Combines multiple machine learning models which can be switched on-demand depending on the use-case
- Related models are executed in an attention pipeline to provide details when needed
- Optimized input pre-processing that can enhance image quality of any type of inputs
- Detection of frame changes to trigger only required models for improved performance
- Intelligent temporal interpolation to provide smooth results regardless of processing performance
- Simple unified API
- Built-in Image, Video and WebCam handling
[*Jump to Quick Start*](#quick-start)
<br>
-Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) app for processing of live WebCam video or static images
## Compatibility
**Browser**:
- Compatible with both desktop and mobile platforms
- Compatible with *WebGPU*, *WebGL*, *WASM*, *CPU* backends
- Compatible with *WebWorker* execution
- Compatible with *WebView*
- Primary platform: *Chromium*-based browsers
- Secondary platform: *Firefox*, *Safari*
**NodeJS**:
- Compatible with *WASM* backend for executions on architectures where *tensorflow* binaries are not available
- Compatible with *tfjs-node* using software execution via *tensorflow* shared libraries
- Compatible with *tfjs-node* using GPU-accelerated execution via *tensorflow* shared libraries and nVidia CUDA
- Supported versions are from **14.x** to **22.x**
- NodeJS version **23.x** is not supported due to breaking changes and issues with `@tensorflow/tfjs`
<br>
## Releases
- [Release Notes](https://github.com/vladmandic/human/releases)
- [NPM Link](https://www.npmjs.com/package/@vladmandic/human)
## Demos
*Check out [**Simple Live Demo**](https://vladmandic.github.io/human/demo/typescript/index.html) fully annotated app as a good starting point ([html](https://github.com/vladmandic/human/blob/main/demo/typescript/index.html))([code](https://github.com/vladmandic/human/blob/main/demo/typescript/index.ts))*
*Check out [**Main Live Demo**](https://vladmandic.github.io/human/demo/index.html) app for advanced processing of webcam, video stream or static images with all possible tunable options*
- To start video detection, simply press *Play*
- To process images, simply drag & drop in your Browser window
- Note: For optimal performance, select only models you'd like to use
-- Note: If you have modern GPU, WebGL (default) backend is preferred, otherwise select WASM backend
- Note: If you have modern GPU, *WebGL* (default) backend is preferred, otherwise select *WASM* backend
<br>
-## Demos
- [**List of all Demo applications**](https://github.com/vladmandic/human/wiki/Demos)
-- [*Live:* **Main Application**](https://vladmandic.github.io/human/demo/index.html)
-- [*Live:* **Face Extraction, Description, Identification and Matching**](https://vladmandic.github.io/human/demo/facematch/index.html)
-- [*Live:* **Face Extraction and 3D Rendering**](https://vladmandic.github.io/human/demo/face3d/index.html)
-- [*Live:* **Multithreaded Detection Showcasing Maximum Performance**](https://vladmandic.github.io/human/demo/multithread/index.html)
-- [*Live:* **VR Model with Head, Face, Eye, Body and Hand tracking**](https://vladmandic.github.io/human-vrm/src/human-vrm.html)
-- [Examples galery](https://vladmandic.github.io/human/samples/samples.html)
- [**Live Examples gallery**](https://vladmandic.github.io/human/samples/index.html)
### Browser Demos
*All browser demos are self-contained without any external dependencies*
- **Full** [[*Live*]](https://vladmandic.github.io/human/demo/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo): Main browser demo app that showcases all Human capabilities
- **Simple** [[*Live*]](https://vladmandic.github.io/human/demo/typescript/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/typescript): Simple WebCam processing demo in TypeScript
- **Embedded** [[*Live*]](https://vladmandic.github.io/human/demo/video/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/video/index.html): Even simpler demo with tiny code embedded in HTML file
- **Face Detect** [[*Live*]](https://vladmandic.github.io/human/demo/facedetect/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facedetect): Extract faces from images and processes details
- **Face Match** [[*Live*]](https://vladmandic.github.io/human/demo/facematch/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Extract faces from images, calculates face descriptors and similarities and matches them to known database
- **Face ID** [[*Live*]](https://vladmandic.github.io/human/demo/faceid/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/faceid): Runs multiple checks to validate webcam input before performing face match to faces in IndexDB
- **Multi-thread** [[*Live*]](https://vladmandic.github.io/human/demo/multithread/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread): Runs each Human module in a separate web worker for highest possible performance
- **NextJS** [[*Live*]](https://vladmandic.github.io/human-next/out/index.html) [[*Details*]](https://github.com/vladmandic/human-next): Use Human with TypeScript, NextJS and ReactJS
- **ElectronJS** [[*Details*]](https://github.com/vladmandic/human-electron): Use Human with TypeScript and ElectronJS to create standalone cross-platform apps
- **3D Analysis with BabylonJS** [[*Live*]](https://vladmandic.github.io/human-motion/src/index.html) [[*Details*]](https://github.com/vladmandic/human-motion): 3D tracking and visualization of head, face, eye, body and hand
- **VRM Virtual Model Tracking with Three.JS** [[*Live*]](https://vladmandic.github.io/human-three-vrm/src/human-vrm.html) [[*Details*]](https://github.com/vladmandic/human-three-vrm): VR model with head, face, eye, body and hand tracking
- **VRM Virtual Model Tracking with BabylonJS** [[*Live*]](https://vladmandic.github.io/human-bjs-vrm/src/index.html) [[*Details*]](https://github.com/vladmandic/human-bjs-vrm): VR model with head, face, eye, body and hand tracking
### NodeJS Demos
*NodeJS demos may require extra dependencies which are used to decode inputs*
*See header of each demo to see its dependencies as they are not automatically installed with `Human`*
- **Main** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node.js): Process images from files, folders or URLs using native methods
- **Canvas** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-canvas.js): Process image from file or URL and draw results to a new image file using `node-canvas`
- **Video** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-video.js): Processing of video input using `ffmpeg`
- **WebCam** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-webcam.js): Processing of webcam screenshots using `fswebcam`
- **Events** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-event.js): Showcases usage of `Human` eventing to get notifications on processing
- **Similarity** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-similarity.js): Compares two input images for similarity of detected faces
- **Face Match** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch/node-match.js): Parallel processing of face **match** in multiple child worker threads
- **Multiple Workers** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread/node-multiprocess.js): Runs multiple parallel `human` instances by dispatching them to a pool of pre-created worker processes
- **Dynamic Load** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Loads Human dynamically with multiple different desired backends
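
As a complement to the list above, a minimal NodeJS sketch showing the common pattern these demos share (the input file name is illustrative; assumes `@vladmandic/human` and `@tensorflow/tfjs-node` are installed):

```ts
// minimal NodeJS sketch: detect faces in a single image file
import * as fs from 'fs';
import { Human } from '@vladmandic/human';

async function main() {
  const human = new Human();
  const buffer = fs.readFileSync('input.jpg'); // illustrative input file
  const tensor = human.tf.node.decodeImage(buffer); // tf.node is available when tfjs-node is loaded
  const result = await human.detect(tensor);
  console.log('faces:', result.face.length, 'bodies:', result.body.length);
  human.tf.dispose(tensor); // input tensors must be disposed manually
}
main();
```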
## Project pages
- [**Code Repository**](https://github.com/vladmandic/human)
- [**NPM Package**](https://www.npmjs.com/package/@vladmandic/human)
- [**Issues Tracker**](https://github.com/vladmandic/human/issues)
-- [**TypeDoc API Specification**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
- [**TypeDoc API Specification - Main class**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
- [**TypeDoc API Specification - Full**](https://vladmandic.github.io/human/typedoc/)
- [**Change Log**](https://github.com/vladmandic/human/blob/main/CHANGELOG.md)
- [**Current To-do List**](https://github.com/vladmandic/human/blob/main/TODO.md)
@ -58,8 +114,11 @@ Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) ap
- [**Home**](https://github.com/vladmandic/human/wiki)
- [**Installation**](https://github.com/vladmandic/human/wiki/Install)
- [**Usage & Functions**](https://github.com/vladmandic/human/wiki/Usage)
-- [**Configuration Details**](https://github.com/vladmandic/human/wiki/Configuration)
- [**Configuration Details**](https://github.com/vladmandic/human/wiki/Config)
-- [**Output Details**](https://github.com/vladmandic/human/wiki/Outputs)
- [**Result Details**](https://github.com/vladmandic/human/wiki/Result)
- [**Customizing Draw Methods**](https://github.com/vladmandic/human/wiki/Draw)
- [**Caching & Smoothing**](https://github.com/vladmandic/human/wiki/Caching)
- [**Input Processing**](https://github.com/vladmandic/human/wiki/Image)
- [**Face Recognition & Face Description**](https://github.com/vladmandic/human/wiki/Embedding)
- [**Gesture Recognition**](https://github.com/vladmandic/human/wiki/Gesture)
- [**Common Issues**](https://github.com/vladmandic/human/wiki/Issues)
@ -89,27 +148,24 @@ Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) ap
<hr><br>
-## Examples
## App Examples
-Visit [Examples galery](https://vladmandic.github.io/human/samples/samples.html) for more examples
Visit [Examples gallery](https://vladmandic.github.io/human/samples/index.html) for more examples
-<https://vladmandic.github.io/human/samples/samples.html>
-![samples](assets/samples.jpg)
[<img src="assets/samples.jpg" width="640"/>](assets/samples.jpg)
<br>
## Options
All options as presented in the demo application...
-> [demo/index.html](demo/index.html)
[demo/index.html](demo/index.html)
-![Options visible in demo](assets/screenshot-menu.png)
[<img src="assets/screenshot-menu.png"/>](assets/screenshot-menu.png)
<br>
**Results Browser:**
[ *Demo -> Display -> Show Results* ]<br>
-![Results](assets/screenshot-results.png)
[<img src="assets/screenshot-results.png"/>](assets/screenshot-results.png)
<br>
@ -121,26 +177,47 @@ sorts them by similarity to selected face
and optionally matches detected face with database of known people to guess their names
> [demo/facematch](demo/facematch/index.html)
-![Face Matching](assets/screenshot-facematch.jpg)
[<img src="assets/screenshot-facematch.jpg" width="640"/>](assets/screenshot-facematch.jpg)
2. **Face Detect:**
Extracts all detected faces from loaded images on-demand and highlights face details on a selected face
> [demo/facedetect](demo/facedetect/index.html)
[<img src="assets/screenshot-facedetect.jpg" width="640"/>](assets/screenshot-facedetect.jpg)
3. **Face ID:**
Performs validation check on a webcam input to detect a real face and matches it to known faces stored in database
> [demo/faceid](demo/faceid/index.html)
[<img src="assets/screenshot-faceid.jpg" width="640"/>](assets/screenshot-faceid.jpg)
<br>
-2. **Face3D OpenGL Rendering:**
4. **3D Rendering:**
-> [demo/face3d](demo/face3d/index.html)
> [human-motion](https://github.com/vladmandic/human-motion)
-![Face Matching](assets/screenshot-face3d.jpg)
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-face.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-face.jpg)
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-body.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-body.jpg)
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-hand.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-hand.jpg)
<br>
-3. **VR Model Tracking:**
5. **VR Model Tracking:**
-![vrmodel](assets/screenshot-vrm.jpg)
> [human-three-vrm](https://github.com/vladmandic/human-three-vrm)
> [human-bjs-vrm](https://github.com/vladmandic/human-bjs-vrm)
[<img src="https://github.com/vladmandic/human-three-vrm/raw/main/assets/human-vrm-screenshot.jpg" width="640"/>](https://github.com/vladmandic/human-three-vrm/raw/main/assets/human-vrm-screenshot.jpg)
6. **Human as OS native application:**
> [human-electron](https://github.com/vladmandic/human-electron)
<br>
**468-Point Face Mesh Details:**
(view in full resolution to see keypoints)
-![FaceMesh](assets/facemesh.png)
[<img src="assets/facemesh.png" width="400"/>](assets/facemesh.png)
<br><hr><br>
@ -150,44 +227,25 @@ Simply load `Human` (*IIFE version*) directly from a cloud CDN in your HTML file
(pick one: `jsdelivr`, `unpkg` or `cdnjs`)
```html
-<!DOCTYPE HTML>
<script src="https://cdn.jsdelivr.net/npm/@vladmandic/human/dist/human.js"></script>
<script src="https://unpkg.dev/@vladmandic/human/dist/human.js"></script>
-<script src="https://cdnjs.cloudflare.com/ajax/libs/human/2.1.5/human.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/human/3.0.0/human.js"></script>
```
For details, including how to use `Browser ESM` version or `NodeJS` version of `Human`, see [**Installation**](https://github.com/vladmandic/human/wiki/Install)
<br>
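
For the `Browser ESM` variant mentioned above, a minimal sketch (the bare package import assumes a bundler; exact setup is covered in the Install wiki):

```ts
// minimal sketch: Browser ESM usage instead of the IIFE script tag
import { Human } from '@vladmandic/human'; // typically resolves to dist/human.esm.js
const human = new Human();
```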
-## Inputs
-`Human` library can process all known input types:
-- `Image`, `ImageData`, `ImageBitmap`, `Canvas`, `OffscreenCanvas`, `Tensor`,
-- `HTMLImageElement`, `HTMLCanvasElement`, `HTMLVideoElement`, `HTMLMediaElement`
-Additionally, `HTMLVideoElement`, `HTMLMediaElement` can be a standard `<video>` tag that links to:
-- WebCam on user's system
-- Any supported video type
-For example: `.mp4`, `.avi`, etc.
-- Additional video types supported via *HTML5 Media Source Extensions*
-Live streaming examples:
-- **HLS** (*HTTP Live Streaming*) using `hls.js`
-- **DASH** (Dynamic Adaptive Streaming over HTTP) using `dash.js`
-- **WebRTC** media track using built-in support
-<br>
-## Example
-Example simple app that uses Human to process video input and
## Code Examples
Simple app that uses Human to process video input and
draw output on screen using internal draw helper functions
```js
// create instance of human with simple configuration using default values
const config = { backend: 'webgl' };
-const human = new Human(config);
const human = new Human.Human(config);
// select input HTMLVideoElement and output HTMLCanvasElement from page
const inputVideo = document.getElementById('video-id');
const outputCanvas = document.getElementById('canvas-id');
@ -206,6 +264,7 @@ function detectVideo() {
human.draw.gesture(outputCanvas, result.gesture);
// and loop immediate to the next frame
requestAnimationFrame(detectVideo);
return result;
});
}
@ -266,7 +325,7 @@ async function detectVideo() {
async function drawVideo() {
if (result) { // check if result is available
-const interpolated = human.next(result); // calculate next interpolated frame
const interpolated = human.next(result); // get smoothened result using last-known results
human.draw.all(outputCanvas, interpolated); // draw the frame
}
requestAnimationFrame(drawVideo); // run draw loop
@ -276,27 +335,108 @@ detectVideo(); // start detection loop
drawVideo(); // start draw loop
```
or same, but using built-in full video processing instead of running manual frame-by-frame loop:
```js
const human = new Human(); // create instance of Human
const inputVideo = document.getElementById('video-id');
const outputCanvas = document.getElementById('canvas-id');
async function drawResults() {
const interpolated = human.next(); // get smoothened result using last-known results
human.draw.all(outputCanvas, interpolated); // draw the frame
requestAnimationFrame(drawResults); // run draw loop
}
human.video(inputVideo); // start detection loop which continuously updates results
drawResults(); // start draw loop
```
or using built-in webcam helper methods that take care of video handling completely:
```js
const human = new Human(); // create instance of Human
const outputCanvas = document.getElementById('canvas-id');
async function drawResults() {
const interpolated = human.next(); // get smoothened result using last-known results
human.draw.canvas(outputCanvas, human.webcam.element); // draw current webcam frame
human.draw.all(outputCanvas, interpolated); // draw the frame detection results
requestAnimationFrame(drawResults); // run draw loop
}
await human.webcam.start({ crop: true });
human.video(human.webcam.element); // start detection loop which continuously updates results
drawResults(); // start draw loop
```
And for even better results, you can run detection in a separate web worker thread
<br><hr><br>
## Inputs
`Human` library can process all known input types:
- `Image`, `ImageData`, `ImageBitmap`, `Canvas`, `OffscreenCanvas`, `Tensor`,
- `HTMLImageElement`, `HTMLCanvasElement`, `HTMLVideoElement`, `HTMLMediaElement`
Additionally, `HTMLVideoElement`, `HTMLMediaElement` can be a standard `<video>` tag that links to:
- WebCam on user's system
- Any supported video type
e.g. `.mp4`, `.avi`, etc.
- Additional video types supported via *HTML5 Media Source Extensions*
e.g.: **HLS** (*HTTP Live Streaming*) using `hls.js` or **DASH** (*Dynamic Adaptive Streaming over HTTP*) using `dash.js` (see the sketch below)
- **WebRTC** media track using built-in support
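For example, a live **HLS** stream can be attached to a standard `<video>` element using `hls.js` and then passed to `Human` like any other input. A minimal sketch, assuming `hls.js` is already loaded and using a placeholder stream URL:
```js
const human = new Human(); // create instance of Human, as in the examples above
const video = document.getElementById('video-id'); // standard <video> element
const outputCanvas = document.getElementById('canvas-id');
if (Hls.isSupported()) {
  const hls = new Hls();
  hls.loadSource('https://example.com/live/stream.m3u8'); // placeholder stream url
  hls.attachMedia(video); // hls.js feeds the stream via media source extensions
}
async function detectLoop() {
  const result = await human.detect(video); // video element is a regular input
  human.draw.all(outputCanvas, result); // draw detection results
  requestAnimationFrame(detectLoop);
}
video.onplaying = () => detectLoop();
```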
<br><hr><br>
## Detailed Usage
- [**Wiki Home**](https://github.com/vladmandic/human/wiki)
- [**List of all available methods, properties and namespaces**](https://github.com/vladmandic/human/wiki/Usage)
- [**TypeDoc API Specification - Main class**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
- [**TypeDoc API Specification - Full**](https://vladmandic.github.io/human/typedoc/)
![typedoc](assets/screenshot-typedoc.png)
<br><hr><br>
## TypeDefs
`Human` is written using TypeScript strong typing and ships with full **TypeDefs** for all classes defined by the library, bundled in `types/human.d.ts` and enabled by default
*Note*: This does not include embedded `tfjs`
If you want to use embedded `tfjs` inside `Human` (`human.tf` namespace) and still have full **typedefs**, add this code:
> import type * as tfjs from '@vladmandic/human/dist/tfjs.esm';
> const tf = human.tf as typeof tfjs;
This is not enabled by default as `Human` does not ship with full **TFJS TypeDefs** due to size considerations
Enabling `tfjs` TypeDefs as above creates additional project dependencies (dev-only, as only types are required) as defined in `@vladmandic/human/dist/tfjs.esm.d.ts`:
> @tensorflow/tfjs-core, @tensorflow/tfjs-converter, @tensorflow/tfjs-backend-wasm, @tensorflow/tfjs-backend-webgl
<br><hr><br>

## Default models

Default models in Human library are:

- **Face Detection**: *MediaPipe BlazeFace Back variation*
- **Face Mesh**: *MediaPipe FaceMesh*
- **Face Iris Analysis**: *MediaPipe Iris*
- **Face Description**: *HSE FaceRes*
- **Emotion Detection**: *Oarriaga Emotion*
- **Body Analysis**: *MoveNet Lightning variation*
- **Hand Analysis**: *HandTrack & MediaPipe HandLandmarks*
- **Body Segmentation**: *Google Selfie*
- **Object Detection**: *CenterNet with MobileNet v3*

Note that alternative models are provided and can be enabled via configuration
For example, body pose detection by default uses *MoveNet Lightning*, but can be switched to *MoveNet Thunder* for higher precision or *MoveNet MultiPose* for multi-person detection, or even *PoseNet*, *BlazePose* or *EfficientPose*, depending on the use case
For more info, see [**Configuration Details**](https://github.com/vladmandic/human/wiki/Configuration) and [**List of Models**](https://github.com/vladmandic/human/wiki/Models)
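As a minimal sketch, switching the default body model is just a configuration change (assuming the alternative model file is available under the configured `modelBasePath`):
```js
// switch body detection from default movenet-lightning to movenet-thunder
const human = new Human({
  modelBasePath: 'https://vladmandic.github.io/human/models',
  body: { enabled: true, modelPath: 'movenet-thunder.json' },
});
```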
@ -308,9 +448,9 @@ For more info, see [**Configuration Details**](https://github.com/vladmandic/hum
<br><hr><br>

`Human` library is written in [TypeScript](https://www.typescriptlang.org/docs/handbook/intro.html) **5.1** using [TensorFlow/JS](https://www.tensorflow.org/js/) **4.10** and conforming to the latest `JavaScript` [ECMAScript version 2022](https://262.ecma-international.org/) standard
Build target for distributables is `JavaScript` [ECMAScript version 2018](https://262.ecma-international.org/9.0/)

<br>
@ -319,6 +459,7 @@ and [**API Specification**](https://vladmandic.github.io/human/typedoc/classes/H
<br>

[![](https://img.shields.io/static/v1?label=Sponsor&message=%E2%9D%A4&logo=GitHub&color=%23fe8e86)](https://github.com/sponsors/vladmandic)
![Stars](https://img.shields.io/github/stars/vladmandic/human?style=flat-square&svg=true)
![Forks](https://badgen.net/github/forks/vladmandic/human)
![Code Size](https://img.shields.io/github/languages/code-size/vladmandic/human?style=flat-square&svg=true)

99
TODO.md
View File

@ -1,81 +1,38 @@
# To-Do list for Human library

## Work-in-Progress

<hr><br>

## Known Issues & Limitations

### Face with Attention
`FaceMesh-Attention` is not supported when using `WASM` backend due to missing kernel op in **TFJS**
No issues with default model `FaceMesh`

### Object Detection
`NanoDet` model is not supported when using `WASM` backend due to missing kernel op in **TFJS**
No issues with default model `MB3-CenterNet`

### Body Detection using MoveNet-MultiPose
Model does not return valid detection scores (all other functionality is not impacted)

### Firefox
Running in **web workers** requires `OffscreenCanvas` which is still disabled by default in **Firefox**
Enable via `about:config` -> `gfx.offscreencanvas.enabled`
[Details](https://developer.mozilla.org/en-US/docs/Web/API/OffscreenCanvas#browser_compatibility)

### Safari
No support for running in **web workers** as Safari still does not support `OffscreenCanvas`
[Details](https://developer.mozilla.org/en-US/docs/Web/API/OffscreenCanvas#browser_compatibility)

## React-Native
`Human` support for **React-Native** is best-effort, but not part of the main development focus

<hr><br>


153
build.js Normal file
View File

@ -0,0 +1,153 @@
const fs = require('fs');
const path = require('path');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const Build = require('@vladmandic/build').Build; // eslint-disable-line node/no-unpublished-require
const APIExtractor = require('@microsoft/api-extractor'); // eslint-disable-line node/no-unpublished-require
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
const packageJSON = require('./package.json');
const logFile = 'test/build.log';
const modelsOut = 'models/models.json';
const modelsFolders = [
'./models',
'../human-models/models',
'../blazepose/model/',
'../anti-spoofing/model',
'../efficientpose/models',
'../insightface/models',
'../movenet/models',
'../nanodet/models',
];
const apiExtractorIgnoreList = [ // eslint-disable-line no-unused-vars
'ae-missing-release-tag',
'tsdoc-param-tag-missing-hyphen',
'tsdoc-escape-right-brace',
'tsdoc-undefined-tag',
'tsdoc-escape-greater-than',
'ae-unresolved-link',
'ae-forgotten-export',
'tsdoc-malformed-inline-tag',
'tsdoc-unnecessary-backslash',
];
const regEx = [
{ search: 'types="@webgpu/types/dist"', replace: 'path="../src/types/webgpu.d.ts"' },
{ search: 'types="offscreencanvas"', replace: 'path="../src/types/offscreencanvas.d.ts"' },
];
function copyFile(src, dst) {
if (!fs.existsSync(src)) {
log.warn('Copy:', { input: src, output: dst });
return;
}
log.state('Copy:', { input: src, output: dst });
const buffer = fs.readFileSync(src);
fs.writeFileSync(dst, buffer);
}
function writeFile(str, dst) {
log.state('Write:', { output: dst });
fs.writeFileSync(dst, str);
}
function regExFile(src, entries) {
if (!fs.existsSync(src)) {
log.warn('Filter:', { src });
return;
}
log.state('Filter:', { input: src });
for (const entry of entries) {
const buffer = fs.readFileSync(src, 'UTF-8');
const lines = buffer.split(/\r?\n/);
const out = [];
for (const line of lines) {
if (line.includes(entry.search)) out.push(line.replace(entry.search, entry.replace));
else out.push(line);
}
fs.writeFileSync(src, out.join('\n'));
}
}
async function analyzeModels() {
log.info('Analyze models:', { folders: modelsFolders.length, result: modelsOut });
let totalSize = 0;
const models = {};
const allModels = [];
for (const folder of modelsFolders) {
try {
if (!fs.existsSync(folder)) continue;
const stat = fs.statSync(folder);
if (!stat.isDirectory()) continue; // skip entries that are not directories
const dir = fs.readdirSync(folder);
const found = dir.map((f) => `file://${folder}/${f}`).filter((f) => f.endsWith('json'));
log.state('Models', { folder, models: found.length });
allModels.push(...found);
} catch {
// log.warn('Cannot enumerate:', modelFolder);
}
}
for (const url of allModels) {
// if (!f.endsWith('.json')) continue;
// const url = `file://${modelsDir}/${f}`;
const model = new tf.GraphModel(url); // create model prototype and decide if load from cache or from original modelurl
model.findIOHandler();
const artifacts = await model.handler.load();
const size = artifacts?.weightData?.byteLength || 0;
totalSize += size;
const name = path.basename(url).replace('.json', '');
if (!models[name]) models[name] = size;
}
const json = JSON.stringify(models, null, 2);
fs.writeFileSync(modelsOut, json);
log.state('Models:', { count: Object.keys(models).length, totalSize });
}
async function main() {
log.logFile(logFile);
log.data('Build', { name: packageJSON.name, version: packageJSON.version });
// run production build
const build = new Build();
await build.run('production');
// patch tfjs typedefs
copyFile('node_modules/@vladmandic/tfjs/types/tfjs-core.d.ts', 'types/tfjs-core.d.ts');
copyFile('node_modules/@vladmandic/tfjs/types/tfjs.d.ts', 'types/tfjs.esm.d.ts');
copyFile('src/types/tsconfig.json', 'types/tsconfig.json');
copyFile('src/types/eslint.json', 'types/.eslintrc.json');
copyFile('src/types/tfjs.esm.d.ts', 'dist/tfjs.esm.d.ts');
regExFile('types/tfjs-core.d.ts', regEx);
// run api-extractor to create typedef rollup
const extractorConfig = APIExtractor.ExtractorConfig.loadFileAndPrepare('.api-extractor.json');
try {
const extractorResult = APIExtractor.Extractor.invoke(extractorConfig, {
localBuild: true,
showVerboseMessages: false,
messageCallback: (msg) => {
msg.handled = true;
if (msg.logLevel === 'none' || msg.logLevel === 'verbose' || msg.logLevel === 'info') return;
if (msg.sourceFilePath?.includes('/node_modules/')) return;
// if (apiExtractorIgnoreList.reduce((prev, curr) => prev || msg.messageId.includes(curr), false)) return; // those are external issues outside of human control
log.data('API', { level: msg.logLevel, category: msg.category, id: msg.messageId, file: msg.sourceFilePath, line: msg.sourceFileLine, text: msg.text });
},
});
log.state('API-Extractor:', { succeeded: extractorResult.succeeded, errors: extractorResult.errorCount, warnings: extractorResult.warningCount });
} catch (err) {
log.error('API-Extractor:', err);
}
regExFile('types/human.d.ts', regEx);
writeFile('export * from \'../types/human\';', 'dist/human.esm-nobundle.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.esm.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.node-gpu.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.node.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.node-wasm.d.ts');
// generate model signature
await analyzeModels();
log.info('Human Build complete...', { logFile });
}
main();

View File

@ -1,5 +1,67 @@
# Human Library: Demos

For details on other demos see Wiki: [**Demos**](https://github.com/vladmandic/human/wiki/Demos)

## Main Demo

`index.html`: Full demo using `Human` ESM module running in browsers
Includes:
- Selectable inputs:
- Sample images
- Image via drag & drop
- Image via URL param
- WebCam input
- Video stream
- WebRTC stream
- Selectable active `Human` modules
- With interactive module params
- Interactive `Human` image filters
- Selectable interactive `results` browser
- Selectable `backend`
- Multiple execution methods:
- Sync vs Async
- in main thread or web worker
- live on git pages, on user-hosted web server or via included [**micro http2 server**](https://github.com/vladmandic/human/wiki/Development-Server)
### Demo Options
- General `Human` library options
in `index.js:userConfig`
- General `Human` `draw` options
in `index.js:drawOptions`
- Demo PWA options
in `index.js:pwa`
- Demo specific options
in `index.js:ui`
```js
const ui = {
console: true, // log messages to browser console
useWorker: true, // use web workers for processing
buffered: true, // should output be buffered between frames
interpolated: true, // should output be interpolated for smoothness between frames
results: false, // show results tree
useWebRTC: false, // use webrtc as camera source instead of local webcam
};
```
The demo implements several ways to use the `Human` library.
### URL Params
Demo app can use URL parameters to override configuration values (see the sketch after these examples)
For example:
- Force using `WASM` as backend: <https://vladmandic.github.io/human/demo/index.html?backend=wasm>
- Enable `WebWorkers`: <https://vladmandic.github.io/human/demo/index.html?worker=true>
- Skip pre-loading and warming up: <https://vladmandic.github.io/human/demo/index.html?preload=false&warmup=false>
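Under the hood such overrides can be as simple as merging parsed URL parameters into the user configuration; a rough sketch only, the demo's actual parsing lives in `index.js`:
```js
// rough sketch: override config values from url parameters
const params = new URLSearchParams(window.location.search);
const userConfig = {};
if (params.has('backend')) userConfig.backend = params.get('backend'); // e.g. ?backend=wasm
const human = new Human(userConfig);
```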
### WebRTC
Note that a WebRTC connection requires a WebRTC server that provides a compatible media track, such as an H.264 video track
For such a WebRTC server implementation see the <https://github.com/vladmandic/stream-rtsp> project,
which implements a connection to an IP security camera using the RTSP protocol and transcodes it to WebRTC,
ready to be consumed by a client such as `Human`

View File

@ -1,30 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<meta name="viewport" content="width=device-width" id="viewport">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script type="module" src="browser.js"></script>
<style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
body::-webkit-scrollbar { display: none; }
.status { position: absolute; width: 100vw; bottom: 10%; text-align: center; font-size: 3rem; font-weight: 100; text-shadow: 2px 2px #303030; }
.log { position: absolute; bottom: 0; margin: 0.4rem 0.4rem 0 0.4rem; font-size: 0.9rem; }
</style>
</head>
<body>
<div id="status" class="status"></div>
<img id="image" src="../../samples/groups/group1.jpg" style="display: none"></img>
<div id="log" class="log"></div>
</body>
</html>

View File

@ -1,51 +0,0 @@
// import * as tf from '../../assets/tf.es2017.js';
// import '../../assets/tf-backend-webgpu.es2017.js';
import Human from '../../dist/human.esm.js';
const loop = 20;
// eslint-disable-next-line no-console
const log = (...msg) => console.log(...msg);
const myConfig = {
backend: 'humangl',
modelBasePath: 'https://vladmandic.github.io/human/models',
wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
debug: true,
async: true,
cacheSensitivity: 0,
filter: { enabled: false },
face: {
enabled: true,
detector: { enabled: true, rotation: false },
mesh: { enabled: true },
iris: { enabled: true },
description: { enabled: true },
emotion: { enabled: false },
},
hand: { enabled: true, rotation: false },
body: { enabled: true },
object: { enabled: false },
};
async function main() {
const human = new Human(myConfig);
await human.tf.ready();
log('Human:', human.version);
await human.load();
const loaded = Object.keys(human.models).filter((a) => human.models[a]);
log('Loaded:', loaded);
log('Memory state:', human.tf.engine().memory());
const element = document.getElementById('image');
const processed = await human.image(element);
const t0 = performance.now();
await human.detect(processed.tensor, myConfig);
const t1 = performance.now();
log('Backend:', human.tf.getBackend());
log('Warmup:', Math.round(t1 - t0));
for (let i = 0; i < loop; i++) await human.detect(processed.tensor, myConfig);
const t2 = performance.now();
log('Average:', Math.round((t2 - t1) / loop));
}
main();

View File

@ -1,71 +0,0 @@
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
const tf = require('@tensorflow/tfjs-node-gpu');
const log = require('@vladmandic/pilogger');
const canvasJS = require('canvas');
const Human = require('../../dist/human.node-gpu.js').default;
const input = 'samples/groups/group1.jpg';
const loop = 20;
const myConfig = {
backend: 'tensorflow',
modelBasePath: 'https://vladmandic.github.io/human/models',
wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
debug: true,
async: true,
cacheSensitivity: 0,
filter: { enabled: false },
face: {
enabled: true,
detector: { enabled: true, rotation: false },
mesh: { enabled: true },
iris: { enabled: true },
description: { enabled: true },
emotion: { enabled: true },
},
hand: {
enabled: true,
},
body: { enabled: true },
object: { enabled: false },
};
async function getImage(human) {
const img = await canvasJS.loadImage(input);
const canvas = canvasJS.createCanvas(img.width, img.height);
const ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0, img.width, img.height);
const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
const res = human.tf.tidy(() => {
const tensor = human.tf.tensor(Array.from(imageData.data), [canvas.height, canvas.width, 4], 'int32'); // create rgba image tensor from flat array
const channels = human.tf.split(tensor, 4, 2); // split rgba to channels
const rgb = human.tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb
const reshape = human.tf.reshape(rgb, [1, canvas.height, canvas.width, 3]); // move extra dim from the end of tensor and use it as batch number instead
return reshape;
});
log.info('Image:', input, res.shape);
return res;
}
async function main() {
log.header();
const human = new Human(myConfig);
await human.tf.ready();
log.info('Human:', human.version);
await human.load();
const loaded = Object.keys(human.models).filter((a) => human.models[a]);
log.info('Loaded:', loaded);
log.info('Memory state:', human.tf.engine().memory());
const tensor = await getImage(human);
log.state('Processing:', tensor['shape']);
const t0 = performance.now();
await human.detect(tensor, myConfig);
const t1 = performance.now();
log.state('Backend:', human.tf.getBackend());
log.data('Warmup:', Math.round(t1 - t0));
for (let i = 0; i < loop; i++) await human.detect(tensor, myConfig);
const t2 = performance.now();
log.data('Average:', Math.round((t2 - t1) / loop));
}
main();

View File

@ -1,189 +0,0 @@
// @ts-nocheck // typescript checks disabled as this is pure javascript
/**
* Human demo for browsers
*
* Demo for face mesh detection and projection as 3D object using Three.js
*/
import { DoubleSide, Mesh, MeshBasicMaterial, OrthographicCamera, Scene, sRGBEncoding, VideoTexture, WebGLRenderer, BufferGeometry, BufferAttribute } from '../helpers/three.js';
import { OrbitControls } from '../helpers/three-orbitControls.js';
import Human from '../../dist/human.esm.js'; // equivalent of @vladmandic/human
const userConfig = {
backend: 'wasm',
async: false,
profile: false,
warmup: 'full',
modelBasePath: '../../models/',
// wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
filter: { enabled: false },
face: { enabled: true,
detector: { rotation: false, maxDetected: 1 },
mesh: { enabled: true },
iris: { enabled: true },
description: { enabled: false },
emotion: { enabled: false },
},
hand: { enabled: false },
gesture: { enabled: false },
body: { enabled: false },
object: { enabled: false },
};
const human = new Human(userConfig);
const wireframe = true; // enable wireframe overlay
const canvas = document.getElementById('canvas');
let width = 0;
let height = 0;
const renderer = new WebGLRenderer({ antialias: true, alpha: true, canvas });
renderer.setClearColor(0x000000);
renderer.outputEncoding = sRGBEncoding;
const camera = new OrthographicCamera();
const controls = new OrbitControls(camera, renderer.domElement); // pan&zoom controls
controls.enabled = true;
const materialWireFrame = new MeshBasicMaterial({ // create wireframe material
color: 0xffaaaa,
wireframe: true,
});
const materialFace = new MeshBasicMaterial({ // create material for mask
color: 0xffffff,
map: null, // will be created when the video is ready.
side: DoubleSide,
});
class FaceGeometry extends BufferGeometry {
constructor(triangulation) {
super();
this.positions = new Float32Array(478 * 3);
this.uvs = new Float32Array(478 * 2);
this.setAttribute('position', new BufferAttribute(this.positions, 3));
this.setAttribute('uv', new BufferAttribute(this.uvs, 2));
this.setIndex(triangulation);
}
update(face) {
let ptr = 0;
for (const p of face.mesh) {
this.positions[ptr + 0] = -p[0] + width / 2;
this.positions[ptr + 1] = height - p[1] - height / 2;
this.positions[ptr + 2] = -p[2];
ptr += 3;
}
ptr = 0;
for (const p of face.meshRaw) {
this.uvs[ptr + 0] = 0 + p[0];
this.uvs[ptr + 1] = 1 - p[1];
ptr += 2;
}
materialFace.map.update(); // update textures from video
this.attributes.position.needsUpdate = true; // vertices
this.attributes.uv.needsUpdate = true; // textures
this.computeVertexNormals();
}
}
const scene = new Scene();
const faceGeometry = new FaceGeometry(human.faceTriangulation); // create a new geometry helper
const mesh = new Mesh(faceGeometry, materialFace); // create mask mesh
scene.add(mesh);
function resize(input) {
width = input.videoWidth;
height = input.videoHeight;
camera.left = -width / 2;
camera.right = width / 2;
camera.top = height / 2;
camera.bottom = -height / 2;
camera.near = -100;
camera.far = 100;
camera.zoom = 2;
camera.updateProjectionMatrix();
renderer.setSize(width, height);
}
const isLive = (input) => input.srcObject && (input.srcObject.getVideoTracks()[0].readyState === 'live') && (input.readyState > 2) && (!input.paused);
async function render(input) {
if (isLive(input)) {
if (width !== input.videoWidth || height !== input.videoHeight) resize(input); // resize orthographic camera to video dimensions if necessary
const res = await human.detect(input);
if (res?.face?.length > 0) {
faceGeometry.update(res.face[0]);
// render the mask
mesh.material = materialFace;
renderer.autoClear = true;
renderer.render(scene, camera);
if (wireframe) { // overlay wireframe
mesh.material = materialWireFrame;
renderer.autoClear = false;
renderer.render(scene, camera);
}
}
}
requestAnimationFrame(() => render(input));
}
// setup webcam
async function setupCamera() {
if (!navigator.mediaDevices) return null;
const video = document.getElementById('video');
canvas.addEventListener('click', () => {
if (isLive(video)) video.pause();
else video.play();
});
const constraints = {
audio: false,
video: { facingMode: 'user', resizeMode: 'crop-and-scale' },
};
if (window.innerWidth > window.innerHeight) constraints.video.width = { ideal: window.innerWidth };
else constraints.video.height = { ideal: window.innerHeight };
const stream = await navigator.mediaDevices.getUserMedia(constraints);
if (stream) video.srcObject = stream;
else return null;
// get information data
const track = stream.getVideoTracks()[0];
const settings = track.getSettings();
// log('camera constraints:', constraints, 'window:', { width: window.innerWidth, height: window.innerHeight }, 'settings:', settings, 'track:', track);
const engineData = human.tf.engine();
const gpuData = (engineData.backendInstance && engineData.backendInstance.numBytesInGPU > 0) ? `gpu: ${(engineData.backendInstance.numBytesInGPU ? engineData.backendInstance.numBytesInGPU : 0).toLocaleString()} bytes` : '';
const cameraData = { name: track.label?.toLowerCase(), width: settings.width, height: settings.height, facing: settings.facingMode === 'user' ? 'front' : 'back' };
const memoryData = `system: ${engineData.state.numBytes.toLocaleString()} bytes ${gpuData} | tensors: ${engineData.state.numTensors.toLocaleString()}`;
document.getElementById('log').innerHTML = `
video: ${cameraData.name} | facing: ${cameraData.facing} | screen: ${window.innerWidth} x ${window.innerHeight} camera: ${cameraData.width} x ${cameraData.height}<br>
backend: ${human.tf.getBackend()} | ${memoryData}<br>
`;
// return when camera is ready
return new Promise((resolve) => {
video.onloadeddata = async () => {
video.width = video.videoWidth;
video.height = video.videoHeight;
canvas.width = video.width;
canvas.height = video.height;
video.play();
resolve(video);
};
});
}
async function main() {
window.addEventListener('unhandledrejection', (evt) => {
// eslint-disable-next-line no-console
console.error(evt.reason || evt);
document.getElementById('log').innerHTML = evt?.reason?.message || evt?.reason || evt;
evt.preventDefault();
});
await human.load();
const video = await setupCamera();
if (video) {
const videoTexture = new VideoTexture(video); // now load textures from video
videoTexture.encoding = sRGBEncoding;
materialFace.map = videoTexture;
render(video);
}
}
window.onload = main;

View File

@ -0,0 +1,160 @@
/**
* Human demo for browsers
*
* Demo for face detection
*/
/** @type {Human} */
import { Human } from '../../dist/human.esm.js';
let loader;
const humanConfig = { // user configuration for human, used to fine-tune behavior
cacheSensitivity: 0,
debug: true,
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
filter: { enabled: true, equalization: false, flip: false },
face: {
enabled: true,
detector: { rotation: false, maxDetected: 100, minConfidence: 0.2, return: true, square: false },
iris: { enabled: true },
description: { enabled: true },
emotion: { enabled: true },
antispoof: { enabled: true },
liveness: { enabled: true },
},
body: { enabled: false },
hand: { enabled: false },
object: { enabled: false },
gesture: { enabled: false },
segmentation: { enabled: false },
};
const human = new Human(humanConfig); // new instance of human
export const showLoader = (msg) => { loader.setAttribute('msg', msg); loader.style.display = 'block'; };
export const hideLoader = () => loader.style.display = 'none';
class ComponentLoader extends HTMLElement { // watch for attributes
message = document.createElement('div');
static get observedAttributes() { return ['msg']; }
attributeChangedCallback(_name, _prevVal, currVal) {
this.message.innerHTML = currVal;
}
connectedCallback() { // triggered on insert
this.attachShadow({ mode: 'open' });
const css = document.createElement('style');
css.innerHTML = `
.loader-container { top: 450px; justify-content: center; position: fixed; width: 100%; }
.loader-message { font-size: 1.5rem; padding: 1rem; }
.loader { width: 300px; height: 300px; border: 3px solid transparent; border-radius: 50%; border-top: 4px solid #f15e41; animation: spin 4s linear infinite; position: relative; }
.loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: 4px solid transparent; }
.loader::before { border-top-color: #bad375; animation: 3s spin linear infinite; }
.loader::after { border-top-color: #26a9e0; animation: spin 1.5s linear infinite; }
@keyframes spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } }
`;
const container = document.createElement('div');
container.id = 'loader-container';
container.className = 'loader-container';
loader = document.createElement('div');
loader.id = 'loader';
loader.className = 'loader';
this.message.id = 'loader-message';
this.message.className = 'loader-message';
this.message.innerHTML = '';
container.appendChild(this.message);
container.appendChild(loader);
this.shadowRoot?.append(css, container);
loader = this; // eslint-disable-line @typescript-eslint/no-this-alias
}
}
customElements.define('component-loader', ComponentLoader);
function addFace(face, source) {
const deg = (rad) => Math.round((rad || 0) * 180 / Math.PI);
const canvas = document.createElement('canvas');
const emotion = face.emotion?.map((e) => `${Math.round(100 * e.score)}% ${e.emotion}`) || [];
const rotation = `pitch ${deg(face.rotation?.angle.pitch)}° | roll ${deg(face.rotation?.angle.roll)}° | yaw ${deg(face.rotation?.angle.yaw)}°`;
const gaze = `direction ${deg(face.rotation?.gaze.bearing)}° strength ${Math.round(100 * (face.rotation.gaze.strength || 0))}%`;
canvas.title = `
source: ${source}
score: ${Math.round(100 * face.boxScore)}% detection ${Math.round(100 * face.faceScore)}% analysis
age: ${face.age} years | gender: ${face.gender} score ${Math.round(100 * face.genderScore)}%
emotion: ${emotion.join(' | ')}
head rotation: ${rotation}
eyes gaze: ${gaze}
camera distance: ${face.distance}m | ${Math.round(100 * face.distance / 2.54)}in
check: ${Math.round(100 * face.real)}% real ${Math.round(100 * face.live)}% live
`.replace(/ /g, ' ');
canvas.onclick = (e) => {
e.preventDefault();
document.getElementById('description').innerHTML = canvas.title;
};
human.draw.tensor(face.tensor, canvas);
human.tf.dispose(face.tensor);
return canvas;
}
async function addFaces(imgEl) {
showLoader('human: busy');
const faceEl = document.getElementById('faces');
faceEl.innerHTML = '';
const res = await human.detect(imgEl);
console.log(res); // eslint-disable-line no-console
document.getElementById('description').innerHTML = `detected ${res.face.length} faces`;
for (const face of res.face) {
const canvas = addFace(face, imgEl.src.substring(0, 64));
faceEl.appendChild(canvas);
}
hideLoader();
}
function addImage(imageUri) {
const imgEl = new Image(256, 256);
imgEl.onload = () => {
const images = document.getElementById('images');
images.appendChild(imgEl); // add image if loaded ok
images.scroll(images?.offsetWidth, 0);
};
imgEl.onerror = () => console.error('addImage', { imageUri }); // eslint-disable-line no-console
imgEl.onclick = () => addFaces(imgEl);
imgEl.title = imageUri.substring(0, 64);
imgEl.src = encodeURI(imageUri);
}
async function initDragAndDrop() {
const reader = new FileReader();
reader.onload = async (e) => {
if (e.target.result.startsWith('data:image')) await addImage(e.target.result);
};
document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
document.body.addEventListener('dragover', (evt) => evt.preventDefault());
document.body.addEventListener('drop', async (evt) => {
evt.preventDefault();
evt.dataTransfer.dropEffect = 'copy';
for (const f of evt.dataTransfer.files) reader.readAsDataURL(f);
});
document.body.onclick = (e) => {
if (e.target.localName !== 'canvas') document.getElementById('description').innerHTML = '';
};
}
async function main() {
showLoader('loading models');
await human.load();
showLoader('compiling models');
await human.warmup();
showLoader('loading images');
const images = ['group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg', 'solvay1927.jpg', 'stock-group-1.jpg', 'stock-group-2.jpg', 'stock-models-6.jpg', 'stock-models-7.jpg'];
const imageUris = images.map((a) => `../../samples/in/${a}`);
for (let i = 0; i < imageUris.length; i++) addImage(imageUris[i]);
initDragAndDrop();
hideLoader();
}
window.onload = main;

View File

@ -0,0 +1,43 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<!-- <meta http-equiv="content-type" content="text/html; charset=utf-8"> -->
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./facedetect.js" type="module"></script>
<style>
img { object-fit: contain; }
img:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 24px; font-variant: small-caps; }
body { margin: 24px; background: black; color: white; overflow: hidden; text-align: -webkit-center; width: 100vw; height: 100vh; }
::-webkit-scrollbar { height: 8px; border: 0; border-radius: 0; }
::-webkit-scrollbar-thumb { background: grey }
::-webkit-scrollbar-track { margin: 3px; }
canvas { width: 192px; height: 192px; margin: 2px; padding: 2px; cursor: grab; transform: scale(1.00); transition : all 0.3s ease; }
canvas:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
</style>
</head>
<body>
<component-loader></component-loader>
<div style="display: flex">
<div>
<div style="margin: 24px">select image to show detected faces<br>drag & drop to add your images</div>
<div id="images" style="display: flex; width: 98vw; overflow-x: auto; overflow-y: hidden; scroll-behavior: smooth"></div>
</div>
</div>
<div id="list" style="height: 10px"></div>
<div style="margin: 24px">hover or click on face to show details</div>
<div id="faces" style="overflow-y: auto"></div>
<div id="description" style="white-space: pre;"></div>
</body>
</html>

42
demo/faceid/README.md Normal file
View File

@ -0,0 +1,42 @@
# Human Face Recognition: FaceID
`faceid` runs multiple checks to validate webcam input before performing face match
Detected face image and descriptor are stored in client-side IndexedDB
## Workflow
- Starts webcam
- Waits until input video contains validated face or timeout is reached
- Number of people
- Face size
- Face and gaze direction
- Detection scores
- Blink detection (including temporal check for blink speed) to verify live input
- Runs `antispoofing` optional module
- Runs `liveness` optional module
- Runs match against database of registered faces and presents best match with scores
## Notes
Both `antispoof` and `liveness` models are tiny and
designed to serve as a quick check when used together with other indicators:
- size below 1MB
- very quick inference times as they are very simple (11 ops for antispoof and 23 ops for liveness)
- trained on low-resolution inputs
### Anti-spoofing Module
- Checks if input is realistic (e.g. computer generated faces)
- Configuration: `human.config.face.antispoof.enabled`
- Result: `human.result.face[0].real` as score
### Liveness Module
- Checks if input has obvious artifacts due to recording (e.g. playing back phone recording of a face)
- Configuration: `human.config.face.liveness.enabled`
- Result: `human.result.face[0].live` as score
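Both checks come down to a configuration flag and a score in the result object; a minimal sketch (score thresholds are illustrative, not part of the library):
```js
const human = new Human({
  face: {
    enabled: true,
    antispoof: { enabled: true }, // produces human.result.face[0].real
    liveness: { enabled: true }, // produces human.result.face[0].live
  },
});
async function isGenuine(input) {
  const result = await human.detect(input); // any supported input type
  const face = result.face[0];
  if (!face) return false;
  return (face.real || 0) > 0.8 && (face.live || 0) > 0.8; // illustrative thresholds
}
```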
### Models
**FaceID** is compatible with the following descriptor models (see the configuration sketch after this list):
- `faceres.json` (default) performs combined age/gender/descriptor analysis
- `faceres-deep.json` higher resolution variation of `faceres`
- `insightface` alternative model for face descriptor analysis
- `mobilefacenet` alternative model for face descriptor analysis
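As a minimal sketch, swapping the default descriptor model for an alternative mirrors the commented-out options in `index.ts` below:
```js
// disable default faceres descriptor and enable mobilefacenet instead
const human = new Human({
  face: {
    enabled: true,
    description: { enabled: false },
    mobilefacenet: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/mobilefacenet.json' },
  },
});
```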

49
demo/faceid/index.html Normal file
View File

@ -0,0 +1,49 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human: Face Recognition</title>
<meta name="viewport" content="width=device-width" id="viewport">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./index.js" type="module"></script>
<style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; padding: 16px; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
body::-webkit-scrollbar { display: none; }
.button { padding: 2px; cursor: pointer; box-shadow: 2px 2px black; width: 64px; text-align: center; place-content: center; margin-left: 16px; height: 16px; display: none }
.ok { position: absolute; top: 64px; right: 20px; width: 150px; background-color: grey; padding: 4px; color: black; font-size: 14px }
</style>
</head>
<body>
<div style="padding: 8px">
<h1 style="margin: 0">faceid demo using human library</h1>
look directly at camera and make sure that detection passes all of the required tests noted on the right hand side of the screen<br>
if input does not satisfy tests within the specified timeout, no image will be selected<br>
once face image is approved, it will be compared with existing face database<br>
you can also store face descriptor with label in a browser's indexdb for future usage<br>
<br>
<i>note: this is not equivalent to full faceid methods as used by modern mobile phones or windows hello<br>
as they rely on additional infrared sensors and depth-sensing and not just camera image for additional levels of security</i>
</div>
<canvas id="canvas" style="padding: 8px"></canvas>
<canvas id="source" style="padding: 8px"></canvas>
<video id="video" playsinline style="display: none"></video>
<pre id="log" style="padding: 8px"></pre>
<div id="match" style="display: none; padding: 8px">
<label for="name">name:</label>
<input id="name" type="text" value="" style="height: 16px; border: none; padding: 2px; margin-left: 8px">
<span id="save" class="button" style="background-color: royalblue">save</span>
<span id="delete" class="button" style="background-color: lightcoral">delete</span>
</div>
<div id="retry" class="button" style="background-color: darkslategray; width: 93%; margin-top: 32px; padding: 12px">retry</div>
<div id="ok"></div>
</body>
</html>

9
demo/faceid/index.js Normal file

File diff suppressed because one or more lines are too long

7
demo/faceid/index.js.map Normal file

File diff suppressed because one or more lines are too long

318
demo/faceid/index.ts Normal file
View File

@ -0,0 +1,318 @@
/**
* Human demo for browsers
* @default Human Library
* @summary <https://github.com/vladmandic/human>
* @author <https://github.com/vladmandic>
* @copyright <https://github.com/vladmandic>
* @license MIT
*/
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
import * as indexDb from './indexdb'; // methods to deal with indexdb
const humanConfig = { // user configuration for human, used to fine-tune behavior
cacheSensitivity: 0.01,
modelBasePath: '../../models',
filter: { enabled: true, equalization: true }, // lets run with histogram equilizer
debug: true,
face: {
enabled: true,
detector: { rotation: true, return: true, mask: false }, // return tensor is used to get detected face image
description: { enabled: true }, // default model for face descriptor extraction is faceres
// mobilefacenet: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/mobilefacenet.json' }, // alternative model
// insightface: { enabled: true, modelPath: 'https://vladmandic.github.io/insightface/models/insightface-mobilenet-swish.json' }, // alternative model
iris: { enabled: true }, // needed to determine gaze direction
emotion: { enabled: false }, // not needed
antispoof: { enabled: true }, // enable optional antispoof module
liveness: { enabled: true }, // enable optional liveness module
},
body: { enabled: false },
hand: { enabled: false },
object: { enabled: false },
gesture: { enabled: true }, // parses face and iris gestures
};
// const matchOptions = { order: 2, multiplier: 1000, min: 0.0, max: 1.0 }; // for embedding model
const matchOptions = { order: 2, multiplier: 25, min: 0.2, max: 0.8 }; // for faceres model
const options = {
minConfidence: 0.6, // overall face confidence for box, face, gender, real, live
minSize: 224, // min input to face descriptor model before degradation
maxTime: 30000, // max time before giving up
blinkMin: 10, // minimum duration of a valid blink
blinkMax: 800, // maximum duration of a valid blink
threshold: 0.5, // minimum similarity
distanceMin: 0.4, // closest that face is allowed to be to the camera, in meters
distanceMax: 1.0, // farthest that face is allowed to be to the camera, in meters
mask: humanConfig.face.detector.mask,
rotation: humanConfig.face.detector.rotation,
...matchOptions,
};
const ok: Record<string, { status: boolean | undefined, val: number }> = { // must meet all rules
faceCount: { status: false, val: 0 },
faceConfidence: { status: false, val: 0 },
facingCenter: { status: false, val: 0 },
lookingCenter: { status: false, val: 0 },
blinkDetected: { status: false, val: 0 },
faceSize: { status: false, val: 0 },
antispoofCheck: { status: false, val: 0 },
livenessCheck: { status: false, val: 0 },
distance: { status: false, val: 0 },
age: { status: false, val: 0 },
gender: { status: false, val: 0 },
timeout: { status: true, val: 0 },
descriptor: { status: false, val: 0 },
elapsedMs: { status: undefined, val: 0 }, // total time while waiting for valid face
detectFPS: { status: undefined, val: 0 }, // mark detection fps performance
drawFPS: { status: undefined, val: 0 }, // mark redraw fps performance
};
const allOk = () => ok.faceCount.status
&& ok.faceSize.status
&& ok.blinkDetected.status
&& ok.facingCenter.status
&& ok.lookingCenter.status
&& ok.faceConfidence.status
&& ok.antispoofCheck.status
&& ok.livenessCheck.status
&& ok.distance.status
&& ok.descriptor.status
&& ok.age.status
&& ok.gender.status;
const current: { face: H.FaceResult | null, record: indexDb.FaceRecord | null } = { face: null, record: null }; // current face record and matched database record
const blink = { // internal timers for blink start/end/duration
start: 0,
end: 0,
time: 0,
};
// let db: Array<{ name: string, source: string, embedding: number[] }> = []; // holds loaded face descriptor database
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
human.env.perfadd = false; // is performance data showing instant or total values
human.draw.options.font = 'small-caps 18px "Lato"'; // set font used to draw labels when using draw methods
human.draw.options.lineHeight = 20;
const dom = { // grab instances of dom objects so we dont have to look them up later
video: document.getElementById('video') as HTMLVideoElement,
canvas: document.getElementById('canvas') as HTMLCanvasElement,
log: document.getElementById('log') as HTMLPreElement,
fps: document.getElementById('fps') as HTMLPreElement,
match: document.getElementById('match') as HTMLDivElement,
name: document.getElementById('name') as HTMLInputElement,
save: document.getElementById('save') as HTMLSpanElement,
delete: document.getElementById('delete') as HTMLSpanElement,
retry: document.getElementById('retry') as HTMLDivElement,
source: document.getElementById('source') as HTMLCanvasElement,
ok: document.getElementById('ok') as HTMLDivElement,
};
const timestamp = { detect: 0, draw: 0 }; // holds information used to calculate performance and possible memory leaks
let startTime = 0;
const log = (...msg) => { // helper method to output messages
dom.log.innerText += msg.join(' ') + '\n';
console.log(...msg); // eslint-disable-line no-console
};
async function webCam() { // initialize webcam
// @ts-ignore resizeMode is not yet defined in tslib
const cameraOptions: MediaStreamConstraints = { audio: false, video: { facingMode: 'user', resizeMode: 'none', width: { ideal: document.body.clientWidth } } };
const stream: MediaStream = await navigator.mediaDevices.getUserMedia(cameraOptions);
const ready = new Promise((resolve) => { dom.video.onloadeddata = () => resolve(true); });
dom.video.srcObject = stream;
void dom.video.play();
await ready;
dom.canvas.width = dom.video.videoWidth;
dom.canvas.height = dom.video.videoHeight;
dom.canvas.style.width = '50%';
dom.canvas.style.height = '50%';
if (human.env.initial) log('video:', dom.video.videoWidth, dom.video.videoHeight, '|', stream.getVideoTracks()[0].label);
dom.canvas.onclick = () => { // pause when clicked on screen and resume on next click
if (dom.video.paused) void dom.video.play();
else dom.video.pause();
};
}
async function detectionLoop() { // main detection loop
if (!dom.video.paused) {
if (current.face?.tensor) human.tf.dispose(current.face.tensor); // dispose previous tensor
await human.detect(dom.video); // actual detection; we're not capturing output in a local variable as it can also be reached via human.result
const now = human.now();
ok.detectFPS.val = Math.round(10000 / (now - timestamp.detect)) / 10;
timestamp.detect = now;
requestAnimationFrame(detectionLoop); // start new frame immediately
}
}
function drawValidationTests() {
let y = 32;
for (const [key, val] of Object.entries(ok)) {
let el = document.getElementById(`ok-${key}`);
if (!el) {
el = document.createElement('div');
el.id = `ok-${key}`;
el.innerText = key;
el.className = 'ok';
el.style.top = `${y}px`;
dom.ok.appendChild(el);
}
if (typeof val.status === 'boolean') el.style.backgroundColor = val.status ? 'lightgreen' : 'lightcoral';
const status = val.status ? 'ok' : 'fail';
el.innerText = `${key}: ${val.val === 0 ? status : val.val}`;
y += 28;
}
}
async function validationLoop(): Promise<H.FaceResult> { // main screen refresh loop
const interpolated = human.next(human.result); // smoothen result using last-known results
human.draw.canvas(dom.video, dom.canvas); // draw canvas to screen
await human.draw.all(dom.canvas, interpolated); // draw labels, boxes, lines, etc.
const now = human.now();
ok.drawFPS.val = Math.round(10000 / (now - timestamp.draw)) / 10;
timestamp.draw = now;
ok.faceCount.val = human.result.face.length;
ok.faceCount.status = ok.faceCount.val === 1; // must be exactly one detected face
if (ok.faceCount.status) { // skip the rest if no face
const gestures: string[] = Object.values(human.result.gesture).map((gesture: H.GestureResult) => gesture.gesture); // flatten all gestures
if (gestures.includes('blink left eye') || gestures.includes('blink right eye')) blink.start = human.now(); // blink starts when eyes get closed
if (blink.start > 0 && !gestures.includes('blink left eye') && !gestures.includes('blink right eye')) blink.end = human.now(); // if blink started, measure how long until eyes are back open
ok.blinkDetected.status = ok.blinkDetected.status || (Math.abs(blink.end - blink.start) > options.blinkMin && Math.abs(blink.end - blink.start) < options.blinkMax);
if (ok.blinkDetected.status && blink.time === 0) blink.time = Math.trunc(blink.end - blink.start);
ok.facingCenter.status = gestures.includes('facing center');
ok.lookingCenter.status = gestures.includes('looking center'); // must face camera and look at camera
ok.faceConfidence.val = human.result.face[0].faceScore || human.result.face[0].boxScore || 0;
ok.faceConfidence.status = ok.faceConfidence.val >= options.minConfidence;
ok.antispoofCheck.val = human.result.face[0].real || 0;
ok.antispoofCheck.status = ok.antispoofCheck.val >= options.minConfidence;
ok.livenessCheck.val = human.result.face[0].live || 0;
ok.livenessCheck.status = ok.livenessCheck.val >= options.minConfidence;
ok.faceSize.val = Math.min(human.result.face[0].box[2], human.result.face[0].box[3]);
ok.faceSize.status = ok.faceSize.val >= options.minSize;
ok.distance.val = human.result.face[0].distance || 0;
ok.distance.status = (ok.distance.val >= options.distanceMin) && (ok.distance.val <= options.distanceMax);
ok.descriptor.val = human.result.face[0].embedding?.length || 0;
ok.descriptor.status = ok.descriptor.val > 0;
ok.age.val = human.result.face[0].age || 0;
ok.age.status = ok.age.val > 0;
ok.gender.val = human.result.face[0].genderScore || 0;
ok.gender.status = ok.gender.val >= options.minConfidence;
}
// run again
ok.timeout.status = ok.elapsedMs.val <= options.maxTime;
drawValidationTests();
if (allOk() || !ok.timeout.status) { // all criteria met
dom.video.pause();
return human.result.face[0];
}
ok.elapsedMs.val = Math.trunc(human.now() - startTime);
return new Promise((resolve) => {
setTimeout(async () => {
await validationLoop(); // run validation loop until conditions are met
resolve(human.result.face[0]); // recursive promise resolve
}, 30); // use to slow down refresh from max refresh rate to target of 30 fps
});
}
async function saveRecords() {
if (dom.name.value.length > 0) {
const image = dom.canvas.getContext('2d')?.getImageData(0, 0, dom.canvas.width, dom.canvas.height) as ImageData;
const rec = { id: 0, name: dom.name.value, descriptor: current.face?.embedding as number[], image };
await indexDb.save(rec);
log('saved face record:', rec.name, 'descriptor length:', current.face?.embedding?.length);
log('known face records:', await indexDb.count());
} else {
log('invalid name');
}
}
async function deleteRecord() {
if (current.record && current.record.id > 0) {
await indexDb.remove(current.record);
}
}
async function detectFace() {
dom.canvas.style.height = '';
dom.canvas.getContext('2d')?.clearRect(0, 0, options.minSize, options.minSize);
if (!current?.face?.tensor || !current?.face?.embedding) return false;
console.log('face record:', current.face); // eslint-disable-line no-console
log(`detected face: ${current.face.gender} ${current.face.age || 0}y distance ${100 * (current.face.distance || 0)}cm/${Math.round(100 * (current.face.distance || 0) / 2.54)}in`);
await human.draw.tensor(current.face.tensor, dom.canvas);
if (await indexDb.count() === 0) {
log('face database is empty: nothing to compare face with');
document.body.style.background = 'black';
dom.delete.style.display = 'none';
return false;
}
const db = await indexDb.load();
const descriptors = db.map((rec) => rec.descriptor).filter((desc) => desc.length > 0);
const res = human.match.find(current.face.embedding, descriptors, matchOptions);
current.record = db[res.index] || null;
if (current.record) {
log(`best match: ${current.record.name} | id: ${current.record.id} | similarity: ${Math.round(1000 * res.similarity) / 10}%`);
dom.name.value = current.record.name;
dom.source.style.display = '';
dom.source.getContext('2d')?.putImageData(current.record.image, 0, 0);
}
document.body.style.background = res.similarity > options.threshold ? 'darkgreen' : 'maroon';
return res.similarity > options.threshold;
}
async function main() { // main entry point
ok.faceCount.status = false;
ok.faceConfidence.status = false;
ok.facingCenter.status = false;
ok.blinkDetected.status = false;
ok.faceSize.status = false;
ok.antispoofCheck.status = false;
ok.livenessCheck.status = false;
ok.age.status = false;
ok.gender.status = false;
ok.elapsedMs.val = 0;
dom.match.style.display = 'none';
dom.retry.style.display = 'none';
dom.source.style.display = 'none';
dom.canvas.style.height = '50%';
document.body.style.background = 'black';
await webCam();
await detectionLoop(); // start detection loop
startTime = human.now();
current.face = await validationLoop(); // start validation loop
dom.canvas.width = current.face?.tensor?.shape[1] || options.minSize;
dom.canvas.height = current.face?.tensor?.shape[0] || options.minSize;
dom.source.width = dom.canvas.width;
dom.source.height = dom.canvas.height;
dom.canvas.style.width = '';
dom.match.style.display = 'flex';
dom.save.style.display = 'flex';
dom.delete.style.display = 'flex';
dom.retry.style.display = 'block';
if (!allOk()) { // are all criteria met?
log('did not find valid face');
return false;
}
return detectFace();
}
async function init() {
log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
log('options:', JSON.stringify(options).replace(/{|}|"|\[|\]/g, '').replace(/,/g, ' '));
log('initializing webcam...');
await webCam(); // start webcam
log('loading human models...');
await human.load(); // preload all models
log('initializing human...');
log('face embedding model:', humanConfig.face.description.enabled ? 'faceres' : '', humanConfig.face['mobilefacenet']?.enabled ? 'mobilefacenet' : '', humanConfig.face['insightface']?.enabled ? 'insightface' : '');
log('loading face database...');
log('known face records:', await indexDb.count());
dom.retry.addEventListener('click', main);
dom.save.addEventListener('click', saveRecords);
dom.delete.addEventListener('click', deleteRecord);
await human.warmup(); // warmup function to initialize backend for future faster detection
await main();
}
window.onload = init;

demo/faceid/indexdb.ts Normal file

@ -0,0 +1,65 @@
let db: IDBDatabase; // instance of indexdb
const database = 'human';
const table = 'person';
export interface FaceRecord { id: number, name: string, descriptor: number[], image: ImageData }
const log = (...msg) => console.log('indexdb', ...msg); // eslint-disable-line no-console
export async function open() {
if (db) return true;
return new Promise((resolve) => {
const request: IDBOpenDBRequest = indexedDB.open(database, 1);
request.onerror = (evt) => log('error:', evt);
request.onupgradeneeded = (evt: IDBVersionChangeEvent) => { // create if it doesn't exist
log('create:', evt.target);
db = (evt.target as IDBOpenDBRequest).result;
db.createObjectStore(table, { keyPath: 'id', autoIncrement: true });
};
request.onsuccess = (evt) => { // open
db = (evt.target as IDBOpenDBRequest).result;
log('open:', db);
resolve(true);
};
});
}
export async function load(): Promise<FaceRecord[]> {
const faceDB: FaceRecord[] = [];
if (!db) await open(); // open or create if not already done
return new Promise((resolve) => {
const cursor: IDBRequest = db.transaction([table], 'readwrite').objectStore(table).openCursor(null, 'next');
cursor.onerror = (evt) => log('load error:', evt);
cursor.onsuccess = (evt) => {
if ((evt.target as IDBRequest).result) {
faceDB.push((evt.target as IDBRequest).result.value);
(evt.target as IDBRequest).result.continue();
} else {
resolve(faceDB);
}
};
});
}
export async function count(): Promise<number> {
if (!db) await open(); // open or create if not already done
return new Promise((resolve) => {
const store: IDBRequest = db.transaction([table], 'readwrite').objectStore(table).count();
store.onerror = (evt) => log('count error:', evt);
store.onsuccess = () => resolve(store.result);
});
}
export async function save(faceRecord: FaceRecord) {
if (!db) await open(); // open or create if not already done
const newRecord = { name: faceRecord.name, descriptor: faceRecord.descriptor, image: faceRecord.image }; // omit id since it's auto-increment
db.transaction([table], 'readwrite').objectStore(table).put(newRecord);
log('save:', newRecord);
}
export async function remove(faceRecord: FaceRecord) {
if (!db) await open(); // open or create if not already done
db.transaction([table], 'readwrite').objectStore(table).delete(faceRecord.id); // delete based on id
log('delete:', faceRecord);
}
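A minimal usage sketch of this helper, assuming it is imported as `indexDb` as in the faceid demo above; the record values are illustrative only:

```js
import * as indexDb from './indexdb'; // the helper shown above

async function example() {
  await indexDb.open(); // opens (or creates) the 'human' database with its 'person' store
  const rec = { id: 0, name: 'test', descriptor: new Array(1024).fill(0), image: new ImageData(256, 256) };
  await indexDb.save(rec); // the id field is ignored on save since the store auto-increments it
  console.log('records:', await indexDb.count());
  const all = await indexDb.load(); // FaceRecord[]
  if (all.length > 0) await indexDb.remove(all[0]); // delete by record id
}
```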

demo/facematch/README.md Normal file

@ -0,0 +1,84 @@
# Human Face Recognition & Matching
- **Browser** demo: `index.html` & `facematch.js`:
Loads sample images, extracts faces and runs match and similarity analysis
- **NodeJS** demo `node-match.js` and `node-match-worker.js`
Advanced multithreading demo that runs a pool of worker threads to process a high number of matches
- Sample face database: `faces.json`
<br>
## Browser Face Recognition Demo
- `demo/facematch`: Demo for Browsers that uses all face description and embedding features to
detect, extract, and identify all faces and calculate similarity between them
It highlights functionality such as:
- Loading images
- Extracting faces from images
- Calculating face embedding descriptors
- Finding face similarity and sorting them by similarity
- Finding best face match based on a known list of faces and printing matches
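As a minimal sketch, the core matching calls this demo exercises (`image`, `otherDesc`, `allDescriptors`, and `allNames` are hypothetical placeholders, not part of the demo):

```js
const res = await human.detect(image); // run full detection on an input
const desc = res.face[0].embedding; // face descriptor: plain array of numbers
const similarity = human.match.similarity(desc, otherDesc); // pairwise similarity in range 0..1
const best = human.match.find(desc, allDescriptors); // best match: { index, similarity, ... }
console.log('best match:', allNames[best.index], 'similarity:', best.similarity);
```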
<br>
## NodeJS Multi-Threading Match Solution
### Methods and Properties in `node-match`
- `createBuffer`: create shared buffer array
single copy of data regardless of number of workers
fixed size based on `options.dbMax`
- `appendRecord`: add an additional batch of descriptors to the buffer
can append a batch of records to the buffer at any time
workers are informed of the new content after the append has completed
- `workersStart`: start or expand pool of `threadPoolSize` workers
each worker runs `node-match-worker` and listens for messages from main thread
can shut down workers or create additional worker threads on-the-fly
safe against workers that exit
- `workersClose`: close workers in a pool
first requests workers to exit, then terminates them after a timeout
- `match`: dispatch a match job to a worker
returns the first match that satisfies `minThreshold`
jobs are assigned to workers round-robin
since timing for each job is near-fixed and predictable
- `getDescriptor`: get descriptor array for a given id from a buffer
- `fuzDescriptor`: slightly randomize descriptor content for a harder match
- `getLabel`: fetch label for resolved descriptor index
- `loadDB`: load face database from a JSON file `dbFile`
extracts descriptors and adds them to buffer
extracts labels and maintains them in main thread
for test purposes, loads the same database `dbFact` times to create a very large database
`node-match` then listens for messages from workers until `maxJobs` jobs have completed
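Since a `SharedArrayBuffer` cannot grow, the buffer is sized up front from `dbMax` and `descLength`; a quick sanity check of the default sizing (it matches the `totalBytes` value in the example output below):

```js
const dbMax = 10000; // options.dbMax: maximum number of records held in memory
const descLength = 1024; // options.descLength: descriptor length in 32-bit float elements
console.log(4 * dbMax * descLength); // 40960000 bytes, ~41 MB, allocated once and shared by all workers
```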
### Performance
Match time increases linearly with the number of records in the database
Speedup from additional worker threads is sub-linear due to communication overhead
- Face database with 10k records:
> threadPoolSize: 1 => ~60 ms / match job
> threadPoolSize: 6 => ~25 ms / match job
- Face database with 50k records:
> threadPoolSize: 1 => ~300 ms / match job
> threadPoolSize: 6 => ~100 ms / match job
- Face database with 100k records:
> threadPoolSize: 1 => ~600 ms / match job
> threadPoolSize: 6 => ~200 ms / match job
### Example
> node node-match
<!-- eslint-skip -->
```js
INFO: options: { dbFile: './faces.json', dbMax: 10000, threadPoolSize: 6, workerSrc: './node-match-worker.js', debug: false, minThreshold: 0.9, descLength: 1024 }
DATA: created shared buffer: { maxDescriptors: 10000, totalBytes: 40960000, totalElements: 10240000 }
DATA: db loaded: { existingRecords: 0, newRecords: 5700 }
INFO: starting worker thread pool: { totalWorkers: 6, alreadyActive: 0 }
STATE: submitted: { matchJobs: 100, poolSize: 6, activeWorkers: 6 }
STATE: { matchJobsFinished: 100, totalTimeMs: 1769, averageTimeMs: 17.69 }
INFO: closing workers: { poolSize: 6, activeWorkers: 6 }
```

demo/facematch/facematch.js

@ -1,34 +1,36 @@
-// @ts-nocheck // typescript checks disabled as this is pure javascript
/**
* Human demo for browsers
*
- * Demo for face descriptor analysis and face simmilarity analysis
+ * Demo for face descriptor analysis and face similarity analysis
*/
-import Human from '../../dist/human.esm.js';
+/** @type {Human} */
+import { Human } from '../../dist/human.esm.js';
const userConfig = {
-backend: 'wasm',
-async: false,
+backend: 'humangl',
+async: true,
warmup: 'none',
-cacheSimilarity: 0,
+cacheSensitivity: 0.01,
debug: true,
modelBasePath: '../../models/',
-// wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
+deallocate: true,
+filter: {
+enabled: true,
+equalization: true,
+width: 0,
+},
face: {
enabled: true,
-detector: { rotation: true, return: true, maxDetected: 20 },
+detector: { return: true, rotation: true, maxDetected: 50, iouThreshold: 0.01, minConfidence: 0.2 },
mesh: { enabled: true },
-embedding: { enabled: false },
-iris: { enabled: true },
+iris: { enabled: false },
emotion: { enabled: true },
description: { enabled: true },
},
hand: { enabled: false },
-gesture: { enabled: true },
+gesture: { enabled: false },
body: { enabled: false },
-filter: { enabled: true },
segmentation: { enabled: false },
};
@ -42,8 +44,7 @@ const minScore = 0.4;
function log(...msg) {
const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
-// eslint-disable-next-line no-console
-console.log(ts, ...msg);
+console.log(ts, ...msg); // eslint-disable-line no-console
}
function title(msg) {
@ -62,27 +63,16 @@ async function loadFaceMatchDB() {
}
}
-async function SelectFaceCanvas(face) {
+async function selectFaceCanvas(face) {
// if we have face image tensor, enhance it and display it
let embedding;
document.getElementById('orig').style.filter = 'blur(16px)';
if (face.tensor) {
title('Sorting Faces by Similarity');
-const enhanced = human.enhance(face);
-if (enhanced) {
-const c = document.getElementById('orig');
-const squeeze = human.tf.squeeze(enhanced);
-const normalize = human.tf.div(squeeze, 255);
-await human.tf.browser.toPixels(normalize, c);
-human.tf.dispose(enhanced);
-human.tf.dispose(squeeze);
-human.tf.dispose(normalize);
-const ctx = c.getContext('2d');
-ctx.font = 'small-caps 0.4rem "Lato"';
-ctx.fillStyle = 'rgba(255, 255, 255, 1)';
-}
+const c = document.getElementById('orig');
+await human.draw.tensor(face.tensor, c);
const arr = db.map((rec) => rec.embedding);
-const res = await human.match(face.embedding, arr);
+const res = await human.match.find(face.embedding, arr);
log('Match:', db[res.index].name);
const emotion = face.emotion[0] ? `${Math.round(100 * face.emotion[0].score)}% ${face.emotion[0].emotion}` : 'N/A';
document.getElementById('desc').innerHTML = `
@ -103,11 +93,11 @@ async function selectFaceCanvas(face) {
for (const canvas of canvases) {
// calculate similarity from selected face to current one in the loop
const current = all[canvas.tag.sample][canvas.tag.face];
-const similarity = human.similarity(face.embedding, current.embedding);
+const similarity = human.match.similarity(face.embedding, current.embedding);
canvas.tag.similarity = similarity;
// get best match
// draw the canvas
-await human.tf.browser.toPixels(current.tensor, canvas);
+await human.draw.tensor(current.tensor, canvas);
const ctx = canvas.getContext('2d');
ctx.font = 'small-caps 1rem "Lato"';
ctx.fillStyle = 'rgba(0, 0, 0, 1)';
@ -118,10 +108,10 @@ async function selectFaceCanvas(face) {
ctx.fillText(`${current.age}y ${(100 * (current.genderScore || 0)).toFixed(1)}% ${current.gender}`, 4, canvas.height - 6);
// identify person
ctx.font = 'small-caps 1rem "Lato"';
-const start = performance.now();
+const start = human.now();
const arr = db.map((rec) => rec.embedding);
-const res = await human.match(face.embedding, arr);
-time += (performance.now() - start);
+const res = await human.match.find(current.embedding, arr);
+time += (human.now() - start);
if (res.similarity > minScore) ctx.fillText(`DB: ${(100 * res.similarity).toFixed(1)}% ${db[res.index].name}`, 4, canvas.height - 30);
}
@ -135,12 +125,11 @@ async function selectFaceCanvas(face) {
title('Selected Face');
}
-async function AddFaceCanvas(index, res, fileName) {
+async function addFaceCanvas(index, res, fileName) {
all[index] = res.face;
-let ok = false;
for (const i in res.face) {
-if (res.face[i].mesh.length === 0) continue;
-ok = true;
+if (!res.face[i].tensor) continue; // did not get valid results
+if ((res.face[i].faceScore || 0) < human.config.face.detector.minConfidence) continue; // face analysis score too low
all[index][i].fileName = fileName;
const canvas = document.createElement('canvas');
canvas.tag = { sample: index, face: i, source: fileName };
@ -155,40 +144,37 @@ async function addFaceCanvas(index, res, fileName) {
gender: ${Math.round(100 * res.face[i].genderScore)}% ${res.face[i].gender}
emotion: ${emotion}
`.replace(/ /g, ' ');
-// mouse click on any face canvas triggers analysis
-canvas.addEventListener('click', (evt) => {
-log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, 'Source:', evt.target.tag.source, all[evt.target.tag.sample][evt.target.tag.face]);
-SelectFaceCanvas(all[evt.target.tag.sample][evt.target.tag.face]);
-});
-// if we actually got face image tensor, draw canvas with that face
-if (res.face[i].tensor) {
-await human.tf.browser.toPixels(res.face[i].tensor, canvas);
-document.getElementById('faces').appendChild(canvas);
+await human.draw.tensor(res.face[i].tensor, canvas);
const ctx = canvas.getContext('2d');
+if (!ctx) return;
ctx.font = 'small-caps 0.8rem "Lato"';
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
ctx.fillText(`${res.face[i].age}y ${(100 * (res.face[i].genderScore || 0)).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6);
const arr = db.map((rec) => rec.embedding);
-const result = await human.match(res.face[i].embedding, arr);
+const result = human.match.find(res.face[i].embedding, arr);
ctx.font = 'small-caps 1rem "Lato"';
if (result.similarity && res.similarity > minScore) ctx.fillText(`${(100 * result.similarity).toFixed(1)}% ${db[result.index].name}`, 4, canvas.height - 30);
+document.getElementById('faces').appendChild(canvas);
+canvas.addEventListener('click', (evt) => {
+log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, 'Source:', evt.target.tag.source, all[evt.target.tag.sample][evt.target.tag.face]);
+selectFaceCanvas(all[evt.target.tag.sample][evt.target.tag.face]);
+});
}
-}
-return ok;
}
-async function AddImageElement(index, image, length) {
+async function addImageElement(index, image, length) {
const faces = all.reduce((prev, curr) => prev += curr.length, 0);
title(`Analyzing Input Images<br> ${Math.round(100 * index / length)}% [${index} / ${length}]<br>Found ${faces} Faces`);
return new Promise((resolve) => {
const img = new Image(128, 128);
img.onload = () => { // must wait until image is loaded
-human.detect(img, userConfig).then(async (res) => {
-const ok = await AddFaceCanvas(index, res, image); // then wait until image is analyzed
-// log('Add image:', index + 1, image, 'faces:', res.face.length);
-if (ok) document.getElementById('images').appendChild(img); // and finally we can add it
-resolve(true);
-});
+document.getElementById('images').appendChild(img); // and finally we can add it
+human.detect(img, userConfig)
+.then((res) => { // eslint-disable-line promise/always-return
+addFaceCanvas(index, res, image); // then wait until image is analyzed
+resolve(true);
+})
+.catch(() => log('human detect error'));
};
img.onerror = () => {
log('Add image error:', index + 1, image);
@ -199,7 +185,7 @@ async function addImageElement(index, image, length) {
});
}
-async function createFaceMatchDB() {
+function createFaceMatchDB() {
log('Creating Faces DB...');
for (const image of all) {
for (const face of image) db.push({ name: 'unknown', source: face.fileName, embedding: face.embedding });
@ -226,36 +212,46 @@ async function main() {
// could not dynamically enumerate images so using static list
if (images.length === 0) {
images = [
-'ai-body.jpg', 'ai-upper.jpg',
-'person-carolina.jpg', 'person-celeste.jpg', 'person-leila1.jpg', 'person-leila2.jpg', 'person-lexi.jpg', 'person-linda.jpg', 'person-nicole.jpg', 'person-tasia.jpg',
-'person-tetiana.jpg', 'person-vlado1.jpg', 'person-vlado5.jpg', 'person-vlado.jpg', 'person-christina.jpg', 'person-lauren.jpg',
-'daz3d-brianna.jpg', 'daz3d-chiyo.jpg', 'daz3d-cody.jpg', 'daz3d-drew-01.jpg', 'daz3d-drew-02.jpg', 'daz3d-ella-01.jpg', 'daz3d-ella-02.jpg', 'daz3d-gillian.jpg',
-'daz3d-hye-01.jpg', 'daz3d-hye-02.jpg', 'daz3d-kaia.jpg', 'daz3d-karen.jpg', 'daz3d-kiaria-01.jpg', 'daz3d-kiaria-02.jpg', 'daz3d-lilah-01.jpg', 'daz3d-lilah-02.jpg',
-'daz3d-lilah-03.jpg', 'daz3d-lila.jpg', 'daz3d-lindsey.jpg', 'daz3d-megah.jpg', 'daz3d-selina-01.jpg', 'daz3d-selina-02.jpg', 'daz3d-snow.jpg',
-'daz3d-sunshine.jpg', 'daz3d-taia.jpg', 'daz3d-tuesday-01.jpg', 'daz3d-tuesday-02.jpg', 'daz3d-tuesday-03.jpg', 'daz3d-zoe.jpg', 'daz3d-ginnifer.jpg',
-'daz3d-_emotions01.jpg', 'daz3d-_emotions02.jpg', 'daz3d-_emotions03.jpg', 'daz3d-_emotions04.jpg', 'daz3d-_emotions05.jpg',
+'ai-face.jpg', 'ai-upper.jpg', 'ai-body.jpg', 'solvay1927.jpg',
'group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg',
+'person-celeste.jpg', 'person-christina.jpg', 'person-lauren.jpg', 'person-lexi.jpg', 'person-linda.jpg', 'person-nicole.jpg', 'person-tasia.jpg', 'person-tetiana.jpg', 'person-vlado.jpg', 'person-vlado1.jpg', 'person-vlado5.jpg',
+'stock-group-1.jpg', 'stock-group-2.jpg',
+'stock-models-1.jpg', 'stock-models-2.jpg', 'stock-models-3.jpg', 'stock-models-4.jpg', 'stock-models-5.jpg', 'stock-models-6.jpg', 'stock-models-7.jpg', 'stock-models-8.jpg', 'stock-models-9.jpg',
+'stock-teen-1.jpg', 'stock-teen-2.jpg', 'stock-teen-3.jpg', 'stock-teen-4.jpg', 'stock-teen-5.jpg', 'stock-teen-6.jpg', 'stock-teen-7.jpg', 'stock-teen-8.jpg',
+'stock-models-10.jpg', 'stock-models-11.jpg', 'stock-models-12.jpg', 'stock-models-13.jpg', 'stock-models-14.jpg', 'stock-models-15.jpg', 'stock-models-16.jpg',
+'cgi-model-1.jpg', 'cgi-model-2.jpg', 'cgi-model-3.jpg', 'cgi-model-4.jpg', 'cgi-model-5.jpg', 'cgi-model-6.jpg', 'cgi-model-7.jpg', 'cgi-model-8.jpg', 'cgi-model-9.jpg',
+'cgi-model-10.jpg', 'cgi-model-11.jpg', 'cgi-model-12.jpg', 'cgi-model-13.jpg', 'cgi-model-14.jpg', 'cgi-model-15.jpg', 'cgi-model-18.jpg', 'cgi-model-19.jpg',
+'cgi-model-20.jpg', 'cgi-model-21.jpg', 'cgi-model-22.jpg', 'cgi-model-23.jpg', 'cgi-model-24.jpg', 'cgi-model-25.jpg', 'cgi-model-26.jpg', 'cgi-model-27.jpg', 'cgi-model-28.jpg', 'cgi-model-29.jpg',
+'cgi-model-30.jpg', 'cgi-model-31.jpg', 'cgi-model-33.jpg', 'cgi-model-34.jpg',
+'cgi-multiangle-1.jpg', 'cgi-multiangle-2.jpg', 'cgi-multiangle-3.jpg', 'cgi-multiangle-4.jpg', 'cgi-multiangle-6.jpg', 'cgi-multiangle-7.jpg', 'cgi-multiangle-8.jpg', 'cgi-multiangle-9.jpg', 'cgi-multiangle-10.jpg', 'cgi-multiangle-11.jpg',
+'stock-emotions-a-1.jpg', 'stock-emotions-a-2.jpg', 'stock-emotions-a-3.jpg', 'stock-emotions-a-4.jpg', 'stock-emotions-a-5.jpg', 'stock-emotions-a-6.jpg', 'stock-emotions-a-7.jpg', 'stock-emotions-a-8.jpg',
+'stock-emotions-b-1.jpg', 'stock-emotions-b-2.jpg', 'stock-emotions-b-3.jpg', 'stock-emotions-b-4.jpg', 'stock-emotions-b-5.jpg', 'stock-emotions-b-6.jpg', 'stock-emotions-b-7.jpg', 'stock-emotions-b-8.jpg',
];
// add prefix for gitpages
-images = images.map((a) => `/human/samples/in/${a}`);
+images = images.map((a) => `../../samples/in/${a}`);
log('Adding static image list:', images);
} else {
log('Discovered images:', images);
}
-// download and analyze all images
-for (let i = 0; i < images.length; i++) await AddImageElement(i, images[i], images.length);
+// images = ['/samples/in/person-lexi.jpg', '/samples/in/person-carolina.jpg', '/samples/in/solvay1927.jpg'];
+const t0 = human.now();
+for (let i = 0; i < images.length; i++) await addImageElement(i, images[i], images.length);
+const t1 = human.now();
// print stats
const num = all.reduce((prev, cur) => prev += cur.length, 0);
-log('Extracted faces:', num, 'from images:', all.length);
+log('Extracted faces:', num, 'from images:', all.length, 'time:', Math.round(t1 - t0));
log(human.tf.engine().memory());
// if we didn't download db, generate it from current faces
-if (!db || db.length === 0) await createFaceMatchDB();
+if (!db || db.length === 0) createFaceMatchDB();
title('');
log('Ready');
+human.validate(userConfig);
+human.match.similarity([], []);
}
window.onload = main;

File diff suppressed because one or more lines are too long

demo/facematch/index.html

@ -1,8 +1,9 @@
<!DOCTYPE html>
<html lang="en">
<head>
+<meta charset="utf-8">
<title>Human</title>
-<meta http-equiv="content-type" content="text/html; charset=utf-8">
+<!-- <meta http-equiv="content-type" content="text/html; charset=utf-8"> -->
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">

demo/facematch/node-match-worker.js Normal file

@ -0,0 +1,76 @@
/**
* Runs in a worker thread started by `node-match` demo app
*
*/
const threads = require('worker_threads');
let debug = false;
/** @type SharedArrayBuffer */
let buffer;
/** @type Float32Array */
let view;
let threshold = 0;
let records = 0;
const descLength = 1024; // descriptor length in elements (32-bit floats, so 4096 bytes per descriptor)
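// compute distance between the probe descriptor and the record at `index` in the shared buffer view;
// for order=2 this is the squared L2 distance: the sqrt is deferred to match() so the inner loop stays cheap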
function distance(descBuffer, index, options = { order: 2, multiplier: 20 }) {
const descriptor = new Float32Array(descBuffer);
let sum = 0;
for (let i = 0; i < descriptor.length; i++) {
const diff = (options.order === 2) ? (descriptor[i] - view[index * descLength + i]) : (Math.abs(descriptor[i] - view[index * descLength + i]));
sum += (options.order === 2) ? (diff * diff) : (diff ** options.order);
}
return (options.multiplier || 20) * sum;
}
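// linear scan over all records; exits early as soon as a distance below `threshold` is found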
function match(descBuffer, options = { order: 2, multiplier: 20 }) {
let best = Number.MAX_SAFE_INTEGER;
let index = -1;
for (let i = 0; i < records; i++) {
const res = distance(descBuffer, i, { order: options.order, multiplier: options.multiplier });
if (res < best) {
best = res;
index = i;
}
if (best < threshold || best === 0) break; // short circuit
}
best = (options.order === 2) ? Math.sqrt(best) : best ** (1 / options.order);
const similarity = Math.round(100 * Math.max(0, 100 - best) / 100.0) / 100;
return { index, distance: best, similarity };
}
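// message protocol: the main thread sends either the SharedArrayBuffer itself (once, as the shared descriptor db),
// a control message ({ records }, { threshold }, { debug }, { shutdown }), or a work order ({ descriptor, request })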
threads.parentPort?.on('message', (msg) => {
if (typeof msg.descriptor !== 'undefined') { // actual work order to find a match
const t0 = performance.now();
const result = match(msg.descriptor);
const t1 = performance.now();
threads.parentPort?.postMessage({ request: msg.request, time: Math.trunc(t1 - t0), ...result });
return; // short circuit
}
if (msg instanceof SharedArrayBuffer) { // called only once to receive reference to shared array buffer
buffer = msg;
view = new Float32Array(buffer); // initialize f32 view into buffer
if (debug) threads.parentPort?.postMessage(`buffer: ${buffer.byteLength}`);
}
if (typeof msg.records !== 'undefined') { // received every time the number of records changes
records = msg.records;
if (debug) threads.parentPort?.postMessage(`records: ${records}`);
}
if (typeof msg.debug !== 'undefined') { // set verbose logging
debug = msg.debug;
// if (debug) threads.parentPort?.postMessage(`debug: ${debug}`);
}
if (typeof msg.threshold !== 'undefined') { // set minimum similarity threshold
threshold = msg.threshold;
// if (debug) threads.parentPort?.postMessage(`threshold: ${threshold}`);
}
if (typeof msg.shutdown !== 'undefined') { // got message to close worker
if (debug) threads.parentPort?.postMessage('shutting down');
process.exit(0); // eslint-disable-line no-process-exit
}
});
if (debug) threads.parentPort?.postMessage('started');

demo/facematch/node-match.js Normal file

@ -0,0 +1,184 @@
/**
* Human demo app for NodeJS that generates random facial descriptors
* and uses NodeJS multi-threading to start multiple threads for face matching
* uses `node-match-worker.js` to perform actual face matching analysis
*/
const fs = require('fs');
const path = require('path');
const threads = require('worker_threads');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// global options
const options = {
dbFile: 'demo/facematch/faces.json', // sample face db
dbMax: 10000, // maximum number of records to hold in memory
threadPoolSize: 12, // number of worker threads to create in thread pool
workerSrc: './node-match-worker.js', // code that executes in the worker thread
debug: true, // verbose messages
minThreshold: 0.5, // match returns first record that meets the similarity threshold, set to 0 to always scan all records
descLength: 1024, // descriptor length
};
// test options
const testOptions = {
dbFact: 175, // load db n times to fake huge size
maxJobs: 200, // exit after processing this many jobs
fuzDescriptors: true, // randomize descriptor content before match for harder jobs
};
// global data structures
const data = {
/** @type string[] */
labels: [], // array of strings; the length of this array serves as the overall record count, so it has to be maintained carefully
/** @type SharedArrayBuffer | null */
buffer: null,
/** @type Float32Array | null */
view: null,
/** @type threads.Worker[] */
workers: [], // holds instance of workers. worker can be null if exited
requestID: 0, // each request should increment this counter as its used for round robin assignment
};
let t0 = process.hrtime.bigint(); // used for perf counters
const appendRecords = (labels, descriptors) => {
if (!data.view) return 0;
if (descriptors.length !== labels.length) {
log.error('append error:', { descriptors: descriptors.length, labels: labels.length });
}
// if (options.debug) log.state('appending:', { descriptors: descriptors.length, labels: labels.length });
for (let i = 0; i < descriptors.length; i++) {
for (let j = 0; j < descriptors[i].length; j++) {
data.view[data.labels.length * descriptors[i].length + j] = descriptors[i][j]; // add each descriptor element to the buffer
}
data.labels.push(labels[i]); // finally add to labels
}
for (const worker of data.workers) { // inform all workers how many records we have
if (worker) worker.postMessage({ records: data.labels.length });
}
return data.labels.length;
};
const getLabel = (index) => data.labels[index];
const getDescriptor = (index) => {
if (!data.view) return [];
const descriptor = [];
for (let i = 0; i < options.descLength; i++) descriptor.push(data.view[index * options.descLength + i]);
return descriptor;
};
const fuzDescriptor = (descriptor) => {
for (let i = 0; i < descriptor.length; i++) descriptor[i] += Math.random() - 0.5;
return descriptor;
};
const delay = (ms) => new Promise((resolve) => { setTimeout(resolve, ms); });
async function workersClose() {
const current = data.workers.filter((worker) => !!worker).length;
log.info('closing workers:', { poolSize: data.workers.length, activeWorkers: current });
for (const worker of data.workers) {
if (worker) worker.postMessage({ shutdown: true }); // tell worker to exit
}
await delay(250); // wait a little for threads to exit on their own
const remaining = data.workers.filter((worker) => !!worker).length;
if (remaining > 0) {
log.info('terminating remaining workers:', { remaining, pool: data.workers.length });
for (const worker of data.workers) {
if (worker) worker.terminate(); // if a worker did not exit cleanly, terminate it
}
}
}
const workerMessage = (index, msg) => {
if (msg.request) {
if (options.debug) log.data('message:', { worker: index, request: msg.request, time: msg.time, label: getLabel(msg.index), similarity: msg.similarity });
if (msg.request >= testOptions.maxJobs) {
const t1 = process.hrtime.bigint();
const elapsed = Math.round(Number(t1 - t0) / 1000 / 1000);
log.state({ matchJobsFinished: testOptions.maxJobs, totalTimeMs: elapsed, averageTimeMs: Math.round(100 * elapsed / testOptions.maxJobs) / 100 });
workersClose();
}
} else {
log.data('message:', { worker: index, msg });
}
};
async function workerClose(id, code) {
const previous = data.workers.filter((worker) => !!worker).length;
delete data.workers[id];
const current = data.workers.filter((worker) => !!worker).length;
if (options.debug) log.state('worker exit:', { id, code, previous, current });
}
async function workersStart(numWorkers) {
const previous = data.workers.filter((worker) => !!worker).length;
log.info('starting worker thread pool:', { totalWorkers: numWorkers, alreadyActive: previous });
for (let i = 0; i < numWorkers; i++) {
if (!data.workers[i]) { // worker does not exist, so create it
const worker = new threads.Worker(path.join(__dirname, options.workerSrc));
worker.on('message', (msg) => workerMessage(i, msg));
worker.on('error', (err) => log.error('worker error:', { err }));
worker.on('exit', (code) => workerClose(i, code));
worker.postMessage(data.buffer); // send buffer to worker
data.workers[i] = worker;
}
data.workers[i]?.postMessage({ records: data.labels.length, threshold: options.minThreshold, debug: options.debug }); // inform worker how many records there are
}
await delay(100); // just wait a bit for everything to settle down
}
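// dispatch a single match request: copy the descriptor into a fresh ArrayBuffer and transfer it
// (zero-copy) to a worker selected round-robin by request id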
const match = (descriptor) => {
// const arr = Float32Array.from(descriptor);
const buffer = new ArrayBuffer(options.descLength * 4);
const view = new Float32Array(buffer);
view.set(descriptor);
const available = data.workers.filter((worker) => !!worker).length; // find number of available workers
if (available > 0) data.workers[data.requestID % available].postMessage({ descriptor: buffer, request: data.requestID }, [buffer]); // round robin to first available worker
else log.error('no available workers');
};
async function loadDB(count) {
const previous = data.labels.length;
if (!fs.existsSync(options.dbFile)) {
log.error('db file does not exist:', options.dbFile);
return;
}
t0 = process.hrtime.bigint();
for (let i = 0; i < count; i++) { // test loop: load entire face db from array of objects n times into buffer
const db = JSON.parse(fs.readFileSync(options.dbFile).toString());
const names = db.map((record) => record.name);
const descriptors = db.map((record) => record.embedding);
appendRecords(names, descriptors);
}
log.data('db loaded:', { existingRecords: previous, newRecords: data.labels.length });
}
async function createBuffer() {
data.buffer = new SharedArrayBuffer(4 * options.dbMax * options.descLength); // preallocate max number of records as sharedarraybuffers cannot grow
data.view = new Float32Array(data.buffer); // create view into buffer
data.labels.length = 0;
log.data('created shared buffer:', { maxDescriptors: (data.view.length || 0) / options.descLength, totalBytes: data.buffer.byteLength, totalElements: data.view.length });
}
async function main() {
log.header();
log.info('options:', options);
await createBuffer(); // create shared buffer array
await loadDB(testOptions.dbFact); // loadDB is a test method that calls actual addRecords
await workersStart(options.threadPoolSize); // can be called at anytime to modify worker pool size
for (let i = 0; i < testOptions.maxJobs; i++) {
const idx = Math.trunc(data.labels.length * Math.random()); // grab a random descriptor index that we'll search for
const descriptor = getDescriptor(idx); // grab a descriptor at index
data.requestID++; // increase request id
if (testOptions.fuzDescriptors) match(fuzDescriptor(descriptor)); // fuz descriptor for harder match
else match(descriptor);
if (options.debug) log.debug('submitted job', data.requestID); // we already know what we're searching for so we can compare results
}
log.state('submitted:', { matchJobs: testOptions.maxJobs, poolSize: data.workers.length, activeWorkers: data.workers.filter((worker) => !!worker).length });
}
main();

demo/helpers/README.md Normal file

@ -0,0 +1,3 @@
# Helper libraries
Used by main `Human` demo app

demo/helpers/gl-bench.js

@ -1,4 +1,3 @@
-// @ts-nocheck
// based on: https://github.com/munrocket/gl-bench
const UICSS = `
@ -37,15 +36,13 @@ const UISVG = `
class GLBench {
/** GLBench constructor
- * @param { WebGLRenderingContext | WebGL2RenderingContext } gl context
+ * @param { WebGLRenderingContext | WebGL2RenderingContext | null } gl context
* @param { Object | undefined } settings additional settings
*/
constructor(gl, settings = {}) {
this.css = UICSS;
this.svg = UISVG;
-// eslint-disable-next-line @typescript-eslint/no-empty-function
this.paramLogger = () => {};
-// eslint-disable-next-line @typescript-eslint/no-empty-function
this.chartLogger = () => {};
this.chartLen = 20;
this.chartHz = 20;
@ -92,7 +89,6 @@ class GLBench {
const addProfiler = (fn, self, target) => {
const t = self.now();
-// eslint-disable-next-line prefer-rest-params
fn.apply(target, arguments);
if (self.trackGPU) self.finished.push(glFinish(t, self.activeAccums.slice(0)));
};
@ -107,13 +103,11 @@ class GLBench {
if (gl[fn]) {
gl[fn] = addProfiler(gl[fn], this, gl);
} else {
-// eslint-disable-next-line no-console
console.log('bench: cannot attach to webgl function');
}
/*
gl.getExtension = ((fn, self) => {
-// eslint-disable-next-line prefer-rest-params
const ext = fn.apply(gl, arguments);
if (ext) {
['drawElementsInstancedANGLE', 'drawBuffersWEBGL'].forEach((fn2) => {
@ -148,7 +142,6 @@ class GLBench {
return (i, cpu, gpu, mem, fps, totalTime, frameId) => {
nodes['gl-cpu'][i].style.strokeDasharray = (cpu * 0.27).toFixed(0) + ' 100';
nodes['gl-gpu'][i].style.strokeDasharray = (gpu * 0.27).toFixed(0) + ' 100';
-// eslint-disable-next-line no-nested-ternary
nodes['gl-mem'][i].innerHTML = names[i] ? names[i] : (mem ? 'mem: ' + mem.toFixed(0) + 'mb' : '');
nodes['gl-fps'][i].innerHTML = 'FPS: ' + fps.toFixed(1);
logger(names[i], cpu, gpu, mem, fps, totalTime, frameId);

demo/helpers/jsonview.js

@ -64,9 +64,7 @@ function createNode() {
hideChildren() {
if (Array.isArray(this.children)) {
this.children.forEach((item) => {
-// @ts-ignore
item['elem']['classList'].add('hide');
-// @ts-ignore
if (item['expanded']) item.hideChildren();
});
}
@ -74,9 +72,7 @@ function createNode() {
showChildren() {
if (Array.isArray(this.children)) {
this.children.forEach((item) => {
-// @ts-ignore
item['elem']['classList'].remove('hide');
-// @ts-ignore
if (item['expanded']) item.showChildren();
});
}

demo/helpers/menu.js

@ -1,5 +1,3 @@
-//@ts-nocheck
let instance = 0;
let CSScreated = false;
@ -86,6 +84,7 @@
}
createMenu(parent, title = '', position = { top: null, left: null, bottom: null, right: null }) {
+/** @type {HTMLDivElement} */
this.menu = document.createElement('div');
this.menu.id = `menu-${instance}`;
this.menu.className = 'menu';
@ -133,11 +132,11 @@
}
get width() {
-return this.menu.offsetWidth || 0;
+return this.menu ? this.menu.offsetWidth : 0;
}
get height() {
-return this.menu.offsetHeight || 0;
+return this.menu ? this.menu.offsetHeight : 0;
}
hide() {
@ -205,8 +204,10 @@
el.innerHTML = `<div class="menu-checkbox"><input class="menu-checkbox" type="checkbox" id="${this.newID}" ${object[variable] ? 'checked' : ''}/><label class="menu-checkbox-label" for="${this.ID}"></label></div>${title}`;
if (this.container) this.container.appendChild(el);
el.addEventListener('change', (evt) => {
-object[variable] = evt.target.checked;
-if (callback) callback(evt.target.checked);
+if (evt.target) {
+object[variable] = evt.target['checked'];
+if (callback) callback(evt.target['checked']);
+}
});
return el;
}
@ -225,7 +226,7 @@
el.style.fontVariant = document.body.style.fontVariant;
if (this.container) this.container.appendChild(el);
el.addEventListener('change', (evt) => {
-if (callback) callback(items[evt.target.selectedIndex]);
+if (callback && evt.target) callback(items[evt.target['selectedIndex']]);
});
return el;
}
@ -237,12 +238,12 @@
if (this.container) this.container.appendChild(el);
el.addEventListener('change', (evt) => {
if (evt.target) {
-object[variable] = parseInt(evt.target.value) === parseFloat(evt.target.value) ? parseInt(evt.target.value) : parseFloat(evt.target.value);
-evt.target.setAttribute('value', evt.target.value);
-if (callback) callback(evt.target.value);
+object[variable] = parseInt(evt.target['value']) === parseFloat(evt.target['value']) ? parseInt(evt.target['value']) : parseFloat(evt.target['value']);
+evt.target.setAttribute('value', evt.target['value']);
+if (callback) callback(evt.target['value']);
}
});
-el.input = el.children[0];
+el['input'] = el.children[0];
return el;
}
@ -282,7 +283,6 @@
return el;
}
-// eslint-disable-next-line class-methods-use-this
updateValue(title, val, suffix = '') {
const el = document.getElementById(`menu-val-${title}`);
if (el) el.innerText = `${title}: ${val}${suffix}`;
@ -299,12 +299,13 @@
return el;
}
-// eslint-disable-next-line class-methods-use-this
async updateChart(id, values) {
if (!values || (values.length === 0)) return;
+/** @type {HTMLCanvasElement} */
const canvas = document.getElementById(`menu-canvas-${id}`);
if (!canvas) return;
const ctx = canvas.getContext('2d');
+if (!ctx) return;
ctx.fillStyle = theme.background;
ctx.fillRect(0, 0, canvas.width, canvas.height);
const width = canvas.width / values.length;
@ -318,7 +319,7 @@
ctx.fillRect(i * width, 0, width - 4, canvas.height);
ctx.fillStyle = theme.background;
ctx.font = `${width / 1.5}px "Segoe UI"`;
-ctx.fillText(Math.round(values[i]), i * width + 1, canvas.height - 1, width - 1);
+ctx.fillText(Math.round(values[i]).toString(), i * width + 1, canvas.height - 1, width - 1);
}
}
}

@ -1,870 +0,0 @@
// @ts-nocheck
import { Vector2, Vector3, Spherical, MOUSE, Quaternion, EventDispatcher } from './three.js';
/**
* @author qiao / https://github.com/qiao
* @author mrdoob / http://mrdoob.com
* @author alteredq / http://alteredqualia.com/
* @author WestLangley / http://github.com/WestLangley
* @author erich666 / http://erichaines.com
*/
// This set of controls performs orbiting, dollying (zooming), and panning.
// Unlike TrackballControls, it maintains the "up" direction object.up (+Y by default).
//
// Orbit - left mouse / touch: one-finger move
// Zoom - middle mouse, or mousewheel / touch: two-finger spread or squish
// Pan - right mouse, or left mouse + ctrl/metaKey, or arrow keys / touch: two-finger move
const OrbitControls = function (object, domElement) {
this.object = object;
this.domElement = (domElement !== undefined) ? domElement : document;
// Set to false to disable this control
this.enabled = true;
// "target" sets the location of focus, where the object orbits around
this.target = new Vector3();
// How far you can dolly in and out ( PerspectiveCamera only )
this.minDistance = 0;
this.maxDistance = Infinity;
// How far you can zoom in and out ( OrthographicCamera only )
this.minZoom = 0;
this.maxZoom = Infinity;
// How far you can orbit vertically, upper and lower limits.
// Range is 0 to Math.PI radians.
this.minPolarAngle = 0; // radians
this.maxPolarAngle = Math.PI; // radians
// How far you can orbit horizontally, upper and lower limits.
// If set, must be a sub-interval of the interval [ - Math.PI, Math.PI ].
this.minAzimuthAngle = -Infinity; // radians
this.maxAzimuthAngle = Infinity; // radians
// Set to true to enable damping (inertia)
// If damping is enabled, you must call controls.update() in your animation loop
this.enableDamping = false;
this.dampingFactor = 0.25;
// This option actually enables dollying in and out; left as "zoom" for backwards compatibility.
// Set to false to disable zooming
this.enableZoom = true;
this.zoomSpeed = 1.0;
// Set to false to disable rotating
this.enableRotate = true;
this.rotateSpeed = 1.0;
// Set to false to disable panning
this.enablePan = true;
this.panSpeed = 1.0;
this.screenSpacePanning = false; // if true, pan in screen-space
this.keyPanSpeed = 7.0; // pixels moved per arrow key push
// Set to true to automatically rotate around the target
// If auto-rotate is enabled, you must call controls.update() in your animation loop
this.autoRotate = false;
this.autoRotateSpeed = 2.0; // 30 seconds per round when fps is 60
// Set to false to disable use of the keys
this.enableKeys = true;
// The four arrow keys
this.keys = { LEFT: 37, UP: 38, RIGHT: 39, BOTTOM: 40 };
// Mouse buttons
this.mouseButtons = { LEFT: MOUSE.LEFT, MIDDLE: MOUSE.MIDDLE, RIGHT: MOUSE.RIGHT };
// for reset
this.target0 = this.target.clone();
this.position0 = this.object.position.clone();
this.zoom0 = this.object.zoom;
//
// public methods
//
this.getPolarAngle = function () {
return spherical.phi;
};
this.getAzimuthalAngle = function () {
return spherical.theta;
};
this.saveState = function () {
scope.target0.copy(scope.target);
scope.position0.copy(scope.object.position);
scope.zoom0 = scope.object.zoom;
};
this.reset = function () {
scope.target.copy(scope.target0);
scope.object.position.copy(scope.position0);
scope.object.zoom = scope.zoom0;
scope.object.updateProjectionMatrix();
scope.dispatchEvent(changeEvent);
scope.update();
state = STATE.NONE;
};
// this method is exposed, but perhaps it would be better if we can make it private...
this.update = (function () {
const offset = new Vector3();
// so camera.up is the orbit axis
const quat = new Quaternion().setFromUnitVectors(object.up, new Vector3(0, 1, 0));
const quatInverse = quat.clone().invert();
const lastPosition = new Vector3();
const lastQuaternion = new Quaternion();
return function update() {
const position = scope.object.position;
offset.copy(position).sub(scope.target);
// rotate offset to "y-axis-is-up" space
offset.applyQuaternion(quat);
// angle from z-axis around y-axis
spherical.setFromVector3(offset);
if (scope.autoRotate && state === STATE.NONE) {
rotateLeft(getAutoRotationAngle());
}
spherical.theta += sphericalDelta.theta;
spherical.phi += sphericalDelta.phi;
// restrict theta to be between desired limits
spherical.theta = Math.max(scope.minAzimuthAngle, Math.min(scope.maxAzimuthAngle, spherical.theta));
// restrict phi to be between desired limits
spherical.phi = Math.max(scope.minPolarAngle, Math.min(scope.maxPolarAngle, spherical.phi));
spherical.makeSafe();
spherical.radius *= scale;
// restrict radius to be between desired limits
spherical.radius = Math.max(scope.minDistance, Math.min(scope.maxDistance, spherical.radius));
// move target to panned location
scope.target.add(panOffset);
offset.setFromSpherical(spherical);
// rotate offset back to "camera-up-vector-is-up" space
offset.applyQuaternion(quatInverse);
position.copy(scope.target).add(offset);
scope.object.lookAt(scope.target);
if (scope.enableDamping === true) {
sphericalDelta.theta *= (1 - scope.dampingFactor);
sphericalDelta.phi *= (1 - scope.dampingFactor);
panOffset.multiplyScalar(1 - scope.dampingFactor);
} else {
sphericalDelta.set(0, 0, 0);
panOffset.set(0, 0, 0);
}
scale = 1;
// update condition is:
// min(camera displacement, camera rotation in radians)^2 > EPS
// using small-angle approximation cos(x/2) = 1 - x^2 / 8
if (zoomChanged
|| lastPosition.distanceToSquared(scope.object.position) > EPS
|| 8 * (1 - lastQuaternion.dot(scope.object.quaternion)) > EPS) {
scope.dispatchEvent(changeEvent);
lastPosition.copy(scope.object.position);
lastQuaternion.copy(scope.object.quaternion);
zoomChanged = false;
return true;
}
return false;
};
}());
this.dispose = function () {
scope.domElement.removeEventListener('contextmenu', onContextMenu, false);
scope.domElement.removeEventListener('mousedown', onMouseDown, false);
scope.domElement.removeEventListener('wheel', onMouseWheel, false);
scope.domElement.removeEventListener('touchstart', onTouchStart, false);
scope.domElement.removeEventListener('touchend', onTouchEnd, false);
scope.domElement.removeEventListener('touchmove', onTouchMove, false);
document.removeEventListener('mousemove', onMouseMove, false);
document.removeEventListener('mouseup', onMouseUp, false);
window.removeEventListener('keydown', onKeyDown, false);
// scope.dispatchEvent( { type: 'dispose' } ); // should this be added here?
};
//
// internals
//
var scope = this;
var changeEvent = { type: 'change' };
const startEvent = { type: 'start' };
const endEvent = { type: 'end' };
var STATE = { NONE: -1, ROTATE: 0, DOLLY: 1, PAN: 2, TOUCH_ROTATE: 3, TOUCH_DOLLY_PAN: 4 };
var state = STATE.NONE;
var EPS = 0.000001;
// current position in spherical coordinates
var spherical = new Spherical();
var sphericalDelta = new Spherical();
var scale = 1;
var panOffset = new Vector3();
var zoomChanged = false;
const rotateStart = new Vector2();
const rotateEnd = new Vector2();
const rotateDelta = new Vector2();
const panStart = new Vector2();
const panEnd = new Vector2();
const panDelta = new Vector2();
const dollyStart = new Vector2();
const dollyEnd = new Vector2();
const dollyDelta = new Vector2();
function getAutoRotationAngle() {
return 2 * Math.PI / 60 / 60 * scope.autoRotateSpeed;
}
function getZoomScale() {
return Math.pow(0.95, scope.zoomSpeed);
}
function rotateLeft(angle) {
sphericalDelta.theta -= angle;
}
function rotateUp(angle) {
sphericalDelta.phi -= angle;
}
const panLeft = (function () {
const v = new Vector3();
return function panLeft(distance, objectMatrix) {
v.setFromMatrixColumn(objectMatrix, 0); // get X column of objectMatrix
v.multiplyScalar(-distance);
panOffset.add(v);
};
}());
const panUp = (function () {
const v = new Vector3();
return function panUp(distance, objectMatrix) {
if (scope.screenSpacePanning === true) {
v.setFromMatrixColumn(objectMatrix, 1);
} else {
v.setFromMatrixColumn(objectMatrix, 0);
v.crossVectors(scope.object.up, v);
}
v.multiplyScalar(distance);
panOffset.add(v);
};
}());
// deltaX and deltaY are in pixels; right and down are positive
const pan = (function () {
const offset = new Vector3();
return function pan(deltaX, deltaY) {
const element = scope.domElement === document ? scope.domElement.body : scope.domElement;
if (scope.object.isPerspectiveCamera) {
// perspective
const position = scope.object.position;
offset.copy(position).sub(scope.target);
let targetDistance = offset.length();
// half of the fov is center to top of screen
targetDistance *= Math.tan((scope.object.fov / 2) * Math.PI / 180.0);
// we use only clientHeight here so aspect ratio does not distort speed
panLeft(2 * deltaX * targetDistance / element.clientHeight, scope.object.matrix);
panUp(2 * deltaY * targetDistance / element.clientHeight, scope.object.matrix);
} else if (scope.object.isOrthographicCamera) {
// orthographic
panLeft(deltaX * (scope.object.right - scope.object.left) / scope.object.zoom / element.clientWidth,
scope.object.matrix);
panUp(deltaY * (scope.object.top - scope.object.bottom) / scope.object.zoom / element.clientHeight, scope
.object.matrix);
} else {
// camera neither orthographic nor perspective
scope.enablePan = false;
}
};
}());
function dollyIn(dollyScale) {
if (scope.object.isPerspectiveCamera) {
scale /= dollyScale;
} else if (scope.object.isOrthographicCamera) {
scope.object.zoom = Math.max(scope.minZoom, Math.min(scope.maxZoom, scope.object.zoom * dollyScale));
scope.object.updateProjectionMatrix();
zoomChanged = true;
} else {
scope.enableZoom = false;
}
}
function dollyOut(dollyScale) {
if (scope.object.isPerspectiveCamera) {
scale *= dollyScale;
} else if (scope.object.isOrthographicCamera) {
scope.object.zoom = Math.max(scope.minZoom, Math.min(scope.maxZoom, scope.object.zoom / dollyScale));
scope.object.updateProjectionMatrix();
zoomChanged = true;
} else {
scope.enableZoom = false;
}
}
//
// event callbacks - update the object state
//
function handleMouseDownRotate(event) {
// console.log( 'handleMouseDownRotate' );
rotateStart.set(event.clientX, event.clientY);
}
function handleMouseDownDolly(event) {
// console.log( 'handleMouseDownDolly' );
dollyStart.set(event.clientX, event.clientY);
}
function handleMouseDownPan(event) {
// console.log( 'handleMouseDownPan' );
panStart.set(event.clientX, event.clientY);
}
function handleMouseMoveRotate(event) {
// console.log( 'handleMouseMoveRotate' );
rotateEnd.set(event.clientX, event.clientY);
rotateDelta.subVectors(rotateEnd, rotateStart).multiplyScalar(scope.rotateSpeed);
const element = scope.domElement === document ? scope.domElement.body : scope.domElement;
rotateLeft(2 * Math.PI * rotateDelta.x / element.clientHeight); // yes, height
rotateUp(2 * Math.PI * rotateDelta.y / element.clientHeight);
rotateStart.copy(rotateEnd);
scope.update();
}
function handleMouseMoveDolly(event) {
// console.log( 'handleMouseMoveDolly' );
dollyEnd.set(event.clientX, event.clientY);
dollyDelta.subVectors(dollyEnd, dollyStart);
if (dollyDelta.y > 0) {
dollyIn(getZoomScale());
} else if (dollyDelta.y < 0) {
dollyOut(getZoomScale());
}
dollyStart.copy(dollyEnd);
scope.update();
}
function handleMouseMovePan(event) {
// console.log( 'handleMouseMovePan' );
panEnd.set(event.clientX, event.clientY);
panDelta.subVectors(panEnd, panStart).multiplyScalar(scope.panSpeed);
pan(panDelta.x, panDelta.y);
panStart.copy(panEnd);
scope.update();
}
function handleMouseUp(event) {
// console.log( 'handleMouseUp' );
}
function handleMouseWheel(event) {
// console.log( 'handleMouseWheel' );
if (event.deltaY < 0) {
dollyOut(getZoomScale());
} else if (event.deltaY > 0) {
dollyIn(getZoomScale());
}
scope.update();
}
function handleKeyDown(event) {
// console.log( 'handleKeyDown' );
switch (event.keyCode) {
case scope.keys.UP:
pan(0, scope.keyPanSpeed);
scope.update();
break;
case scope.keys.BOTTOM:
pan(0, -scope.keyPanSpeed);
scope.update();
break;
case scope.keys.LEFT:
pan(scope.keyPanSpeed, 0);
scope.update();
break;
case scope.keys.RIGHT:
pan(-scope.keyPanSpeed, 0);
scope.update();
break;
}
}
function handleTouchStartRotate(event) {
// console.log( 'handleTouchStartRotate' );
rotateStart.set(event.touches[0].pageX, event.touches[0].pageY);
}
function handleTouchStartDollyPan(event) {
// console.log( 'handleTouchStartDollyPan' );
if (scope.enableZoom) {
const dx = event.touches[0].pageX - event.touches[1].pageX;
const dy = event.touches[0].pageY - event.touches[1].pageY;
const distance = Math.sqrt(dx * dx + dy * dy);
dollyStart.set(0, distance);
}
if (scope.enablePan) {
const x = 0.5 * (event.touches[0].pageX + event.touches[1].pageX);
const y = 0.5 * (event.touches[0].pageY + event.touches[1].pageY);
panStart.set(x, y);
}
}
function handleTouchMoveRotate(event) {
// console.log( 'handleTouchMoveRotate' );
rotateEnd.set(event.touches[0].pageX, event.touches[0].pageY);
rotateDelta.subVectors(rotateEnd, rotateStart).multiplyScalar(scope.rotateSpeed);
const element = scope.domElement === document ? scope.domElement.body : scope.domElement;
rotateLeft(2 * Math.PI * rotateDelta.x / element.clientHeight); // yes, height
rotateUp(2 * Math.PI * rotateDelta.y / element.clientHeight);
rotateStart.copy(rotateEnd);
scope.update();
}
function handleTouchMoveDollyPan(event) {
// console.log( 'handleTouchMoveDollyPan' );
if (scope.enableZoom) {
const dx = event.touches[0].pageX - event.touches[1].pageX;
const dy = event.touches[0].pageY - event.touches[1].pageY;
const distance = Math.sqrt(dx * dx + dy * dy);
dollyEnd.set(0, distance);
dollyDelta.set(0, Math.pow(dollyEnd.y / dollyStart.y, scope.zoomSpeed));
dollyIn(dollyDelta.y);
dollyStart.copy(dollyEnd);
}
if (scope.enablePan) {
const x = 0.5 * (event.touches[0].pageX + event.touches[1].pageX);
const y = 0.5 * (event.touches[0].pageY + event.touches[1].pageY);
panEnd.set(x, y);
panDelta.subVectors(panEnd, panStart).multiplyScalar(scope.panSpeed);
pan(panDelta.x, panDelta.y);
panStart.copy(panEnd);
}
scope.update();
}
function handleTouchEnd(event) {
// console.log( 'handleTouchEnd' );
}
//
// event handlers - FSM: listen for events and reset state
//
function onMouseDown(event) {
if (scope.enabled === false) return;
event.preventDefault();
switch (event.button) {
case scope.mouseButtons.LEFT:
if (event.ctrlKey || event.metaKey) {
if (scope.enablePan === false) return;
handleMouseDownPan(event);
state = STATE.PAN;
} else {
if (scope.enableRotate === false) return;
handleMouseDownRotate(event);
state = STATE.ROTATE;
}
break;
case scope.mouseButtons.MIDDLE:
if (scope.enableZoom === false) return;
handleMouseDownDolly(event);
state = STATE.DOLLY;
break;
case scope.mouseButtons.RIGHT:
if (scope.enablePan === false) return;
handleMouseDownPan(event);
state = STATE.PAN;
break;
}
if (state !== STATE.NONE) {
document.addEventListener('mousemove', onMouseMove, false);
document.addEventListener('mouseup', onMouseUp, false);
scope.dispatchEvent(startEvent);
}
}
function onMouseMove(event) {
if (scope.enabled === false) return;
event.preventDefault();
switch (state) {
case STATE.ROTATE:
if (scope.enableRotate === false) return;
handleMouseMoveRotate(event);
break;
case STATE.DOLLY:
if (scope.enableZoom === false) return;
handleMouseMoveDolly(event);
break;
case STATE.PAN:
if (scope.enablePan === false) return;
handleMouseMovePan(event);
break;
}
}
function onMouseUp(event) {
if (scope.enabled === false) return;
handleMouseUp(event);
document.removeEventListener('mousemove', onMouseMove, false);
document.removeEventListener('mouseup', onMouseUp, false);
scope.dispatchEvent(endEvent);
state = STATE.NONE;
}
function onMouseWheel(event) {
if (scope.enabled === false || scope.enableZoom === false || (state !== STATE.NONE && state !== STATE.ROTATE)) return;
event.preventDefault();
event.stopPropagation();
scope.dispatchEvent(startEvent);
handleMouseWheel(event);
scope.dispatchEvent(endEvent);
}
function onKeyDown(event) {
if (scope.enabled === false || scope.enableKeys === false || scope.enablePan === false) return;
handleKeyDown(event);
}
function onTouchStart(event) {
if (scope.enabled === false) return;
event.preventDefault();
switch (event.touches.length) {
case 1: // one-fingered touch: rotate
if (scope.enableRotate === false) return;
handleTouchStartRotate(event);
state = STATE.TOUCH_ROTATE;
break;
case 2: // two-fingered touch: dolly-pan
if (scope.enableZoom === false && scope.enablePan === false) return;
handleTouchStartDollyPan(event);
state = STATE.TOUCH_DOLLY_PAN;
break;
default:
state = STATE.NONE;
}
if (state !== STATE.NONE) {
scope.dispatchEvent(startEvent);
}
}
function onTouchMove(event) {
if (scope.enabled === false) return;
event.preventDefault();
event.stopPropagation();
switch (event.touches.length) {
case 1: // one-fingered touch: rotate
if (scope.enableRotate === false) return;
if (state !== STATE.TOUCH_ROTATE) return; // is this needed?
handleTouchMoveRotate(event);
break;
case 2: // two-fingered touch: dolly-pan
if (scope.enableZoom === false && scope.enablePan === false) return;
if (state !== STATE.TOUCH_DOLLY_PAN) return; // is this needed?
handleTouchMoveDollyPan(event);
break;
default:
state = STATE.NONE;
}
}
function onTouchEnd(event) {
if (scope.enabled === false) return;
handleTouchEnd(event);
scope.dispatchEvent(endEvent);
state = STATE.NONE;
}
function onContextMenu(event) {
if (scope.enabled === false) return;
event.preventDefault();
}
//
scope.domElement.addEventListener('contextmenu', onContextMenu, false);
scope.domElement.addEventListener('mousedown', onMouseDown, false);
scope.domElement.addEventListener('wheel', onMouseWheel, false);
scope.domElement.addEventListener('touchstart', onTouchStart, false);
scope.domElement.addEventListener('touchend', onTouchEnd, false);
scope.domElement.addEventListener('touchmove', onTouchMove, false);
window.addEventListener('keydown', onKeyDown, false);
// force an update at start
this.update();
};
OrbitControls.prototype = Object.create(EventDispatcher.prototype);
OrbitControls.prototype.constructor = OrbitControls;
Object.defineProperties(OrbitControls.prototype, {
center: {
get() {
return this.target;
},
},
// backward compatibility
noZoom: {
get() {
return !this.enableZoom;
},
set(value) {
this.enableZoom = !value;
},
},
noRotate: {
get() {
return !this.enableRotate;
},
set(value) {
this.enableRotate = !value;
},
},
noPan: {
get() {
return !this.enablePan;
},
set(value) {
this.enablePan = !value;
},
},
noKeys: {
get() {
return !this.enableKeys;
},
set(value) {
this.enableKeys = !value;
},
},
staticMoving: {
get() {
return !this.enableDamping;
},
set(value) {
this.enableDamping = !value;
},
},
dynamicDampingFactor: {
get() {
return this.dampingFactor;
},
set(value) {
this.dampingFactor = value;
},
},
});
export { OrbitControls };
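A minimal usage sketch for the `OrbitControls` exported above, assuming a standard three.js scene; the camera, renderer, and import path are illustrative assumptions, not part of this file:

```js
import * as THREE from 'three';
import { OrbitControls } from './orbitControls.js'; // hypothetical path to the module above

const camera = new THREE.PerspectiveCamera(50, window.innerWidth / window.innerHeight, 0.1, 100);
camera.position.set(0, 2, 5);
const scene = new THREE.Scene();
const renderer = new THREE.WebGLRenderer();
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);

// the constructor wires the mouse/wheel/touch/key listeners seen above to the given element
const controls = new OrbitControls(camera, renderer.domElement);
controls.enableDamping = true; // inertia; requires calling update() every frame
controls.dampingFactor = 0.1;

function animate() {
  requestAnimationFrame(animate);
  controls.update(); // applies pending rotate/dolly/pan deltas and damping
  renderer.render(scene, camera);
}
animate();
```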

File diff suppressed because one or more lines are too long

View File

@ -4,8 +4,7 @@ async function log(...msg) {
if (debug) { if (debug) {
const dt = new Date(); const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`; const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
// eslint-disable-next-line no-console console.log(ts, 'webrtc', ...msg); // eslint-disable-line no-console
console.log(ts, 'webrtc', ...msg);
} }
} }

View File

@ -2,6 +2,7 @@
* PWA Service Worker for Human main demo * PWA Service Worker for Human main demo
*/ */
/* eslint-disable no-restricted-globals */
/// <reference lib="webworker" /> /// <reference lib="webworker" />
const skipCaching = false; const skipCaching = false;
@ -19,8 +20,7 @@ const stats = { hit: 0, miss: 0 };
const log = (...msg) => { const log = (...msg) => {
const dt = new Date(); const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`; const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
// eslint-disable-next-line no-console console.log(ts, 'pwa', ...msg); // eslint-disable-line no-console
console.log(ts, 'pwa', ...msg);
}; };
async function updateCached(req) { async function updateCached(req) {
@ -31,7 +31,7 @@ async function updateCached(req) {
caches caches
.open(cacheName) .open(cacheName)
.then((cache) => cache.put(req, update)) .then((cache) => cache.put(req, update))
.catch((err) => log('cache update error', err)); .catch((err) => log('cache update error', err)); // eslint-disable-line promise/no-nesting
} }
return true; return true;
}) })
@ -75,14 +75,13 @@ async function getCached(evt) {
} }
function cacheInit() { function cacheInit() {
// eslint-disable-next-line promise/catch-or-return
caches.open(cacheName) caches.open(cacheName)
// eslint-disable-next-line promise/no-nesting
.then((cache) => cache.addAll(cacheFiles) .then((cache) => cache.addAll(cacheFiles)
.then( .then( // eslint-disable-line promise/no-nesting
() => log('cache refresh:', cacheFiles.length, 'files'), () => log('cache refresh:', cacheFiles.length, 'files'),
(err) => log('cache error', err), (err) => log('cache error', err),
)); ))
.catch(() => log('cache error'));
} }
if (!listening) { if (!listening) {
@ -99,14 +98,12 @@ if (!listening) {
self.addEventListener('install', (evt) => { self.addEventListener('install', (evt) => {
log('install'); log('install');
// @ts-ignore scope for self is ServiceWorkerGlobalScope not Window
self.skipWaiting(); self.skipWaiting();
evt.waitUntil(cacheInit); evt.waitUntil(cacheInit);
}); });
self.addEventListener('activate', (evt) => { self.addEventListener('activate', (evt) => {
log('activate'); log('activate');
// @ts-ignore scope for self is ServiceWorkerGlobalScope not Window
evt.waitUntil(self.clients.claim()); evt.waitUntil(self.clients.claim());
}); });
@ -114,7 +111,7 @@ if (!listening) {
const uri = new URL(evt.request.url); const uri = new URL(evt.request.url);
// if (uri.pathname === '/') { log('cache skip /', evt.request); return; } // skip root access requests // if (uri.pathname === '/') { log('cache skip /', evt.request); return; } // skip root access requests
if (evt.request.cache === 'only-if-cached' && evt.request.mode !== 'same-origin') return; // required due to chrome bug if (evt.request.cache === 'only-if-cached' && evt.request.mode !== 'same-origin') return; // required due to chrome bug
if (uri.origin !== location.origin) return; // skip non-local requests if (uri.origin !== self.location.origin) return; // skip non-local requests
if (evt.request.method !== 'GET') return; // only cache get requests if (evt.request.method !== 'GET') return; // only cache get requests
if (evt.request.url.includes('/api/')) return; // don't cache api requests, failures are handled at the time of call if (evt.request.url.includes('/api/')) return; // don't cache api requests, failures are handled at the time of call
@ -129,7 +126,7 @@ if (!listening) {
log(`PWA: ${evt.type}`); log(`PWA: ${evt.type}`);
if (refreshed) return; if (refreshed) return;
refreshed = true; refreshed = true;
location.reload(); self.location.reload();
}); });
listening = true; listening = true;

View File

@ -1,12 +1,15 @@
/// <reference lib="webworker" /> /**
* Web worker used by main demo app
* Loaded from index.js
*/
/// <reference lib="webworker"/>
// load Human using IIFE script as Chrome Mobile does not support Modules as Workers
// import Human from '../dist/human.esm.js'; self.importScripts('../dist/human.js'); // eslint-disable-line no-restricted-globals
self.importScripts('../dist/human.js');
let busy = false; let busy = false;
// @ts-ignore // Human is registered as global namespace using IIFE script // eslint-disable-next-line new-cap, no-undef
// eslint-disable-next-line no-undef, new-cap
const human = new Human.default(); const human = new Human.default();
onmessage = async (msg) => { // receive message from main thread onmessage = async (msg) => { // receive message from main thread

View File

@ -35,7 +35,7 @@
.video { display: none; } .video { display: none; }
.canvas { margin: 0 auto; } .canvas { margin: 0 auto; }
.bench { position: absolute; right: 0; bottom: 0; } .bench { position: absolute; right: 0; bottom: 0; }
.compare-image { width: 256px; position: absolute; top: 150px; left: 30px; box-shadow: 0 0 2px 2px black; background: black; display: none; } .compare-image { width: 200px; position: absolute; top: 150px; left: 30px; box-shadow: 0 0 2px 2px black; background: black; display: none; }
.loader { width: 300px; height: 300px; border: 3px solid transparent; border-radius: 50%; border-top: 4px solid #f15e41; animation: spin 4s linear infinite; position: absolute; bottom: 15%; left: 50%; margin-left: -150px; z-index: 15; } .loader { width: 300px; height: 300px; border: 3px solid transparent; border-radius: 50%; border-top: 4px solid #f15e41; animation: spin 4s linear infinite; position: absolute; bottom: 15%; left: 50%; margin-left: -150px; z-index: 15; }
.loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: 4px solid transparent; } .loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: 4px solid transparent; }
.loader::before { border-top-color: #bad375; animation: 3s spin linear infinite; } .loader::before { border-top-color: #bad375; animation: 3s spin linear infinite; }
@ -67,7 +67,7 @@
.hint { opacity: 0; transition-duration: 0.5s; transition-property: opacity; font-style: italic; position: fixed; top: 5rem; padding: 8px; margin: 8px; box-shadow: 0 0 2px 2px #303030; } .hint { opacity: 0; transition-duration: 0.5s; transition-property: opacity; font-style: italic; position: fixed; top: 5rem; padding: 8px; margin: 8px; box-shadow: 0 0 2px 2px #303030; }
.input-file { align-self: center; width: 5rem; } .input-file { align-self: center; width: 5rem; }
.results { position: absolute; left: 0; top: 6rem; background: #303030; width: 20rem; height: 90%; font-size: 0.8rem; overflow-y: auto; display: none } .results { position: absolute; left: 0; top: 5rem; background: #303030; width: 20rem; height: 90%; font-size: 0.8rem; overflow-y: auto; display: none }
.results::-webkit-scrollbar { background-color: #303030; } .results::-webkit-scrollbar { background-color: #303030; }
.results::-webkit-scrollbar-thumb { background: black; border-radius: 10px; } .results::-webkit-scrollbar-thumb { background: black; border-radius: 10px; }
.json-line { margin: 4px 0; display: flex; justify-content: flex-start; } .json-line { margin: 4px 0; display: flex; justify-content: flex-start; }
@ -89,9 +89,9 @@
<body> <body>
<div id="play" class="play icon-play"></div> <div id="play" class="play icon-play"></div>
<div id="background"> <div id="background">
<div class='wave one'></div> <div class="wave one"></div>
<div class='wave two'></div> <div class="wave two"></div>
<div class='wave three'></div> <div class="wave three"></div>
</div> </div>
<div id="loader" class="loader"></div> <div id="loader" class="loader"></div>
<div id="status" class="status"></div> <div id="status" class="status"></div>
@ -107,13 +107,9 @@
<video id="video" playsinline class="video"></video> <video id="video" playsinline class="video"></video>
</div> </div>
<div id="compare-container" class="compare-image"> <div id="compare-container" class="compare-image">
<canvas id="compare-canvas" width="256" height="256"></canvas> <canvas id="compare-canvas" width="200" height="200"></canvas>
<div id="similarity"></div> <div id="similarity"></div>
</div> </div>
<div id="segmentation-container" class="compare-image">
<canvas id="segmentation-mask" width="256" height="256" style="width: 256px; height: 256px;"></canvas>
<canvas id="segmentation-canvas" width="256" height="256" style="width: 256px; height: 256px;"></canvas>
</div>
<div id="samples-container" class="samples-container"></div> <div id="samples-container" class="samples-container"></div>
<div id="hint" class="hint"></div> <div id="hint" class="hint"></div>
<div id="log" class="log"></div> <div id="log" class="log"></div>

View File

@ -18,11 +18,12 @@
* ui={}: contains all variables exposed in the UI * ui={}: contains all variables exposed in the UI
*/ */
// test url <https://human.local/?worker=false&async=false&bench=false&draw=true&warmup=full&backend=humangl> // WARNING!!!
// This demo is written using older code style and a lot of manual setup
// Newer versions of Human have richer functionality allowing for much cleaner & easier usage
// It is recommended to use other demos such as `demo/typescript` for usage examples
// @ts-nocheck // typescript checks disabled as this is pure javascript import { Human } from '../dist/human.esm.js'; // equivalent of @vladmandic/human
import Human from '../dist/human.esm.js'; // equivalent of @vladmandic/human
import Menu from './helpers/menu.js'; import Menu from './helpers/menu.js';
import GLBench from './helpers/gl-bench.js'; import GLBench from './helpers/gl-bench.js';
import webRTC from './helpers/webrtc.js'; import webRTC from './helpers/webrtc.js';
@ -31,17 +32,17 @@ import jsonView from './helpers/jsonview.js';
let human; let human;
let userConfig = { let userConfig = {
// face: { enabled: false },
// body: { enabled: false },
// hand: { enabled: false },
/* /*
warmup: 'none', warmup: 'none',
backend: 'humangl', backend: 'webgl',
debug: true, debug: true,
wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/', wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
async: false, async: false,
cacheSensitivity: 0.75, cacheSensitivity: 0.75,
filter: { filter: { enabled: false, flip: false },
enabled: false,
flip: false,
},
face: { enabled: false, face: { enabled: false,
detector: { return: false, rotation: true }, detector: { return: false, rotation: true },
mesh: { enabled: false }, mesh: { enabled: false },
@ -51,13 +52,17 @@ let userConfig = {
}, },
object: { enabled: false }, object: { enabled: false },
gesture: { enabled: true }, gesture: { enabled: true },
// hand: { enabled: false },
hand: { enabled: true, maxDetected: 1, minConfidence: 0.5, detector: { modelPath: 'handtrack.json' } }, hand: { enabled: true, maxDetected: 1, minConfidence: 0.5, detector: { modelPath: 'handtrack.json' } },
body: { enabled: false }, body: { enabled: false },
// body: { enabled: true, modelPath: 'movenet-multipose.json' }, // body: { enabled: true, modelPath: 'movenet-multipose.json' },
// body: { enabled: true, modelPath: 'posenet.json' },
segmentation: { enabled: false }, segmentation: { enabled: false },
*/ */
/*
face: { iris: { enabled: false }, emotion: { enabled: false } },
hand: { enabled: false },
body: { enabled: false },
gesture: { enabled: false },
*/
}; };
const drawOptions = { const drawOptions = {
@ -65,6 +70,7 @@ const drawOptions = {
drawBoxes: true, drawBoxes: true,
drawGaze: true, drawGaze: true,
drawLabels: true, drawLabels: true,
drawGestures: true,
drawPolygons: true, drawPolygons: true,
drawPoints: false, drawPoints: false,
fillPolygons: false, fillPolygons: false,
@ -80,7 +86,7 @@ const ui = {
facing: true, // camera facing front or back facing: true, // camera facing front or back
baseBackground: 'rgba(50, 50, 50, 1)', // 'grey' baseBackground: 'rgba(50, 50, 50, 1)', // 'grey'
columns: 2, // when processing sample images create this many columns columns: 2, // when processing sample images create this many columns
useWorker: true, // use web workers for processing useWorker: false, // use web workers for processing
worker: 'index-worker.js', worker: 'index-worker.js',
maxFPSframes: 10, // keep fps history for how many frames maxFPSframes: 10, // keep fps history for how many frames
modelsPreload: false, // preload human models on startup modelsPreload: false, // preload human models on startup
@ -108,7 +114,6 @@ const ui = {
results: false, // show results tree results: false, // show results tree
lastFrame: 0, // time of last frame processing lastFrame: 0, // time of last frame processing
viewportSet: false, // internal, has custom viewport been set viewportSet: false, // internal, has custom viewport been set
background: null, // holds instance of segmentation background image
transferCanvas: null, // canvas used to transfer data to and from worker transferCanvas: null, // canvas used to transfer data to and from worker
// webrtc // webrtc
@ -147,6 +152,10 @@ let worker;
let bench; let bench;
let lastDetectedResult = {}; let lastDetectedResult = {};
// helper function: async pause
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
const delay = (ms) => new Promise((resolve) => { setTimeout(resolve, ms); });
// helper function: translates json to human readable string // helper function: translates json to human readable string
function str(...msg) { function str(...msg) {
if (!Array.isArray(msg)) return msg; if (!Array.isArray(msg)) return msg;
@ -162,30 +171,30 @@ function str(...msg) {
function log(...msg) { function log(...msg) {
const dt = new Date(); const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`; const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
// eslint-disable-next-line no-console if (ui.console) console.log(ts, ...msg); // eslint-disable-line no-console
if (ui.console) console.log(ts, ...msg);
} }
let prevStatus = '';
function status(msg) { function status(msg) {
const div = document.getElementById('status'); const div = document.getElementById('status');
if (div && msg && msg.length > 0) { if (div && msg && msg !== prevStatus && msg.length > 0) {
log('status', msg); log('status', msg);
document.getElementById('play').style.display = 'none'; document.getElementById('play').style.display = 'none';
document.getElementById('loader').style.display = 'block'; document.getElementById('loader').style.display = 'block';
div.innerText = msg; div.innerText = msg;
prevStatus = msg;
} else { } else {
const video = document.getElementById('video'); const video = document.getElementById('video');
const playing = (video.srcObject !== null) && !video.paused; const playing = isLive(video) && !video.paused; // eslint-disable-line no-use-before-define
document.getElementById('play').style.display = playing ? 'none' : 'block'; document.getElementById('play').style.display = playing ? 'none' : 'block';
document.getElementById('loader').style.display = 'none'; document.getElementById('loader').style.display = 'none';
div.innerText = ''; div.innerText = '';
} }
} }
async function videoPlay() { async function videoPlay(videoElement = document.getElementById('video')) {
document.getElementById('btnStartText').innerHTML = 'pause video'; document.getElementById('btnStartText').innerHTML = 'pause video';
await document.getElementById('video').play(); await videoElement.play();
// status();
} }
async function videoPause() { async function videoPause() {
@ -198,68 +207,56 @@ async function videoPause() {
const compare = { enabled: false, original: null }; const compare = { enabled: false, original: null };
async function calcSimmilarity(result) { async function calcSimmilarity(result) {
document.getElementById('compare-container').onclick = () => {
log('resetting face compare baseline:');
compare.original = null;
};
document.getElementById('compare-container').style.display = compare.enabled ? 'block' : 'none'; document.getElementById('compare-container').style.display = compare.enabled ? 'block' : 'none';
if (!compare.enabled) return; if (!compare.enabled) {
compare.original = null;
return;
}
if (!result || !result.face || !result.face[0] || !result.face[0].embedding) return; if (!result || !result.face || !result.face[0] || !result.face[0].embedding) return;
if (!(result.face.length > 0) || (result.face[0].embedding.length <= 64)) return; if (!(result.face.length > 0) || (result.face[0].embedding.length <= 64)) return;
if (!compare.original) { if (!compare.original) {
compare.original = result; compare.original = result;
log('setting face compare baseline:', result.face[0]); log('setting face compare baseline:', result.face[0]);
if (result.face[0].tensor) { if (result.face[0].tensor) {
const enhanced = human.enhance(result.face[0]);
if (enhanced) {
const c = document.getElementById('orig'); const c = document.getElementById('orig');
const squeeze = human.tf.squeeze(enhanced); human.draw.tensor(result.face[0].tensor, c);
const norm = human.tf.div(squeeze, 255);
human.tf.browser.toPixels(norm, c);
human.tf.dispose(enhanced);
human.tf.dispose(squeeze);
human.tf.dispose(norm);
}
} else { } else {
document.getElementById('compare-canvas').getContext('2d').drawImage(compare.original.canvas, 0, 0, 200, 200); document.getElementById('compare-canvas').getContext('2d').drawImage(compare.original.canvas, 0, 0, 200, 200);
} }
} }
const similarity = human.similarity(compare.original.face[0].embedding, result.face[0].embedding); const similarity = human.match.similarity(compare.original.face[0].embedding, result.face[0].embedding);
document.getElementById('similarity').innerText = `similarity: ${Math.trunc(1000 * similarity) / 10}%`; document.getElementById('similarity').innerText = `similarity: ${Math.trunc(1000 * similarity) / 10}%`;
} }
const isLive = (input) => { const isLive = (input) => {
const videoLive = input.readyState > 2; const isCamera = input.srcObject?.getVideoTracks()[0] && input.srcObject?.getVideoTracks()[0].enabled;
const cameraLive = input.srcObject?.getVideoTracks()[0].readyState === 'live'; const isVideoLive = input.readyState > 2;
const live = (videoLive || cameraLive) && (!input.paused); const isCameraLive = input.srcObject?.getVideoTracks()[0].readyState === 'live';
let live = isCamera ? isCameraLive : isVideoLive;
live = live && !input.paused;
return live; return live;
}; };
// draws processed results and starts processing of a next frame // draws processed results and starts processing of a next frame
let lastDraw = performance.now(); let lastDraw = 0;
async function drawResults(input) { async function drawResults(input) {
// await delay(25);
const result = lastDetectedResult; const result = lastDetectedResult;
const canvas = document.getElementById('canvas'); const canvas = document.getElementById('canvas');
// update draw fps data // update draw fps data
ui.drawFPS.push(1000 / (performance.now() - lastDraw)); ui.drawFPS.push(1000 / (human.now() - lastDraw));
if (ui.drawFPS.length > ui.maxFPSframes) ui.drawFPS.shift(); if (ui.drawFPS.length > ui.maxFPSframes) ui.drawFPS.shift();
lastDraw = performance.now(); lastDraw = human.now();
// draw fps chart // draw fps chart
await menu.process.updateChart('FPS', ui.detectFPS); await menu.process.updateChart('FPS', ui.detectFPS);
document.getElementById('segmentation-container').style.display = userConfig.segmentation.enabled ? 'block' : 'none'; if (!result.canvas || ui.buffered) { // refresh with input if using buffered output or if missing canvas
if (userConfig.segmentation.enabled && ui.buffered) { // refresh segmentation if using buffered output
const seg = await human.segmentation(input, ui.background);
if (seg.alpha) {
const canvasSegMask = document.getElementById('segmentation-mask');
const ctxSegMask = canvasSegMask.getContext('2d');
ctxSegMask.clearRect(0, 0, canvasSegMask.width, canvasSegMask.height); // need to clear as seg.alpha is alpha based canvas so it adds
ctxSegMask.drawImage(seg.alpha, 0, 0, seg.alpha.width, seg.alpha.height, 0, 0, canvasSegMask.width, canvasSegMask.height);
const canvasSegCanvas = document.getElementById('segmentation-canvas');
const ctxSegCanvas = canvasSegCanvas.getContext('2d');
ctxSegCanvas.clearRect(0, 0, canvasSegCanvas.width, canvasSegCanvas.height); // need to clear as seg.alpha is alpha based canvas so it adds
ctxSegCanvas.drawImage(seg.canvas, 0, 0, seg.alpha.width, seg.alpha.height, 0, 0, canvasSegCanvas.width, canvasSegCanvas.height);
}
// result.canvas = seg.alpha;
} else if (!result.canvas || ui.buffered) { // refresh with input if using buffered output or if missing canvas
const image = await human.image(input, false); const image = await human.image(input, false);
result.canvas = image.canvas; result.canvas = image.canvas;
human.tf.dispose(image.tensor); human.tf.dispose(image.tensor);
@ -318,22 +315,21 @@ async function drawResults(input) {
${warning}<br> ${warning}<br>
`; `;
ui.framesDraw++; ui.framesDraw++;
ui.lastFrame = performance.now(); ui.lastFrame = human.now();
// if buffered, immediate loop but limit frame rate although it's going to run slower as JS is singlethreaded
if (ui.buffered) { if (ui.buffered) {
if (isLive(input)) { if (isLive(input)) {
ui.drawThread = requestAnimationFrame(() => drawResults(input)); // ui.drawThread = requestAnimationFrame(() => drawResults(input));
ui.drawThread = setTimeout(() => drawResults(input), 25);
} else { } else {
cancelAnimationFrame(ui.drawThread); cancelAnimationFrame(ui.drawThread);
videoPause();
ui.drawThread = null; ui.drawThread = null;
} }
} else { } else if (ui.drawThread) {
if (ui.drawThread) {
log('stopping buffered refresh'); log('stopping buffered refresh');
cancelAnimationFrame(ui.drawThread); cancelAnimationFrame(ui.drawThread);
ui.drawThread = null; ui.drawThread = null;
} }
}
} }
// setup webcam // setup webcam
@ -381,10 +377,11 @@ async function setupCamera() {
}, },
}; };
// enumerate devices for diag purposes // enumerate devices for diag purposes
if (initialCameraAccess) { const devices = await navigator.mediaDevices.enumerateDevices();
navigator.mediaDevices.enumerateDevices().then((devices) => log('enumerated input devices:', devices)); if (initialCameraAccess) log('enumerated input devices:', devices);
log('camera constraints', constraints); // to select specific camera add deviceid from enumerated devices to camera constraints
} // constraints.video.deviceId = '6794499e046cf4aebf41cfeb7d1ef48a17bd65f72bafb55f3c0b06405d3d487b';
if (initialCameraAccess) log('camera constraints', constraints);
try { try {
stream = await navigator.mediaDevices.getUserMedia(constraints); stream = await navigator.mediaDevices.getUserMedia(constraints);
} catch (err) { } catch (err) {
@ -406,13 +403,13 @@ async function setupCamera() {
} }
const track = stream.getVideoTracks()[0]; const track = stream.getVideoTracks()[0];
const settings = track.getSettings(); const settings = track.getSettings();
if (initialCameraAccess) log('selected video source:', track, settings); // log('selected camera:', track.label, 'id:', settings.deviceId); if (initialCameraAccess) log('selected video source:', track, settings);
ui.camera = { name: track.label.toLowerCase(), width: settings.width, height: settings.height, facing: settings.facingMode === 'user' ? 'front' : 'back' }; ui.camera = { name: track.label.toLowerCase(), width: settings.width, height: settings.height, facing: settings.facingMode === 'user' ? 'front' : 'back' };
initialCameraAccess = false; initialCameraAccess = false;
if (!stream) return 'camera stream empty'; if (!stream) return 'camera stream empty';
const ready = new Promise((resolve) => (video.onloadeddata = () => resolve(true))); const ready = new Promise((resolve) => { (video.onloadeddata = () => resolve(true)); });
video.srcObject = stream; video.srcObject = stream;
await ready; await ready;
if (settings.width > settings.height) canvas.style.width = '100vw'; if (settings.width > settings.height) canvas.style.width = '100vw';
@ -422,8 +419,7 @@ async function setupCamera() {
ui.menuWidth.input.setAttribute('value', video.videoWidth); ui.menuWidth.input.setAttribute('value', video.videoWidth);
ui.menuHeight.input.setAttribute('value', video.videoHeight); ui.menuHeight.input.setAttribute('value', video.videoHeight);
if (live || ui.autoPlay) await videoPlay(); if (live || ui.autoPlay) await videoPlay();
// eslint-disable-next-line no-use-before-define if ((live || ui.autoPlay) && !ui.detectThread) runHumanDetect(video, canvas); // eslint-disable-line no-use-before-define
if ((live || ui.autoPlay) && !ui.detectThread) runHumanDetect(video, canvas);
return 'camera stream ready'; return 'camera stream ready';
} }
@ -477,8 +473,7 @@ function webWorker(input, image, canvas, timestamp) {
ui.framesDetect++; ui.framesDetect++;
if (!ui.drawThread) drawResults(input); if (!ui.drawThread) drawResults(input);
if (isLive(input)) { if (isLive(input)) {
// eslint-disable-next-line no-use-before-define ui.detectThread = requestAnimationFrame((now) => runHumanDetect(input, canvas, now)); // eslint-disable-line no-use-before-define
ui.detectThread = requestAnimationFrame((now) => runHumanDetect(input, canvas, now));
} }
}); });
} }
@ -515,19 +510,9 @@ function runHumanDetect(input, canvas, timestamp) {
// perform detection in worker // perform detection in worker
webWorker(input, data, canvas, timestamp); webWorker(input, data, canvas, timestamp);
} else { } else {
human.detect(input, userConfig).then((result) => { human.detect(input, userConfig)
.then((result) => {
status(); status();
/*
setTimeout(async () => { // simulate gl context lost 2sec after initial detection
const ext = human.gl && human.gl.gl ? human.gl.gl.getExtension('WEBGL_lose_context') : {};
if (ext && ext.loseContext) {
log('simulate context lost:', human.env.webgl, human.gl, ext);
human.gl.gl.getExtension('WEBGL_lose_context').loseContext();
await videoPause();
status('Exception: WebGL');
}
}, 2000);
*/
if (result.performance && result.performance.total) ui.detectFPS.push(1000 / result.performance.total); if (result.performance && result.performance.total) ui.detectFPS.push(1000 / result.performance.total);
if (ui.detectFPS.length > ui.maxFPSframes) ui.detectFPS.shift(); if (ui.detectFPS.length > ui.maxFPSframes) ui.detectFPS.shift();
if (ui.bench) { if (ui.bench) {
@ -544,7 +529,9 @@ function runHumanDetect(input, canvas, timestamp) {
ui.framesDetect++; ui.framesDetect++;
ui.detectThread = requestAnimationFrame((now) => runHumanDetect(input, canvas, now)); ui.detectThread = requestAnimationFrame((now) => runHumanDetect(input, canvas, now));
} }
}); return result;
})
.catch(() => log('human detect error'));
} }
} }
@ -591,8 +578,7 @@ async function processImage(input, title) {
// copy to clipboard on click // copy to clipboard on click
if (typeof ClipboardItem !== 'undefined' && navigator.clipboard) { if (typeof ClipboardItem !== 'undefined' && navigator.clipboard) {
evt.target.toBlob((blob) => { evt.target.toBlob((blob) => {
// eslint-disable-next-line no-undef const item = new ClipboardItem({ 'image/png': blob }); // eslint-disable-line no-undef
const item = new ClipboardItem({ 'image/png': blob });
navigator.clipboard.write([item]); navigator.clipboard.write([item]);
log('copied image to clipboard'); log('copied image to clipboard');
}); });
@ -603,6 +589,7 @@ async function processImage(input, title) {
const prev = document.getElementsByClassName('thumbnail'); const prev = document.getElementsByClassName('thumbnail');
if (prev && prev.length > 0) document.getElementById('samples-container').insertBefore(thumb, prev[0]); if (prev && prev.length > 0) document.getElementById('samples-container').insertBefore(thumb, prev[0]);
else document.getElementById('samples-container').appendChild(thumb); else document.getElementById('samples-container').appendChild(thumb);
document.getElementById('samples-container').style.display = 'block';
// finish up // finish up
status(); status();
@ -619,20 +606,17 @@ async function processImage(input, title) {
async function processVideo(input, title) { async function processVideo(input, title) {
status(`processing video: ${title}`); status(`processing video: ${title}`);
const video = document.createElement('video'); const video = document.getElementById('video');
const canvas = document.getElementById('canvas'); const canvas = document.getElementById('canvas');
video.id = 'video-file';
video.controls = true;
video.loop = true;
// video.onerror = async () => status(`video loading error: ${video.error.message}`);
video.addEventListener('error', () => status(`video loading error: ${video.error.message}`)); video.addEventListener('error', () => status(`video loading error: ${video.error.message}`));
video.addEventListener('canplay', async () => { video.addEventListener('canplay', async () => {
for (const m of Object.values(menu)) m.hide(); for (const m of Object.values(menu)) m.hide();
document.getElementById('samples-container').style.display = 'none'; document.getElementById('samples-container').style.display = 'none';
canvas.style.display = 'block'; canvas.style.display = 'block';
await videoPlay(); await videoPlay();
if (!ui.detectThread) runHumanDetect(video, canvas); runHumanDetect(video, canvas);
}); });
video.srcObject = null;
video.src = input; video.src = input;
} }
@ -643,9 +627,8 @@ async function detectVideo() {
const canvas = document.getElementById('canvas'); const canvas = document.getElementById('canvas');
canvas.style.display = 'block'; canvas.style.display = 'block';
cancelAnimationFrame(ui.detectThread); cancelAnimationFrame(ui.detectThread);
if ((video.srcObject !== null) && !video.paused) { if (isLive(video) && !video.paused) {
await videoPause(); await videoPause();
// if (ui.drawThread) cancelAnimationFrame(ui.drawThread);
} else { } else {
const cameraError = await setupCamera(); const cameraError = await setupCamera();
if (!cameraError) { if (!cameraError) {
@ -702,6 +685,7 @@ function setupMenu() {
menu.image = new Menu(document.body, '', { top, left: x[1] }); menu.image = new Menu(document.body, '', { top, left: x[1] });
menu.image.addBool('enabled', userConfig.filter, 'enabled', (val) => userConfig.filter.enabled = val); menu.image.addBool('enabled', userConfig.filter, 'enabled', (val) => userConfig.filter.enabled = val);
menu.image.addBool('histogram equalization', userConfig.filter, 'equalization', (val) => userConfig.filter.equalization = val);
ui.menuWidth = menu.image.addRange('image width', userConfig.filter, 'width', 0, 3840, 10, (val) => userConfig.filter.width = parseInt(val)); ui.menuWidth = menu.image.addRange('image width', userConfig.filter, 'width', 0, 3840, 10, (val) => userConfig.filter.width = parseInt(val));
ui.menuHeight = menu.image.addRange('image height', userConfig.filter, 'height', 0, 2160, 10, (val) => userConfig.filter.height = parseInt(val)); ui.menuHeight = menu.image.addRange('image height', userConfig.filter, 'height', 0, 2160, 10, (val) => userConfig.filter.height = parseInt(val));
menu.image.addHTML('<hr style="border-style: inset; border-color: dimgray">'); menu.image.addHTML('<hr style="border-style: inset; border-color: dimgray">');
@ -720,7 +704,6 @@ function setupMenu() {
menu.image.addBool('technicolor', userConfig.filter, 'technicolor', (val) => userConfig.filter.technicolor = val); menu.image.addBool('technicolor', userConfig.filter, 'technicolor', (val) => userConfig.filter.technicolor = val);
menu.image.addBool('polaroid', userConfig.filter, 'polaroid', (val) => userConfig.filter.polaroid = val); menu.image.addBool('polaroid', userConfig.filter, 'polaroid', (val) => userConfig.filter.polaroid = val);
menu.image.addHTML('<input type="file" id="file-input" class="input-file"></input> &nbsp input'); menu.image.addHTML('<input type="file" id="file-input" class="input-file"></input> &nbsp input');
menu.image.addHTML('<input type="file" id="file-background" class="input-file"></input> &nbsp background');
menu.process = new Menu(document.body, '', { top, left: x[2] }); menu.process = new Menu(document.body, '', { top, left: x[2] });
menu.process.addList('backend', ['cpu', 'webgl', 'wasm', 'humangl'], userConfig.backend, (val) => userConfig.backend = val); menu.process.addList('backend', ['cpu', 'webgl', 'wasm', 'humangl'], userConfig.backend, (val) => userConfig.backend = val);
@ -768,8 +751,6 @@ function setupMenu() {
menu.models.addHTML('<hr style="border-style: inset; border-color: dimgray">'); menu.models.addHTML('<hr style="border-style: inset; border-color: dimgray">');
menu.models.addBool('gestures', userConfig.gesture, 'enabled', (val) => userConfig.gesture.enabled = val); menu.models.addBool('gestures', userConfig.gesture, 'enabled', (val) => userConfig.gesture.enabled = val);
menu.models.addHTML('<hr style="border-style: inset; border-color: dimgray">'); menu.models.addHTML('<hr style="border-style: inset; border-color: dimgray">');
menu.models.addBool('body segmentation', userConfig.segmentation, 'enabled', (val) => userConfig.segmentation.enabled = val);
menu.models.addHTML('<hr style="border-style: inset; border-color: dimgray">');
menu.models.addBool('object detection', userConfig.object, 'enabled', (val) => userConfig.object.enabled = val); menu.models.addBool('object detection', userConfig.object, 'enabled', (val) => userConfig.object.enabled = val);
menu.models.addHTML('<hr style="border-style: inset; border-color: dimgray">'); menu.models.addHTML('<hr style="border-style: inset; border-color: dimgray">');
menu.models.addBool('face compare', compare, 'enabled', (val) => { menu.models.addBool('face compare', compare, 'enabled', (val) => {
@ -789,6 +770,7 @@ function setupMenu() {
async function resize() { async function resize() {
window.onresize = null; window.onresize = null;
log('resize');
// best setting for mobile, ignored for desktop // best setting for mobile, ignored for desktop
// can set dynamic value such as Math.min(1, Math.round(100 * window.innerWidth / 960) / 100); // can set dynamic value such as Math.min(1, Math.round(100 * window.innerWidth / 960) / 100);
const viewportScale = 0.7; const viewportScale = 0.7;
@ -837,42 +819,12 @@ async function processDataURL(f, action) {
if (e.target.result.startsWith('data:video')) await processVideo(e.target.result, f.name); if (e.target.result.startsWith('data:video')) await processVideo(e.target.result, f.name);
document.getElementById('canvas').style.display = 'none'; document.getElementById('canvas').style.display = 'none';
} }
if (action === 'background') {
const image = new Image();
image.onerror = async () => status('image loading error');
image.onload = async () => {
ui.background = image;
if (document.getElementById('canvas').style.display === 'block') { // replace canvas used for video
const canvas = document.getElementById('canvas');
const ctx = canvas.getContext('2d');
const seg = await human.segmentation(canvas, ui.background, userConfig);
if (seg.canvas) ctx.drawImage(seg.canvas, 0, 0);
} else {
const canvases = document.getElementById('samples-container').children; // replace loaded images
for (const canvas of canvases) {
const ctx = canvas.getContext('2d');
const seg = await human.segmentation(canvas, ui.background, userConfig);
if (seg.canvas) ctx.drawImage(seg.canvas, 0, 0);
}
}
};
image.src = e.target.result;
}
resolve(true); resolve(true);
}; };
reader.readAsDataURL(f); reader.readAsDataURL(f);
}); });
} }
async function runSegmentation() {
document.getElementById('file-background').onchange = async (evt) => {
userConfig.segmentation.enabled = true;
evt.preventDefault();
if (evt.target.files.length < 2) ui.columns = 1;
for (const f of evt.target.files) await processDataURL(f, 'background');
};
}
async function dragAndDrop() { async function dragAndDrop() {
document.body.addEventListener('dragenter', (evt) => evt.preventDefault()); document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
document.body.addEventListener('dragleave', (evt) => evt.preventDefault()); document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
@ -910,10 +862,10 @@ async function pwaRegister() {
const regs = await navigator.serviceWorker.getRegistrations(); const regs = await navigator.serviceWorker.getRegistrations();
for (const reg of regs) { for (const reg of regs) {
log('pwa found:', reg.scope); log('pwa found:', reg.scope);
if (reg.scope.startsWith(location.origin)) found = reg; if (reg.scope.startsWith(window.location.origin)) found = reg;
} }
if (!found) { if (!found) {
const reg = await navigator.serviceWorker.register(pwa.scriptFile, { scope: location.pathname }); const reg = await navigator.serviceWorker.register(pwa.scriptFile, { scope: window.location.pathname });
found = reg; found = reg;
log('pwa registered:', reg.scope); log('pwa registered:', reg.scope);
} }
@ -945,8 +897,7 @@ async function main() {
if (ui.detectThread) cancelAnimationFrame(ui.detectThread); if (ui.detectThread) cancelAnimationFrame(ui.detectThread);
if (ui.drawThread) cancelAnimationFrame(ui.drawThread); if (ui.drawThread) cancelAnimationFrame(ui.drawThread);
const msg = evt.reason.message || evt.reason || evt; const msg = evt.reason.message || evt.reason || evt;
// eslint-disable-next-line no-console console.error(msg); // eslint-disable-line no-console
console.error(msg);
document.getElementById('log').innerHTML = msg; document.getElementById('log').innerHTML = msg;
status(`exception: ${msg}`); status(`exception: ${msg}`);
evt.preventDefault(); evt.preventDefault();
@ -969,7 +920,7 @@ async function main() {
await pwaRegister(); await pwaRegister();
// parse url search params // parse url search params
const params = new URLSearchParams(location.search); const params = new URLSearchParams(window.location.search);
log('url options:', params.toString()); log('url options:', params.toString());
if (params.has('worker')) { if (params.has('worker')) {
ui.useWorker = JSON.parse(params.get('worker')); ui.useWorker = JSON.parse(params.get('worker'));
@ -1006,14 +957,14 @@ async function main() {
// create instance of human // create instance of human
human = new Human(userConfig); human = new Human(userConfig);
// human.env.perfadd = true;
log('human version:', human.version); log('human version:', human.version);
// we've merged human defaults with user config and now lets store it back so it can be accessed by methods such as menu // we've merged human defaults with user config and now lets store it back so it can be accessed by methods such as menu
userConfig = human.config; userConfig = human.config;
if (typeof tf !== 'undefined') { if (typeof tf !== 'undefined') {
// eslint-disable-next-line no-undef log('TensorFlow external version:', tf.version); // eslint-disable-line no-undef
log('TensorFlow external version:', tf.version); human.tf = tf; // eslint-disable-line no-undef
// eslint-disable-next-line no-undef
human.tf = tf; // use externally loaded version of tfjs
} }
log('tfjs version:', human.tf.version.tfjs); log('tfjs version:', human.tf.version.tfjs);
@ -1026,8 +977,7 @@ async function main() {
if (ui.modelsPreload && !ui.useWorker) { if (ui.modelsPreload && !ui.useWorker) {
status('loading'); status('loading');
await human.load(userConfig); // this is not required, just pre-loads all models await human.load(userConfig); // this is not required, just pre-loads all models
const loaded = Object.keys(human.models).filter((a) => human.models[a]); log('demo loaded models:', human.models.loaded());
log('demo loaded models:', loaded);
} else { } else {
await human.init(); await human.init();
} }
@ -1049,9 +999,6 @@ async function main() {
// init drag & drop // init drag & drop
await dragAndDrop(); await dragAndDrop();
// init segmentation
await runSegmentation();
if (params.has('image')) { if (params.has('image')) {
try { try {
const image = JSON.parse(params.get('image')); const image = JSON.parse(params.get('image'));
@ -1072,7 +1019,7 @@ async function main() {
} }
if (human.config.debug) log('environment:', human.env); if (human.config.debug) log('environment:', human.env);
if (human.config.backend === 'humangl' && human.config.debug) log('backend:', human.gl); if (human.config.backend === 'webgl' && human.config.debug) log('backend:', human.gl);
} }
window.onload = main; window.onload = main;

View File

@ -0,0 +1,71 @@
# Human Multithreading Demos
- **Browser** demo `multithread` & `worker`
Runs each `human` module in a separate web worker for highest possible performance (see the dispatch sketch below)
- **NodeJS** demo `node-multiprocess` & `node-multiprocess-worker`
Runs multiple parallel `human` instances by dispatching them to a pool of pre-created worker processes (see the sketch after the sample log below)
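A rough sketch of the browser dispatch pattern; the worker path and message shape follow the `multithread` demo code shown later in this diff, while the snippet itself is an illustration rather than the demo source:

```js
// one worker per human module; worker.js posts back { type, result }
const worker = new Worker('../multithread/worker.js');
worker.onmessage = (msg) => console.log(msg.data.type, msg.data.result);

function dispatchFrame(canvas, config) {
  const ctx = canvas.getContext('2d');
  const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
  // transfer the pixel buffer to the worker as a zero-copy hand-off
  worker.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config, type: 'face' }, [imageData.data.buffer]);
}
```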
<br><hr><br>
## NodeJS Multi-process Demo
`nodejs/node-multiprocess.js` and `nodejs/node-multiprocess-worker.js`: Demo using NodeJS with CommonJS modules
Starts *n* child worker processes and dispatches images to them for parallel execution
```shell
node demo/nodejs/node-multiprocess.js
```
<!-- eslint-skip -->
```json
2021-06-01 08:54:19 INFO: @vladmandic/human version 2.0.0
2021-06-01 08:54:19 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0
2021-06-01 08:54:19 INFO: Human multi-process test
2021-06-01 08:54:19 STATE: Enumerated images: ./assets 15
2021-06-01 08:54:19 STATE: Main: started worker: 130362
2021-06-01 08:54:19 STATE: Main: started worker: 130363
2021-06-01 08:54:19 STATE: Main: started worker: 130369
2021-06-01 08:54:19 STATE: Main: started worker: 130370
2021-06-01 08:54:20 STATE: Worker: PID: 130370 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
2021-06-01 08:54:20 STATE: Worker: PID: 130362 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
2021-06-01 08:54:20 STATE: Worker: PID: 130369 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
2021-06-01 08:54:20 STATE: Worker: PID: 130363 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130370
2021-06-01 08:54:21 INFO: Latency: worker initializtion: 1348 message round trip: 0
2021-06-01 08:54:21 DATA: Worker received message: 130370 { test: true }
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130362
2021-06-01 08:54:21 DATA: Worker received message: 130362 { image: 'samples/ai-face.jpg' }
2021-06-01 08:54:21 DATA: Worker received message: 130370 { image: 'samples/ai-body.jpg' }
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130369
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130363
2021-06-01 08:54:21 DATA: Worker received message: 130369 { image: 'assets/human-sample-upper.jpg' }
2021-06-01 08:54:21 DATA: Worker received message: 130363 { image: 'assets/sample-me.jpg' }
2021-06-01 08:54:24 DATA: Main: worker finished: 130362 detected faces: 1 bodies: 1 hands: 0 objects: 1
2021-06-01 08:54:24 STATE: Main: dispatching to worker: 130362
2021-06-01 08:54:24 DATA: Worker received message: 130362 { image: 'assets/sample1.jpg' }
2021-06-01 08:54:25 DATA: Main: worker finished: 130369 detected faces: 1 bodies: 1 hands: 0 objects: 1
2021-06-01 08:54:25 STATE: Main: dispatching to worker: 130369
2021-06-01 08:54:25 DATA: Main: worker finished: 130370 detected faces: 1 bodies: 1 hands: 0 objects: 1
2021-06-01 08:54:25 STATE: Main: dispatching to worker: 130370
2021-06-01 08:54:25 DATA: Worker received message: 130369 { image: 'assets/sample2.jpg' }
2021-06-01 08:54:25 DATA: Main: worker finished: 130363 detected faces: 1 bodies: 1 hands: 0 objects: 2
2021-06-01 08:54:25 STATE: Main: dispatching to worker: 130363
2021-06-01 08:54:25 DATA: Worker received message: 130370 { image: 'assets/sample3.jpg' }
2021-06-01 08:54:25 DATA: Worker received message: 130363 { image: 'assets/sample4.jpg' }
2021-06-01 08:54:30 DATA: Main: worker finished: 130362 detected faces: 3 bodies: 1 hands: 0 objects: 7
2021-06-01 08:54:30 STATE: Main: dispatching to worker: 130362
2021-06-01 08:54:30 DATA: Worker received message: 130362 { image: 'assets/sample5.jpg' }
2021-06-01 08:54:31 DATA: Main: worker finished: 130369 detected faces: 3 bodies: 1 hands: 0 objects: 5
2021-06-01 08:54:31 STATE: Main: dispatching to worker: 130369
2021-06-01 08:54:31 DATA: Worker received message: 130369 { image: 'assets/sample6.jpg' }
2021-06-01 08:54:31 DATA: Main: worker finished: 130363 detected faces: 4 bodies: 1 hands: 2 objects: 2
2021-06-01 08:54:31 STATE: Main: dispatching to worker: 130363
2021-06-01 08:54:39 STATE: Main: worker exit: 130370 0
2021-06-01 08:54:39 DATA: Main: worker finished: 130362 detected faces: 1 bodies: 1 hands: 0 objects: 1
2021-06-01 08:54:39 DATA: Main: worker finished: 130369 detected faces: 1 bodies: 1 hands: 1 objects: 3
2021-06-01 08:54:39 STATE: Main: worker exit: 130362 0
2021-06-01 08:54:39 STATE: Main: worker exit: 130369 0
2021-06-01 08:54:41 DATA: Main: worker finished: 130363 detected faces: 9 bodies: 1 hands: 0 objects: 10
2021-06-01 08:54:41 STATE: Main: worker exit: 130363 0
2021-06-01 08:54:41 INFO: Processed: 15 images in total: 22006 ms working: 20658 ms average: 1377 ms
```
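For orientation, a simplified sketch of the main-process dispatch loop that produces a log like the one above; the worker file name matches the demo, while the message shapes (`{ image }` out, result object back) are assumptions for illustration:

```js
const child_process = require('child_process');

const images = ['samples/ai-face.jpg', 'samples/ai-body.jpg']; // queue of work items
const numWorkers = 4;

// hand the next queued image to an idle worker, or shut it down once the queue is empty
function dispatch(worker) {
  const image = images.shift();
  if (image) worker.send({ image });
  else worker.disconnect();
}

for (let i = 0; i < numWorkers; i++) {
  const worker = child_process.fork('demo/nodejs/node-multiprocess-worker.js');
  worker.on('message', (msg) => {
    console.log('Main: worker finished:', worker.pid, msg);
    dispatch(worker); // the worker is idle again, so send it more work
  });
  worker.on('exit', (code) => console.log('Main: worker exit:', worker.pid, code));
  dispatch(worker); // IPC messages are queued, so sending before the child is ready is safe
}
```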

View File

@ -9,10 +9,10 @@
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>"> <meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>"> <meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000"> <meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest"> <link rel="manifest" href="../../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon"> <link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png"> <link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./index.js" type="module"></script> <script src="../multithread/index.js" type="module"></script>
<style> <style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') } @font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; } html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }

View File

@@ -4,17 +4,16 @@
  * @description Demo app that enables all Human modules and runs them in separate worker threads
  *
  */
-// @ts-nocheck // typescript checks disabled as this is pure javascript
-import Human from '../../dist/human.esm.js'; // equivalent of @vladmandic/human
+import { Human } from '../../dist/human.esm.js'; // equivalent of @vladmandic/human
 import GLBench from '../helpers/gl-bench.js';
-const workerJS = './worker.js';
+const workerJS = '../multithread/worker.js';
 const config = {
   main: { // processes input and runs gesture analysis
     warmup: 'none',
-    backend: 'humangl',
+    backend: 'webgl',
     modelBasePath: '../../models/',
     async: false,
     filter: { enabled: true },
@@ -27,7 +26,7 @@ const config = {
   },
   face: { // runs all face models
     warmup: 'none',
-    backend: 'humangl',
+    backend: 'webgl',
     modelBasePath: '../../models/',
     async: false,
     filter: { enabled: false },
@@ -40,7 +39,7 @@ const config = {
   },
   body: { // runs body model
     warmup: 'none',
-    backend: 'humangl',
+    backend: 'webgl',
     modelBasePath: '../../models/',
     async: false,
     filter: { enabled: false },
@@ -53,7 +52,7 @@ const config = {
   },
   hand: { // runs hands model
     warmup: 'none',
-    backend: 'humangl',
+    backend: 'webgl',
     modelBasePath: '../../models/',
     async: false,
     filter: { enabled: false },
@@ -66,7 +65,7 @@ const config = {
   },
   object: { // runs object model
     warmup: 'none',
-    backend: 'humangl',
+    backend: 'webgl',
     modelBasePath: '../../models/',
     async: false,
     filter: { enabled: false },
@@ -92,9 +91,13 @@ const busy = {
 };
 const workers = {
+  /** @type {Worker | null} */
   face: null,
+  /** @type {Worker | null} */
   body: null,
+  /** @type {Worker | null} */
   hand: null,
+  /** @type {Worker | null} */
   object: null,
 };
@@ -127,60 +130,58 @@ const result = { // initialize empty result object which will be partially filled
 function log(...msg) {
   const dt = new Date();
   const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
-  // eslint-disable-next-line no-console
-  console.log(ts, ...msg);
+  console.log(ts, ...msg); // eslint-disable-line no-console
 }
 async function drawResults() {
-  start.draw = performance.now();
+  start.draw = human.now();
   const interpolated = human.next(result);
   await human.draw.all(canvas, interpolated);
-  time.draw = Math.round(1 + performance.now() - start.draw);
+  time.draw = Math.round(1 + human.now() - start.draw);
   const fps = Math.round(10 * 1000 / time.main) / 10;
   const draw = Math.round(10 * 1000 / time.draw) / 10;
-  document.getElementById('log').innerText = `Human: version ${human.version} | Performance: Main ${time.main}ms Face: ${time.face}ms Body: ${time.body}ms Hand: ${time.hand}ms Object ${time.object}ms | FPS: ${fps} / ${draw}`;
+  const div = document.getElementById('log');
+  if (div) div.innerText = `Human: version ${human.version} | Performance: Main ${time.main}ms Face: ${time.face}ms Body: ${time.body}ms Hand: ${time.hand}ms Object ${time.object}ms | FPS: ${fps} / ${draw}`;
   requestAnimationFrame(drawResults);
 }
 async function receiveMessage(msg) {
   result[msg.data.type] = msg.data.result;
   busy[msg.data.type] = false;
-  time[msg.data.type] = Math.round(performance.now() - start[msg.data.type]);
+  time[msg.data.type] = Math.round(human.now() - start[msg.data.type]);
 }
 async function runDetection() {
-  start.main = performance.now();
+  start.main = human.now();
   if (!bench) {
     bench = new GLBench(null, { trackGPU: false, chartHz: 20, chartLen: 20 });
-    bench.begin();
+    bench.begin('human');
   }
   const ctx = canvas.getContext('2d');
-  // const image = await human.image(video);
-  // ctx.drawImage(image.canvas, 0, 0, canvas.width, canvas.height);
   ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
   const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
   if (!busy.face) {
     busy.face = true;
-    start.face = performance.now();
-    workers.face.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.face, type: 'face' }, [imageData.data.buffer.slice(0)]);
+    start.face = human.now();
+    if (workers.face) workers.face.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.face, type: 'face' }, [imageData.data.buffer.slice(0)]);
   }
   if (!busy.body) {
     busy.body = true;
-    start.body = performance.now();
-    workers.body.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.body, type: 'body' }, [imageData.data.buffer.slice(0)]);
+    start.body = human.now();
+    if (workers.body) workers.body.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.body, type: 'body' }, [imageData.data.buffer.slice(0)]);
   }
   if (!busy.hand) {
     busy.hand = true;
-    start.hand = performance.now();
-    workers.hand.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.hand, type: 'hand' }, [imageData.data.buffer.slice(0)]);
+    start.hand = human.now();
+    if (workers.hand) workers.hand.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.hand, type: 'hand' }, [imageData.data.buffer.slice(0)]);
   }
   if (!busy.object) {
     busy.object = true;
-    start.object = performance.now();
-    workers.object.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.object, type: 'object' }, [imageData.data.buffer.slice(0)]);
+    start.object = human.now();
+    if (workers.object) workers.object.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.object, type: 'object' }, [imageData.data.buffer.slice(0)]);
   }
-  time.main = Math.round(performance.now() - start.main);
+  time.main = Math.round(human.now() - start.main);
   bench.nextFrame();
   requestAnimationFrame(runDetection);
@@ -197,37 +198,40 @@ async function setupCamera() {
       facingMode: 'user',
       resizeMode: 'crop-and-scale',
       width: { ideal: document.body.clientWidth },
-      // height: { ideal: document.body.clientHeight }, // not set as we're using aspectRatio to get height instead
       aspectRatio: document.body.clientWidth / document.body.clientHeight,
     },
   };
   // enumerate devices for diag purposes
-  navigator.mediaDevices.enumerateDevices().then((devices) => log('enumerated devices:', devices));
+  navigator.mediaDevices.enumerateDevices()
+    .then((devices) => log('enumerated devices:', devices))
+    .catch(() => log('mediaDevices error'));
   log('camera constraints', constraints);
   try {
     stream = await navigator.mediaDevices.getUserMedia(constraints);
   } catch (err) {
-    output.innerText += `\n${err.name}: ${err.message}`;
+    if (output) output.innerText += `\n${err.name}: ${err.message}`;
+    status(err.name);
     log('camera error:', err);
   }
+  if (stream) {
     const tracks = stream.getVideoTracks();
     log('enumerated viable tracks:', tracks);
     const track = stream.getVideoTracks()[0];
     const settings = track.getSettings();
     log('selected video source:', track, settings);
+  } else {
+    log('missing video stream');
+  }
   const promise = !stream || new Promise((resolve) => {
     video.onloadeddata = () => {
-      if (settings.width > settings.height) canvas.style.width = '100vw';
-      else canvas.style.height = '100vh';
+      canvas.style.height = '100vh';
       canvas.width = video.videoWidth;
       canvas.height = video.videoHeight;
       video.play();
-      resolve();
+      resolve(true);
     };
   });
   // attach input to video element
-  if (stream) video.srcObject = stream;
+  if (stream && video) video.srcObject = stream;
   return promise;
 }
@@ -243,21 +247,13 @@ async function startWorkers() {
 }
 async function main() {
-  window.addEventListener('unhandledrejection', (evt) => {
-    // eslint-disable-next-line no-console
-    console.error(evt.reason || evt);
-    document.getElementById('log').innerHTML = evt.reason.message || evt.reason || evt;
-    status('exception error');
-    evt.preventDefault();
-  });
   if (typeof Worker === 'undefined' || typeof OffscreenCanvas === 'undefined') {
+    status('workers are not supported');
     return;
   }
   human = new Human(config.main);
-  document.getElementById('log').innerText = `Human: version ${human.version}`;
+  const div = document.getElementById('log');
+  if (div) div.innerText = `Human: version ${human.version}`;
   await startWorkers();
   await setupCamera();
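
The round-trip between the main thread and each module worker is small enough to sketch in full. A minimal sketch follows; `canvas`, `video` and `config` are assumed to be set up as in the demo above:

```js
// minimal sketch of the per-module worker round-trip used above
const worker = new Worker('../multithread/worker.js');
worker.onmessage = (msg) => {
  // worker replies with { type, result }, which receiveMessage() merges into the shared result object
  console.log('module:', msg.data.type, 'result:', msg.data.result);
};
const ctx = canvas.getContext('2d');
ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
// post raw pixels; a copy of the buffer is transferred so each worker gets its own data
worker.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.face, type: 'face' }, [imageData.data.buffer.slice(0)]);
```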

View File

@@ -6,17 +6,16 @@
  */
 const fs = require('fs');
-const log = require('@vladmandic/pilogger');
-// workers actually import tfjs and faceapi modules
-// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
-const tf = require('@tensorflow/tfjs-node');
+const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
+// workers actually import tfjs and human modules
+const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
 const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
 let human = null;
 const myConfig = {
-  backend: 'tensorflow',
+  // backend: 'tensorflow',
   modelBasePath: 'file://models/',
   debug: false,
   async: true,
@@ -36,7 +35,7 @@ const myConfig = {
   object: { enabled: true },
 };
-// read image from a file and create tensor to be used by faceapi
+// read image from a file and create tensor to be used by human
 // this way we don't need any monkey patches
 // you can add any pre-processing here such as resizing, etc.
 async function image(img) {
@@ -45,7 +44,7 @@ async function image(img) {
   return tensor;
 }
-// actual faceapi detection
+// actual human detection
 async function detect(img) {
   const tensor = await image(img);
   const result = await human.detect(tensor);
@@ -64,11 +63,9 @@ async function main() {
   // on worker start first initialize message handler so we don't miss any messages
   process.on('message', (msg) => {
-    // @ts-ignore
-    if (msg.exit && process.exit) process.exit(); // if main told worker to exit
-    // @ts-ignore
+    // if main told worker to exit
+    if (msg.exit && process.exit) process.exit(); // eslint-disable-line no-process-exit
     if (msg.test && process.send) process.send({ test: true });
-    // @ts-ignore
     if (msg.image) detect(msg.image); // if main told worker to process image
     log.data('Worker received message:', process.pid, msg); // generic log
   });
@@ -78,7 +75,7 @@ async function main() {
   // wait until tf is ready
   await human.tf.ready();
   // pre-load models
-  log.state('Worker: PID:', process.pid, `TensorFlow/JS ${human.tf.version_core} Human ${human.version} Backend: ${human.tf.getBackend()}`);
+  log.state('Worker: PID:', process.pid, `TensorFlow/JS ${human.tf.version['tfjs-core']} Human ${human.version} Backend: ${human.tf.getBackend()}`);
   await human.load();
   // now we're ready, so send message back to main so it knows it can use this worker
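
The handshake the main process waits for is a single `ready` message. A minimal sketch of how the worker could signal it; the payload shape is an assumption based on the `msg.ready` check in the parent process shown in the next file:

```js
// tell main this worker is initialized and can accept images (hypothetical payload)
if (process.send) process.send({ ready: true });
```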

View File

@@ -8,13 +8,12 @@
 const fs = require('fs');
 const path = require('path');
-// eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require
-const log = require('@vladmandic/pilogger'); // this is my simple logger with few extra features
-const child_process = require('child_process');
+const childProcess = require('child_process'); // eslint-disable-line camelcase
+const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
 // note that main process does not import human or tfjs at all, it's all done from worker process
-const workerFile = 'demo/nodejs/node-multiprocess-worker.js';
+const workerFile = 'demo/multithread/node-multiprocess-worker.js';
-const imgPathRoot = './assets'; // modify to include your sample images
+const imgPathRoot = './samples/in'; // modify to include your sample images
 const numWorkers = 4; // how many workers will be started
 const workers = []; // this holds worker processes
 const images = []; // this holds queue of enumerated images
@@ -23,7 +22,7 @@ let numImages;
 // triggered by main when worker sends ready message
 // if image pool is empty, signal worker to exit, otherwise dispatch image to worker and remove image from queue
-async function detect(worker) {
+async function submitDetect(worker) {
   if (!t[2]) t[2] = process.hrtime.bigint(); // first time do a timestamp so we can measure initial latency
   if (images.length === numImages) worker.send({ test: true }); // for first image in queue just measure latency
   if (images.length === 0) worker.send({ exit: true }); // nothing left in queue
@@ -58,7 +57,7 @@ async function main() {
   });
   log.header();
-  log.info('FaceAPI multi-process test');
+  log.info('Human multi-process test');
   // enumerate all images into queue
   const dir = fs.readdirSync(imgPathRoot);
@@ -74,13 +73,13 @@ async function main() {
   // manage worker processes
   for (let i = 0; i < numWorkers; i++) {
     // create worker process
-    workers[i] = await child_process.fork(workerFile, ['special']);
+    workers[i] = await childProcess.fork(workerFile, ['special']);
     // parse message that worker process sends back to main
     // if message is ready, dispatch next image in queue
     // if message is processing result, just print how many faces were detected
     // otherwise it's an unknown message
     workers[i].on('message', (msg) => {
-      if (msg.ready) detect(workers[i]);
+      if (msg.ready) submitDetect(workers[i]);
       else if (msg.image) log.data('Main: worker finished:', workers[i].pid, 'detected faces:', msg.detected.face?.length, 'bodies:', msg.detected.body?.length, 'hands:', msg.detected.hand?.length, 'objects:', msg.detected.object?.length);
       else if (msg.test) measureLatency();
       else log.data('Main: worker message:', workers[i].pid, msg);
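
To try the multi-process pipeline end-to-end; the main script path is an assumption based on the relocated worker path above:

```shell
node demo/multithread/node-multiprocess.js
```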

View File

@@ -1,9 +1,7 @@
-// load Human using IIFE script as Chrome Mobile does not support Modules as Workers
 /// <reference lib="webworker" />
-// import Human from '../dist/human.esm.js';
-self.importScripts('../../dist/human.js');
+// load Human using IIFE script as Chrome Mobile does not support Modules as Workers
+self.importScripts('../../dist/human.js'); // eslint-disable-line no-restricted-globals
 let human;
@@ -11,9 +9,8 @@ onmessage = async (msg) => {
   // received from index.js using:
   // worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
-  // @ts-ignore
-  // eslint-disable-next-line no-undef, new-cap
-  if (!human) human = new Human.default(msg.data.config);
+  // Human is registered as global namespace using IIFE script
+  if (!human) human = new Human.default(msg.data.config); // eslint-disable-line no-undef, new-cap
   const image = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
   let result = {};
   result = await human.detect(image, msg.data.config);
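
The reply that `index.js` consumes in `receiveMessage()` is a single `postMessage` at the tail of this handler; a minimal sketch, assuming the `detect` call above:

```js
// send detection result back to the main thread; shape matches what receiveMessage() reads
postMessage({ type: msg.data.type, result });
```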

121
demo/nodejs/README.md Normal file
View File

@@ -0,0 +1,121 @@
# Human Demos for NodeJS
- `node`: Process images from files, folders or URLs
  uses native methods for image loading and decoding without external dependencies
- `node-canvas`: Process image from file or URL and draw results to a new image file using `node-canvas`
  uses `node-canvas` library to load and decode images from files, draw detection results and write output to a new image file
- `node-video`: Processing of video input using `ffmpeg`
  uses `ffmpeg` to decode video input (a file, stream or device such as a webcam) and
  output frames to a pipe that is captured by the demo app and processed by the `Human` library
- `node-webcam`: Processing of webcam screenshots using `fswebcam`
  uses `fswebcam` to connect to a webcam and take screenshots at regular intervals which are then processed by the `Human` library
- `node-event`: Showcases usage of `Human` eventing to get notifications on processing
- `node-similarity`: Compares two input images for similarity of detected faces
- `process-folder`: Processes all images in an input folder and creates output images
  internally used to generate the samples gallery
<br>
## Main Demo
`nodejs/node.js`: Demo using NodeJS with CommonJS module
Simple demo that can process any input image
Note that you can run the demo as-is and it will perform detection on the provided sample images,
or you can pass a path to an image to analyze, either on the local filesystem or as a URL
```shell
node demo/nodejs/node.js
```
<!-- eslint-skip -->
```js
2021-06-01 08:52:15 INFO: @vladmandic/human version 2.0.0
2021-06-01 08:52:15 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0
2021-06-01 08:52:15 INFO: Current folder: /home/vlado/dev/human
2021-06-01 08:52:15 INFO: Human: 2.0.0
2021-06-01 08:52:15 INFO: Active Configuration {
backend: 'tensorflow',
modelBasePath: 'file://models/',
wasmPath: '../node_modules/@tensorflow/tfjs-backend-wasm/dist/',
debug: true,
async: false,
warmup: 'full',
cacheSensitivity: 0.75,
filter: {
enabled: true,
width: 0,
height: 0,
flip: true,
return: true,
brightness: 0,
contrast: 0,
sharpness: 0,
blur: 0,
saturation: 0,
hue: 0,
negative: false,
sepia: false,
vintage: false,
kodachrome: false,
technicolor: false,
polaroid: false,
pixelate: 0
},
gesture: { enabled: true },
face: {
enabled: true,
detector: { modelPath: 'blazeface.json', rotation: false, maxDetected: 10, skipFrames: 15, minConfidence: 0.2, iouThreshold: 0.1, return: false, enabled: true },
mesh: { enabled: true, modelPath: 'facemesh.json' },
iris: { enabled: true, modelPath: 'iris.json' },
description: { enabled: true, modelPath: 'faceres.json', skipFrames: 16, minConfidence: 0.1 },
emotion: { enabled: true, minConfidence: 0.1, skipFrames: 17, modelPath: 'emotion.json' }
},
body: { enabled: true, modelPath: 'movenet-lightning.json', maxDetected: 1, minConfidence: 0.2 },
hand: {
enabled: true,
rotation: true,
skipFrames: 18,
minConfidence: 0.1,
iouThreshold: 0.1,
maxDetected: 2,
landmarks: true,
detector: { modelPath: 'handdetect.json' },
skeleton: { modelPath: 'handskeleton.json' }
},
object: { enabled: true, modelPath: 'centernet.json', minConfidence: 0.2, iouThreshold: 0.4, maxDetected: 10, skipFrames: 19 }
}
08:52:15.673 Human: version: 2.0.0
08:52:15.674 Human: tfjs version: 3.6.0
08:52:15.674 Human: platform: linux x64
08:52:15.674 Human: agent: NodeJS v16.0.0
08:52:15.674 Human: setting backend: tensorflow
08:52:15.710 Human: load model: file://models/blazeface.json
08:52:15.743 Human: load model: file://models/facemesh.json
08:52:15.744 Human: load model: file://models/iris.json
08:52:15.760 Human: load model: file://models/emotion.json
08:52:15.847 Human: load model: file://models/handdetect.json
08:52:15.847 Human: load model: file://models/handskeleton.json
08:52:15.914 Human: load model: file://models/movenet-lightning.json
08:52:15.957 Human: load model: file://models/centernet.json
08:52:16.015 Human: load model: file://models/faceres.json
08:52:16.015 Human: tf engine state: 50796152 bytes 1318 tensors
2021-06-01 08:52:16 INFO: Loaded: [ 'face', 'movenet', 'handpose', 'emotion', 'centernet', 'faceres', [length]: 6 ]
2021-06-01 08:52:16 INFO: Memory state: { unreliable: true, numTensors: 1318, numDataBuffers: 1318, numBytes: 50796152 }
2021-06-01 08:52:16 INFO: Loading image: private/daz3d/daz3d-kiaria-02.jpg
2021-06-01 08:52:16 STATE: Processing: [ 1, 1300, 1000, 3, [length]: 4 ]
2021-06-01 08:52:17 DATA: Results:
2021-06-01 08:52:17 DATA: Face: #0 boxScore:0.88 faceScore:1 age:16.3 genderScore:0.97 gender:female emotionScore:0.85 emotion:happy iris:61.05
2021-06-01 08:52:17 DATA: Body: #0 score:0.82 keypoints:17
2021-06-01 08:52:17 DATA: Hand: #0 score:0.89
2021-06-01 08:52:17 DATA: Hand: #1 score:0.97
2021-06-01 08:52:17 DATA: Gesture: face#0 gesture:facing left
2021-06-01 08:52:17 DATA: Gesture: body#0 gesture:leaning right
2021-06-01 08:52:17 DATA: Gesture: hand#0 gesture:pinky forward middlefinger up
2021-06-01 08:52:17 DATA: Gesture: hand#1 gesture:pinky forward middlefinger up
2021-06-01 08:52:17 DATA: Gesture: iris#0 gesture:looking left
2021-06-01 08:52:17 DATA: Object: #0 score:0.55 label:person
2021-06-01 08:52:17 DATA: Object: #1 score:0.23 label:bottle
2021-06-01 08:52:17 DATA: Persons:
2021-06-01 08:52:17 DATA: #0: Face:score:1 age:16.3 gender:female iris:61.05 Body:score:0.82 keypoints:17 LeftHand:no RightHand:yes Gestures:4
```
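
To analyze a specific image instead of the built-in samples, pass a filesystem path or URL (the paths below are hypothetical examples):

```shell
node demo/nodejs/node.js samples/in/ai-body.jpg
node demo/nodejs/node.js https://example.com/photo.jpg
```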

66
demo/nodejs/node-bench.js Normal file
View File

@@ -0,0 +1,66 @@
/**
* Human simple demo for NodeJS
*/
const childProcess = require('child_process'); // eslint-disable-line camelcase
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
const config = {
cacheSensitivity: 0.01,
wasmPlatformFetch: true,
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
};
const count = 10;
async function loadImage(input) {
const inputImage = await canvas.loadImage(input);
const inputCanvas = new canvas.Canvas(inputImage.width, inputImage.height);
const inputCtx = inputCanvas.getContext('2d');
inputCtx.drawImage(inputImage, 0, 0);
const imageData = inputCtx.getImageData(0, 0, inputCanvas.width, inputCanvas.height);
process.send({ input, resolution: [inputImage.width, inputImage.height] });
return imageData;
}
async function runHuman(module, backend) {
if (backend === 'wasm') require('@tensorflow/tfjs-backend-wasm'); // eslint-disable-line node/no-unpublished-require, global-require
const Human = require('../../dist/' + module); // eslint-disable-line global-require, import/no-dynamic-require
config.backend = backend;
const human = new Human.Human(config);
human.env.Canvas = canvas.Canvas;
human.env.Image = canvas.Image;
human.env.ImageData = canvas.ImageData;
process.send({ human: human.version, module });
await human.init();
process.send({ desired: human.config.backend, wasm: human.env.wasm, tfjs: human.tf.version.tfjs, tensorflow: human.env.tensorflow });
const imageData = await loadImage('samples/in/ai-body.jpg');
const t0 = human.now();
await human.load();
const t1 = human.now();
await human.warmup();
const t2 = human.now();
for (let i = 0; i < count; i++) await human.detect(imageData);
const t3 = human.now();
process.send({ backend: human.tf.getBackend(), load: Math.round(t1 - t0), warmup: Math.round(t2 - t1), detect: Math.round(t3 - t2), count, memory: human.tf.memory().numBytes });
}
async function executeWorker(args) {
return new Promise((resolve) => {
const worker = childProcess.fork(process.argv[1], args);
worker.on('message', (msg) => log.data(msg));
worker.on('exit', () => resolve(true));
});
}
async function main() {
if (process.argv[2]) {
await runHuman(process.argv[2], process.argv[3]);
} else {
await executeWorker(['human.node.js', 'tensorflow']);
await executeWorker(['human.node-gpu.js', 'tensorflow']);
await executeWorker(['human.node-wasm.js', 'wasm']);
}
}
main();
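
Run without arguments, the script forks itself once per bundle/backend pair via `executeWorker`; with arguments it benchmarks a single combination directly:

```shell
# benchmark all three bundles in sequence (each in its own child process)
node demo/nodejs/node-bench.js
# or a single bundle with an explicit backend
node demo/nodejs/node-bench.js human.node.js tensorflow
```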

View File

@@ -1,18 +1,22 @@
 /**
  * Human demo for NodeJS using Canvas library
+ *
+ * Requires [canvas](https://www.npmjs.com/package/canvas) to provide Canvas functionality in NodeJS environment
  */
 const fs = require('fs');
 const process = require('process');
-const log = require('@vladmandic/pilogger');
-const canvas = require('canvas');
-require('@tensorflow/tfjs-node'); // for nodejs, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
-const Human = require('../../dist/human.node.js'); // this is 'const Human = require('../dist/human.node-gpu.js').default;'
+const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
+// in nodejs environments tfjs-node is required to be loaded before human
+const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
+const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
+// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
+const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
 const config = { // just enable all and leave default settings
   debug: false,
-  face: { enabled: true }, // includes mesh, iris, emotion, descriptor
-  hand: { enabled: true, maxDetected: 2, minConfidence: 0.5, detector: { modelPath: 'handtrack.json' } }, // use alternative hand model
+  face: { enabled: true, detector: { maxDetected: 10 } }, // includes mesh, iris, emotion, descriptor
+  hand: { enabled: true, maxDetected: 20, minConfidence: 0.5, detector: { modelPath: 'handtrack.json' } }, // use alternative hand model
   body: { enabled: true },
   object: { enabled: true },
   gestures: { enabled: true },
@@ -28,15 +32,16 @@ async function main() {
   // init
   const human = new Human.Human(config); // create instance of human
-  log.info('Human:', human.version);
+  log.info('Human:', human.version, 'TF:', tf.version_core);
   await human.load(); // pre-load models
-  log.info('Loaded models:', Object.keys(human.models).filter((a) => human.models[a]));
+  log.info('Loaded models:', human.models.loaded());
   log.info('Memory state:', human.tf.engine().memory());
   // parse cmdline
   const input = process.argv[2];
-  const output = process.argv[3];
+  let output = process.argv[3];
+  if (!output.toLowerCase().endsWith('.jpg')) output += '.jpg';
   if (process.argv.length !== 4) log.error('Parameters: <input-image> <output-image> missing');
   else if (!fs.existsSync(input) && !input.startsWith('http')) log.error(`File not found: ${process.argv[2]}`);
   else {
@@ -44,15 +49,12 @@ async function main() {
     const inputImage = await canvas.loadImage(input); // load image using canvas library
     log.info('Loaded image', input, inputImage.width, inputImage.height);
     const inputCanvas = new canvas.Canvas(inputImage.width, inputImage.height); // create canvas
-    const ctx = inputCanvas.getContext('2d');
-    ctx.drawImage(inputImage, 0, 0); // draw input image onto canvas
+    const inputCtx = inputCanvas.getContext('2d');
+    inputCtx.drawImage(inputImage, 0, 0); // draw input image onto canvas
+    const imageData = inputCtx.getImageData(0, 0, inputCanvas.width, inputCanvas.height);
     // run detection
-    const result = await human.detect(inputCanvas);
-    // run segmentation
-    // const seg = await human.segmentation(inputCanvas);
-    // log.data('Segmentation:', { data: seg.data.length, alpha: typeof seg.alpha, canvas: typeof seg.canvas });
+    const result = await human.detect(imageData);
     // print results summary
     const persons = result.persons; // invoke persons getter, only used to print summary on console
@@ -60,16 +62,19 @@ async function main() {
       const face = persons[i].face;
       const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.iris}` : null;
       const body = persons[i].body;
-      const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints?.length}` : null;
+      const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints.length}` : null;
       log.data(`Detected: #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`);
     }
     // draw detected results onto canvas and save it to a file
-    human.draw.all(inputCanvas, result); // use human built-in method to draw results as overlays on canvas
+    const outputCanvas = new canvas.Canvas(inputImage.width, inputImage.height); // create canvas
+    const outputCtx = outputCanvas.getContext('2d');
+    outputCtx.drawImage(result.canvas || inputImage, 0, 0); // draw input image onto canvas
+    human.draw.all(outputCanvas, result); // use human built-in method to draw results as overlays on canvas
     const outFile = fs.createWriteStream(output); // write canvas to new image file
-    outFile.on('finish', () => log.state('Output image:', output, inputCanvas.width, inputCanvas.height));
+    outFile.on('finish', () => log.state('Output image:', output, outputCanvas.width, outputCanvas.height));
     outFile.on('error', (err) => log.error('Output error:', output, err));
-    const stream = inputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
+    const stream = outputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
     stream.pipe(outFile);
   }
 }

View File

@@ -1,21 +0,0 @@
const log = require('@vladmandic/pilogger');
const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
const config = {
debug: false,
};
async function main() {
const human = new Human(config);
await human.tf.ready();
log.info('Human:', human.version);
log.data('Environment', human.env);
await human.load();
const models = Object.keys(human.models).map((model) => ({ name: model, loaded: (human.models[model] !== null) }));
log.data('Models:', models);
log.info('Memory state:', human.tf.engine().memory());
// log.data('Config', human.config);
log.info('TFJS flags:', human.tf.ENV.flags);
}
main();

View File

@@ -2,23 +2,18 @@
  * Human demo for NodeJS
  */
-const log = require('@vladmandic/pilogger');
 const fs = require('fs');
 const process = require('process');
-let fetch; // fetch is dynamically imported later
-// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
-// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
-const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
-// load specific version of Human library that matches TensorFlow mode
-const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
+const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
+// in nodejs environments tfjs-node is required to be loaded before human
+const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
+// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
+const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
 let human = null;
 const myConfig = {
-  backend: 'tensorflow',
   modelBasePath: 'file://models/',
   debug: false,
   async: true,
@@ -41,29 +36,17 @@ async function detect(input) {
   let buffer;
   log.info('Loading image:', input);
   if (input.startsWith('http:') || input.startsWith('https:')) {
-    fetch = (await import('node-fetch')).default;
     const res = await fetch(input);
-    if (res && res.ok) buffer = await res.buffer();
+    if (res && res.ok) buffer = Buffer.from(await res.arrayBuffer());
     else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type'));
   } else {
     buffer = fs.readFileSync(input);
   }
+  log.data('Image bytes:', buffer?.length, 'buffer:', buffer?.slice(0, 32));
   // decode image using tfjs-node so we don't need external dependencies
   if (!buffer) return;
-  const tensor = human.tf.tidy(() => {
-    const decode = human.tf.node.decodeImage(buffer, 3);
-    let expand;
-    if (decode.shape[2] === 4) { // input is in rgba format, need to convert to rgb
-      const channels = human.tf.split(decode, 4, 2); // split rgba to channels
-      const rgb = human.tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb and ignore alpha
-      expand = human.tf.reshape(rgb, [1, decode.shape[0], decode.shape[1], 3]); // move extra dim from the end of tensor and use it as batch number instead
-    } else {
-      expand = human.tf.expandDims(decode, 0);
-    }
-    const cast = human.tf.cast(expand, 'float32');
-    return cast;
-  });
+  const tensor = human.tf.node.decodeImage(buffer, 3);
   // run detection
   await human.detect(tensor, myConfig);
@@ -73,15 +56,16 @@ async function detect(input) {
 async function main() {
   log.header();
-  human = new Human(myConfig);
+  human = new Human.Human(myConfig);
+  log.info('Human:', human.version, 'TF:', tf.version_core);
+  if (human.events) {
     human.events.addEventListener('warmup', () => {
       log.info('Event Warmup');
     });
     human.events.addEventListener('load', () => {
-      const loaded = Object.keys(human.models).filter((a) => human.models[a]);
-      log.info('Event Loaded:', loaded, human.tf.engine().memory());
+      log.info('Event Loaded:', human.models.loaded(), human.tf.engine().memory());
     });
     human.events.addEventListener('image', () => {
@@ -93,12 +77,13 @@ async function main() {
       const persons = human.result.persons;
       for (let i = 0; i < persons.length; i++) {
         const face = persons[i].face;
-        const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.iris}` : null;
+        const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.distance}` : null;
         const body = persons[i].body;
         const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints?.length}` : null;
         log.data(`  #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`);
       }
     });
+  }
   await human.tf.ready(); // wait until tf is ready

30
demo/nodejs/node-fetch.js Normal file
View File

@@ -0,0 +1,30 @@
/**
* Human demo for NodeJS using http fetch to get image file
*
* Requires [node-fetch](https://www.npmjs.com/package/node-fetch) to provide `fetch` functionality in NodeJS environment
*/
const fs = require('fs');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
const humanConfig = {
modelBasePath: 'https://vladmandic.github.io/human/models/',
};
async function main(inputFile) {
global.fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-unpublished-import, import/no-unresolved, node/no-missing-import, node/no-extraneous-import
const human = new Human.Human(humanConfig); // create instance of human using default configuration
log.info('Human:', human.version, 'TF:', tf.version_core);
await human.load(); // optional as models would be loaded on-demand first time they are required
await human.warmup(); // optional as model warmup is performed on-demand first time its executed
const buffer = fs.readFileSync(inputFile); // read file data into buffer
const tensor = human.tf.node.decodeImage(buffer); // decode jpg data
const result = await human.detect(tensor); // run detection; will initialize backend and on-demand load models
log.data(result.gesture);
}
main('samples/in/ai-body.jpg');
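
Since Node.js 18 ships a global `fetch`, the dynamic `node-fetch` import above is only needed on older runtimes. A hedged variant of the assignment, for inside an async function such as `main`:

```js
// only polyfill fetch when the runtime does not already provide it (Node < 18)
if (typeof globalThis.fetch !== 'function') global.fetch = (await import('node-fetch')).default;
```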

View File

@@ -0,0 +1,64 @@
/**
* Human Person Similarity test for NodeJS
*/
const fs = require('fs');
const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
let human = null;
const myConfig = {
modelBasePath: 'file://models/',
debug: true,
face: { emotion: { enabled: false } },
body: { enabled: false },
hand: { enabled: false },
gesture: { enabled: false },
};
async function init() {
human = new Human.Human(myConfig);
await human.tf.ready();
log.info('Human:', human.version, 'TF:', tf.version_core);
await human.load();
log.info('Loaded:', human.models.loaded());
log.info('Memory state:', human.tf.engine().memory());
}
async function detect(input) {
if (!fs.existsSync(input)) {
throw new Error(`Cannot load image: ${input}`);
}
const buffer = fs.readFileSync(input);
const tensor = human.tf.node.decodeImage(buffer, 3);
log.state('Loaded image:', input, tensor.shape);
const result = await human.detect(tensor, myConfig);
human.tf.dispose(tensor);
log.state('Detected faces:', result.face.length);
return result;
}
async function main() {
log.configure({ inspect: { breakLength: 265 } });
log.header();
if (process.argv.length !== 4) {
log.error('Parameters: <first image> <second image> missing');
return;
}
await init();
const res1 = await detect(process.argv[2]);
const res2 = await detect(process.argv[3]);
if (!res1 || !res1.face || res1.face.length === 0 || !res2 || !res2.face || res2.face.length === 0) {
throw new Error('Could not detect face descriptors');
}
const similarity = human.match.similarity(res1.face[0].embedding, res2.face[0].embedding, { order: 2 });
log.data('Similarity: ', similarity);
}
main();
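
Example invocation with two face images (the sample paths are hypothetical):

```shell
node demo/nodejs/node-similarity.js samples/in/ai-face.jpg samples/in/ai-body.jpg
```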

View File

@@ -0,0 +1,32 @@
/**
* Human simple demo for NodeJS
*/
const fs = require('fs');
const process = require('process');
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
const humanConfig = {
// add any custom config here
debug: true,
body: { enabled: false },
};
async function detect(inputFile) {
const human = new Human.Human(humanConfig); // create instance of human using default configuration
console.log('Human:', human.version, 'TF:', tf.version_core); // eslint-disable-line no-console
await human.load(); // optional as models would be loaded on-demand first time they are required
await human.warmup(); // optional as model warmup is performed on-demand first time its executed
const buffer = fs.readFileSync(inputFile); // read file data into buffer
const tensor = human.tf.node.decodeImage(buffer); // decode jpg data
console.log('loaded input file:', inputFile, 'resolution:', tensor.shape); // eslint-disable-line no-console
const result = await human.detect(tensor); // run detection; will initialize backend and on-demand load models
console.log(result); // eslint-disable-line no-console
}
if (process.argv.length === 3) detect(process.argv[2]); // if input file is provided as cmdline parameter use it
else detect('samples/in/ai-body.jpg'); // else use built-in test inputfile
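
Example invocations, matching the argv handling above (the custom image path is hypothetical):

```shell
node demo/nodejs/node-simple.js               # uses built-in samples/in/ai-body.jpg
node demo/nodejs/node-simple.js my-image.jpg  # or any image path
```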

View File

@@ -7,27 +7,26 @@
  * If you want to process at specific intervals, set output fps to some value
  * If you want to process an input stream, set real-time flag and set input as required
  *
- * Note that pipe2jpeg is not part of Human dependencies and should be installed manually
- * Working version of ffmpeg must be present on the system
+ * Note that [pipe2jpeg](https://www.npmjs.com/package/pipe2jpeg) is not part of Human dependencies and should be installed manually
+ * Working version of `ffmpeg` must be present on the system
  */
+const process = require('process');
 const spawn = require('child_process').spawn;
-const log = require('@vladmandic/pilogger');
-// @ts-ignore pipe2jpeg is not installed by default
-// eslint-disable-next-line node/no-missing-require
-const Pipe2Jpeg = require('pipe2jpeg');
-// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
-// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
-const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
-// load specific version of Human library that matches TensorFlow mode
-const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
+const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
+// in nodejs environments tfjs-node is required to be loaded before human
+// const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
+const Pipe2Jpeg = require('pipe2jpeg'); // eslint-disable-line node/no-missing-require, import/no-unresolved
+// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
+const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
 let count = 0; // counter
 let busy = false; // busy flag
-const inputFile = './test.mp4';
+let inputFile = './test.mp4';
+if (process.argv.length === 3) inputFile = process.argv[2];
 const humanConfig = {
-  backend: 'tensorflow',
   modelBasePath: 'file://models/',
   debug: false,
   async: true,
@@ -45,7 +44,7 @@ const humanConfig = {
   object: { enabled: false },
 };
-const human = new Human(humanConfig);
+const human = new Human.Human(humanConfig);
 const pipe2jpeg = new Pipe2Jpeg();
 const ffmpegParams = [
@@ -62,18 +61,16 @@ const ffmpegParams = [
   'pipe:1', // output to unix pipe that is then captured by pipe2jpeg
 ];
-async function process(jpegBuffer) {
+async function detect(jpegBuffer) {
   if (busy) return; // skip processing if busy
   busy = true;
-  const decoded = tf.node.decodeJpeg(jpegBuffer, 3); // decode jpeg buffer to raw tensor
-  const tensor = tf.expandDims(decoded, 0); // almost all tf models use first dimension as batch number so we add it
-  tf.dispose(decoded);
-  log.state('input frame:', ++count, 'size:', jpegBuffer.length, 'decoded shape:', tensor.shape);
+  const tensor = human.tf.node.decodeJpeg(jpegBuffer, 3); // decode jpeg buffer to raw tensor
   const res = await human.detect(tensor);
-  log.data('gesture', JSON.stringify(res.gesture));
-  // do processing here
-  tf.dispose(tensor); // must dispose tensor
+  human.tf.dispose(tensor); // must dispose tensor
+  // start custom processing here
+  log.data('frame', { frame: ++count, size: jpegBuffer.length, shape: tensor.shape, face: res?.face?.length, body: res?.body?.length, hand: res?.hand?.length, gesture: res?.gesture?.length });
+  if (res?.face?.[0]) log.data('person', { score: [res.face[0].boxScore, res.face[0].faceScore], age: res.face[0].age || 0, gender: [res.face[0].genderScore || 0, res.face[0].gender], emotion: res.face[0].emotion?.[0] });
+  // at the end of processing mark loop as not busy so it can process next frame
   busy = false;
 }
@@ -81,8 +78,9 @@ async function main() {
   log.header();
   await human.tf.ready();
   // pre-load models
-  log.info('human:', human.version);
-  pipe2jpeg.on('jpeg', (jpegBuffer) => process(jpegBuffer));
+  log.info({ human: human.version, tf: human.tf.version_core });
+  log.info({ input: inputFile });
+  pipe2jpeg.on('data', (jpegBuffer) => detect(jpegBuffer));
   const ffmpeg = spawn('ffmpeg', ffmpegParams, { stdio: ['ignore', 'pipe', 'ignore'] });
   ffmpeg.on('error', (error) => log.error('ffmpeg error:', error));
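
Since `pipe2jpeg` and `ffmpeg` are external to `Human`, a typical setup and run could look like this (the demo path and input file name are assumptions):

```shell
npm install pipe2jpeg            # not part of human dependencies
node demo/nodejs/node-video.js input.mp4
```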

View File

@@ -2,20 +2,18 @@
  * Human demo for NodeJS
  * Unsupported sample of using external utility fswebcam to capture screenshot from attached webcam in regular intervals and process it using Human
  *
- * Note that node-webcam is not part of Human dependencies and should be installed manually
- * Working version of fswebcam must be present on the system
+ * Note that [node-webcam](https://www.npmjs.com/package/node-webcam) is not part of Human dependencies and should be installed manually
+ * Working version of `fswebcam` must be present on the system
  */
 let initial = true; // remember if this is the first run to print additional details
-const log = require('@vladmandic/pilogger');
-// @ts-ignore node-webcam is not installed by default
-// eslint-disable-next-line node/no-missing-require
-const nodeWebCam = require('node-webcam');
-// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
-// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
-const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
-// load specific version of Human library that matches TensorFlow mode
-const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
+const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
+const nodeWebCam = require('node-webcam'); // eslint-disable-line import/no-unresolved, node/no-missing-require
+// in nodejs environments tfjs-node is required to be loaded before human
+const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
+// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
+const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
 // options for node-webcam
 const tempFile = 'webcam-snap'; // node-webcam requires writing snapshot to a file, recommended to use tmpfs to avoid excessive disk writes
@@ -27,10 +25,10 @@ const camera = nodeWebCam.create(optionsCamera);
 // options for human
 const optionsHuman = {
-  backend: 'tensorflow',
   modelBasePath: 'file://models/',
 };
-const human = new Human(optionsHuman);
+const human = new Human.Human(optionsHuman);
 function buffer2tensor(buffer) {
   return human.tf.tidy(() => {
@@ -62,18 +60,20 @@ async function detect() {
     } else {
       const tensor = buffer2tensor(data); // create tensor from image buffer
       if (initial) log.data('input tensor:', tensor.shape);
-      // eslint-disable-next-line promise/no-promise-in-callback
-      human.detect(tensor).then((result) => {
+      human.detect(tensor) // eslint-disable-line promise/no-promise-in-callback
+        .then((result) => {
           if (result && result.face && result.face.length > 0) {
             for (let i = 0; i < result.face.length; i++) {
               const face = result.face[i];
-              const emotion = face.emotion.reduce((prev, curr) => (prev.score > curr.score ? prev : curr));
-              log.data(`detected face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} iris:${face.iris}`);
+              const emotion = face.emotion?.reduce((prev, curr) => (prev.score > curr.score ? prev : curr));
+              log.data(`detected face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion?.score} emotion:${emotion?.emotion} iris:${face.iris}`);
             }
           } else {
             log.data(' Face: N/A');
           }
-      });
+          return result;
+        })
+        .catch(() => log.error('human detect error'));
     }
     initial = false;
   });
@@ -82,6 +82,7 @@ async function detect() {
 }
 async function main() {
+  log.info('human:', human.version, 'tf:', tf.version_core);
   camera.list((list) => {
     log.data('detected camera:', list);
   });
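
Analogous setup for the webcam demo; per the comments above, `node-webcam` must be installed manually and a working `fswebcam` must be present on the system (the demo path and install step are platform-dependent assumptions):

```shell
npm install node-webcam          # not part of human dependencies
node demo/nodejs/node-webcam.js
```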

View File

@ -1,25 +1,21 @@
/** /**
* Human demo for NodeJS * Human demo for NodeJS
*/ */
const log = require('@vladmandic/pilogger');
const fs = require('fs'); const fs = require('fs');
const path = require('path'); const path = require('path');
const process = require('process'); const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
let fetch; // fetch is dynamically imported later // in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human // const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
// load specific version of Human library that matches TensorFlow mode
const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
let human = null; let human = null;
const myConfig = { const myConfig = {
backend: 'tensorflow', // backend: 'tensorflow',
modelBasePath: 'file://models/', modelBasePath: 'file://models/',
debug: true, debug: true,
async: false, async: false,
@ -45,16 +41,17 @@ const myConfig = {
async function init() { async function init() {
// create instance of human // create instance of human
human = new Human(myConfig); human = new Human.Human(myConfig);
// wait until tf is ready // wait until tf is ready
await human.tf.ready(); await human.tf.ready();
log.info('human:', human.version, 'tf:', tf.version_core);
// pre-load models // pre-load models
log.info('Human:', human.version); log.info('Human:', human.version);
// log.info('Active Configuration', human.config); // log.info('Active Configuration', human.config);
await human.load(); await human.load();
const loaded = Object.keys(human.models).filter((a) => human.models[a]); log.info('Loaded:', human.models.loaded());
log.info('Loaded:', loaded); // log.info('Memory state:', human.tf.engine().memory());
log.info('Memory state:', human.tf.engine().memory()); log.data(tf.backend().binding ? tf.backend().binding.TF_Version : null);
} }
async function detect(input) { async function detect(input) {
@ -63,11 +60,12 @@ async function detect(input) {
log.info('Loading image:', input); log.info('Loading image:', input);
if (input.startsWith('http:') || input.startsWith('https:')) { if (input.startsWith('http:') || input.startsWith('https:')) {
const res = await fetch(input); const res = await fetch(input);
if (res && res.ok) buffer = await res.buffer(); if (res && res.ok) buffer = Buffer.from(await res.arrayBuffer());
else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type')); else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type'));
} else { } else {
buffer = fs.readFileSync(input); buffer = fs.readFileSync(input);
} }
log.data('Image bytes:', buffer?.length, 'buffer:', buffer?.slice(0, 32));
// decode image using tfjs-node so we don't need external dependencies // decode image using tfjs-node so we don't need external dependencies
// can also be done using canvas.js or some other 3rd party image library // can also be done using canvas.js or some other 3rd party image library
@ -87,14 +85,14 @@ async function detect(input) {
}); });
// image shape contains image dimensions and depth // image shape contains image dimensions and depth
log.state('Processing:', tensor['shape']); log.state('Processing:', tensor.shape);
// run actual detection // run actual detection
let result; let result;
try { try {
result = await human.detect(tensor, myConfig); result = await human.detect(tensor, myConfig);
} catch (err) { } catch (err) {
log.error('caught'); log.error('caught', err);
} }
// dispose image tensor as we no longer need it // dispose image tensor as we no longer need it
@ -106,7 +104,7 @@ async function detect(input) {
for (let i = 0; i < result.face.length; i++) { for (let i = 0; i < result.face.length; i++) {
const face = result.face[i]; const face = result.face[i];
const emotion = face.emotion.reduce((prev, curr) => (prev.score > curr.score ? prev : curr)); const emotion = face.emotion.reduce((prev, curr) => (prev.score > curr.score ? prev : curr));
log.data(` Face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} iris:${face.iris}`); log.data(` Face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} distance:${face.distance}`);
} }
} else { } else {
log.data(' Face: N/A'); log.data(' Face: N/A');
@ -190,7 +188,6 @@ async function main() {
log.configure({ inspect: { breakLength: 265 } }); log.configure({ inspect: { breakLength: 265 } });
log.header(); log.header();
log.info('Current folder:', process.env.PWD); log.info('Current folder:', process.env.PWD);
fetch = (await import('node-fetch')).default;
await init(); await init();
const f = process.argv[2]; const f = process.argv[2];
if (process.argv.length !== 3) { if (process.argv.length !== 3) {
@ -198,8 +195,7 @@ async function main() {
await test(); await test();
} else if (!fs.existsSync(f) && !f.startsWith('http')) { } else if (!fs.existsSync(f) && !f.startsWith('http')) {
log.error(`File not found: ${process.argv[2]}`); log.error(`File not found: ${process.argv[2]}`);
} else { } else if (fs.existsSync(f)) {
if (fs.existsSync(f)) {
const stat = fs.statSync(f); const stat = fs.statSync(f);
if (stat.isDirectory()) { if (stat.isDirectory()) {
const dir = fs.readdirSync(f); const dir = fs.readdirSync(f);
@ -212,7 +208,6 @@ async function main() {
} else { } else {
await detect(f); await detect(f);
} }
}
} }
main(); main();


@ -1,76 +1,119 @@
/**
* Human demo for NodeJS
*
* Takes input and output folder names as parameters and processes all images
* found in the input folder, creating annotated images in the output folder
*
* Requires [canvas](https://www.npmjs.com/package/canvas) to provide Canvas functionality in NodeJS environment
*/
const fs = require('fs'); const fs = require('fs');
const path = require('path'); const path = require('path');
const process = require('process'); const process = require('process');
const log = require('@vladmandic/pilogger'); const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const canvas = require('canvas'); const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
const tf = require('@tensorflow/tfjs-node'); // for nodejs, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human // for nodejs, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
const Human = require('../../dist/human.node.js'); // this is 'const Human = require('../dist/human.node-gpu.js').default;' const tf = require('@tensorflow/tfjs-node-gpu'); // eslint-disable-line node/no-unpublished-require
const Human = require('../../dist/human.node-gpu.js'); // this is 'const Human = require('../dist/human.node-gpu.js').default;'
const config = { // just enable all and leave default settings const config = { // just enable all and leave default settings
modelBasePath: 'file://models',
debug: true, debug: true,
async: false, softwareKernels: true, // slower but enhanced precision since face rotation can work in software mode in nodejs environments
cacheSensitivity: 0, cacheSensitivity: 0.01,
face: { enabled: true, detector: { maxDetected: 20 } }, face: { enabled: true, detector: { maxDetected: 100, minConfidence: 0.1 } },
object: { enabled: true }, object: { enabled: true, maxDetected: 100, minConfidence: 0.1 },
gesture: { enabled: true }, gesture: { enabled: true },
hand: { enabled: true }, hand: { enabled: true, maxDetected: 100, minConfidence: 0.2 },
body: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/movenet-multipose.json' }, body: { enabled: true, maxDetected: 100, minConfidence: 0.1, modelPath: 'https://vladmandic.github.io/human-models/models/movenet-multipose.json' },
}; };
const poolSize = 4;
const human = new Human.Human(config); // create instance of human
async function saveFile(shape, buffer, result, outFile) {
return new Promise(async (resolve, reject) => { // eslint-disable-line no-async-promise-executor
const outputCanvas = new canvas.Canvas(shape[2], shape[1]); // create canvas
const outputCtx = outputCanvas.getContext('2d');
const inputImage = await canvas.loadImage(buffer); // load image using canvas library
outputCtx.drawImage(inputImage, 0, 0); // draw input image onto canvas
human.draw.all(outputCanvas, result); // use human built-in method to draw results as overlays on canvas
const outStream = fs.createWriteStream(outFile); // write canvas to new image file
outStream.on('finish', () => {
log.data('Output image:', outFile, outputCanvas.width, outputCanvas.height);
resolve();
});
outStream.on('error', (err) => {
log.error('Output error:', outFile, err);
reject();
});
const stream = outputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
stream.pipe(outStream);
});
}
async function processFile(image, inFile, outFile) {
const buffer = fs.readFileSync(inFile);
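// decode the image into a tensor, add a batch dimension and cast to float32; tf.tidy releases the intermediate tensors once the final tensor is returned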
const tensor = tf.tidy(() => {
const decode = tf.node.decodeImage(buffer, 3);
const expand = tf.expandDims(decode, 0);
const cast = tf.cast(expand, 'float32');
return cast;
});
log.state('Loaded image:', inFile, tensor.shape);
const result = await human.detect(tensor);
human.tf.dispose(tensor);
log.data(`Detected: ${image}:`, 'Face:', result.face.length, 'Body:', result.body.length, 'Hand:', result.hand.length, 'Objects:', result.object.length, 'Gestures:', result.gesture.length);
if (outFile) await saveFile(tensor.shape, buffer, result, outFile);
}
async function main() { async function main() {
log.header(); log.header();
globalThis.Canvas = canvas.Canvas; // patch global namespace with canvas library globalThis.Canvas = canvas.Canvas; // patch global namespace with canvas library
globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library
const human = new Human.Human(config); // create instance of human log.info('Human:', human.version, 'TF:', tf.version_core);
log.info('Human:', human.version);
const configErrors = await human.validate(); const configErrors = await human.validate();
if (configErrors.length > 0) log.error('Configuration errors:', configErrors); if (configErrors.length > 0) log.error('Configuration errors:', configErrors);
await human.load(); // pre-load models await human.load(); // pre-load models
log.info('Loaded models:', Object.keys(human.models).filter((a) => human.models[a])); log.info('Loaded models:', human.models.loaded());
const inDir = process.argv[2]; const inDir = process.argv[2];
const outDir = process.argv[3]; const outDir = process.argv[3];
if (process.argv.length !== 4) { if (!inDir) {
log.error('Parameters: <input-directory> <output-directory> missing'); log.error('Parameters: <input-directory> missing');
return; return;
} }
if (!fs.existsSync(inDir) || !fs.statSync(inDir).isDirectory() || !fs.existsSync(outDir) || !fs.statSync(outDir).isDirectory()) { if (inDir && (!fs.existsSync(inDir) || !fs.statSync(inDir).isDirectory())) {
log.error('Invalid directory specified:', 'input:', fs.existsSync(inDir) ?? fs.statSync(inDir).isDirectory(), 'output:', fs.existsSync(outDir) ?? fs.statSync(outDir).isDirectory()); log.error('Invalid input directory:', fs.existsSync(inDir) && fs.statSync(inDir).isDirectory());
return;
}
if (!outDir) {
log.info('Parameters: <output-directory> missing, images will not be saved');
}
if (outDir && (!fs.existsSync(outDir) || !fs.statSync(outDir).isDirectory())) {
log.error('Invalid output directory:', fs.existsSync(outDir) && fs.statSync(outDir).isDirectory());
return; return;
} }
const dir = fs.readdirSync(inDir); const dir = fs.readdirSync(inDir);
const images = dir.filter((f) => fs.statSync(path.join(inDir, f)).isFile() && (f.toLocaleLowerCase().endsWith('.jpg') || f.toLocaleLowerCase().endsWith('.jpeg'))); const images = dir.filter((f) => fs.statSync(path.join(inDir, f)).isFile() && (f.toLocaleLowerCase().endsWith('.jpg') || f.toLocaleLowerCase().endsWith('.jpeg')));
log.info(`Processing folder: ${inDir} entries:`, dir.length, 'images', images.length); log.info(`Processing folder: ${inDir} entries:`, dir.length, 'images', images.length);
for (const image of images) { const t0 = performance.now();
const inFile = path.join(inDir, image); const promises = [];
const buffer = fs.readFileSync(inFile); for (let i = 0; i < images.length; i++) {
const tensor = human.tf.tidy(() => { const inFile = path.join(inDir, images[i]);
const decode = human.tf.node.decodeImage(buffer, 3); const outFile = outDir ? path.join(outDir, images[i]) : null;
const expand = human.tf.expandDims(decode, 0); promises.push(processFile(images[i], inFile, outFile));
const cast = human.tf.cast(expand, 'float32'); if (i % poolSize === 0) await Promise.all(promises);
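// await the accumulated promises every poolSize images so files are processed in batches instead of all at once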
return cast;
});
log.state('Loaded image:', inFile, tensor.shape);
const result = await human.detect(tensor);
tf.dispose(tensor);
log.data(`Detected: ${image}:`, 'Face:', result.face.length, 'Body:', result.body.length, 'Hand:', result.hand.length, 'Objects:', result.object.length, 'Gestures:', result.gesture.length);
const outputCanvas = new canvas.Canvas(tensor.shape[2], tensor.shape[1]); // create canvas
const outputCtx = outputCanvas.getContext('2d');
const inputImage = await canvas.loadImage(buffer); // load image using canvas library
outputCtx.drawImage(inputImage, 0, 0); // draw input image onto canvas
human.draw.all(outputCanvas, result); // use human built-in method to draw results as overlays on canvas
const outFile = path.join(outDir, image);
const outStream = fs.createWriteStream(outFile); // write canvas to new image file
outStream.on('finish', () => log.state('Output image:', outFile, outputCanvas.width, outputCanvas.height));
outStream.on('error', (err) => log.error('Output error:', outFile, err));
const stream = outputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
stream.pipe(outStream);
} }
await Promise.all(promises);
const t1 = performance.now();
log.info(`Processed ${images.length} images in ${Math.round(t1 - t0)} ms`);
} }
main(); main();


@ -24,13 +24,13 @@
a:hover { color: lightskyblue; text-decoration: none; } a:hover { color: lightskyblue; text-decoration: none; }
.row { width: 90vw; margin: auto; margin-top: 100px; text-align: center; } .row { width: 90vw; margin: auto; margin-top: 100px; text-align: center; }
</style> </style>
</head> </head>
<body> <body>
<div class="row text-center"> <div class="row text-center">
<h1> <h1>
<a href="/">Human: Offline</a><br> <a href="/">Human: Offline</a><br>
<img alt="icon" src="../assets/icon.png"> <img alt="icon" src="../assets/icon.png">
</h1> </h1>
</div> </div>
</body> </body>
</html> </html>


@ -0,0 +1,61 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Human Demo</title>
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
<meta name="mobile-web-app-capable" content="yes">
<meta name="application-name" content="Human Demo">
<meta name="keywords" content="Human Demo">
<meta name="description" content="Human Demo; Author: Vladimir Mandic <mandic00@live.com>">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../favicon.ico" type="image/x-icon">
<link rel="icon" sizes="256x256" href="../assets/icons/dash-256.png">
<link rel="apple-touch-icon" href="../assets/icons/dash-256.png">
<link rel="apple-touch-startup-image" href="../assets/icons/dash-256.png">
<style>
@font-face { font-family: 'CenturyGothic'; font-display: swap; font-style: normal; font-weight: 400; src: local('CenturyGothic'), url('../assets/century-gothic.ttf') format('truetype'); }
html { font-size: 18px; }
body { font-size: 1rem; font-family: "CenturyGothic", "Segoe UI", sans-serif; font-variant: small-caps; width: -webkit-fill-available; height: 100%; background: black; color: white; overflow: hidden; margin: 0; }
select { font-size: 1rem; font-family: "CenturyGothic", "Segoe UI", sans-serif; font-variant: small-caps; background: gray; color: white; border: none; }
</style>
<script src="../segmentation/index.js" type="module"></script>
</head>
<body>
<noscript><h1>javascript is required</h1></noscript>
<nav>
<div id="nav" class="nav"></div>
</nav>
<header>
<div id="header" class="header" style="position: fixed; top: 0; right: 0; padding: 4px; margin: 16px; background: rgba(0, 0, 0, 0.5); z-index: 10; line-height: 2rem;">
<label for="mode">mode</label>
<select id="mode" name="mode">
<option value="default">remove background</option>
<option value="alpha">draw alpha channel</option>
<option value="foreground">full foreground</option>
<option value="state">recurrent state</option>
</select><br>
<label for="composite">composite</label>
<select id="composite" name="composite"></select><br>
<label for="ratio">downsample ratio</label>
<input type="range" name="ratio" id="ratio" min="0.1" max="1" value="0.5" step="0.05">
<div id="fps" style="margin-top: 8px"></div>
</div>
</header>
<main>
<div id="main" class="main">
<video id="webcam" style="position: fixed; top: 0; left: 0; width: 50vw; height: 50vh"></video>
<img id="background" alt="background" style="position: fixed; top: 0; right: 0; width: 50vw; height: 50vh" controls></img>
<canvas id="output" style="position: fixed; bottom: 0; left: 0; height: 50vh"></canvas>
<canvas id="merge" style="position: fixed; bottom: 0; right: 0; height: 50vh"></canvas>
</div>
</main>
<footer>
<div id="footer" class="footer"></div>
</footer>
<aside>
<div id="aside" class="aside"></div>
</aside>
</body>
</html>


@ -0,0 +1,99 @@
/**
* Human demo for browsers
* @default Human Library
* @summary <https://github.com/vladmandic/human>
* @author <https://github.com/vladmandic>
* @copyright <https://github.com/vladmandic>
* @license MIT
*/
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
const humanConfig = { // user configuration for human, used to fine-tune behavior
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
filter: { enabled: true, equalization: false, flip: false },
face: { enabled: false },
body: { enabled: false },
hand: { enabled: false },
object: { enabled: false },
gesture: { enabled: false },
segmentation: {
enabled: true,
modelPath: 'rvm.json', // can use rvm, selfie or meet
ratio: 0.5,
mode: 'default',
},
};
const backgroundImage = '../../samples/in/background.jpg';
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
const log = (...msg) => console.log(...msg); // eslint-disable-line no-console
async function main() {
// gather dom elements
const dom = {
background: document.getElementById('background'),
webcam: document.getElementById('webcam'),
output: document.getElementById('output'),
merge: document.getElementById('merge'),
mode: document.getElementById('mode'),
composite: document.getElementById('composite'),
ratio: document.getElementById('ratio'),
fps: document.getElementById('fps'),
};
// set defaults
dom.fps.innerText = 'initializing';
dom.ratio.valueAsNumber = human.config.segmentation.ratio;
dom.background.src = backgroundImage;
dom.composite.innerHTML = ['source-atop', 'color', 'color-burn', 'color-dodge', 'copy', 'darken', 'destination-atop', 'destination-in', 'destination-out', 'destination-over', 'difference', 'exclusion', 'hard-light', 'hue', 'lighten', 'lighter', 'luminosity', 'multiply', 'overlay', 'saturation', 'screen', 'soft-light', 'source-in', 'source-out', 'source-over', 'xor'].map((gco) => `<option value="${gco}">${gco}</option>`).join(''); // eslint-disable-line max-len
const ctxMerge = dom.merge.getContext('2d');
log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
log('platform:', human.env.platform, '| agent:', human.env.agent);
await human.load(); // preload all models
log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
log('models stats:', human.models.stats());
log('models loaded:', human.models.loaded());
await human.warmup(); // warmup function to initialize backend for future faster detection
const numTensors = human.tf.engine().state.numTensors;
// initialize webcam
dom.webcam.onplay = () => { // start processing on video play
log('start processing');
dom.output.width = human.webcam.width;
dom.output.height = human.webcam.height;
dom.merge.width = human.webcam.width;
dom.merge.height = human.webcam.height;
loop(); // eslint-disable-line no-use-before-define
};
await human.webcam.start({ element: dom.webcam, crop: true, width: window.innerWidth / 2, height: window.innerHeight / 2 }); // use human webcam helper methods and associate webcam stream with a dom element
if (!human.webcam.track) dom.fps.innerText = 'webcam error';
// processing loop
async function loop() {
if (!human.webcam.element || human.webcam.paused) return; // check if webcam is valid and playing
human.config.segmentation.mode = dom.mode.value; // get segmentation mode from ui
human.config.segmentation.ratio = dom.ratio.valueAsNumber; // get segmentation downsample ratio from ui
const t0 = Date.now();
const rgba = await human.segmentation(human.webcam.element, human.config); // run model and process results
const t1 = Date.now();
if (!rgba) {
dom.fps.innerText = 'error';
return;
}
dom.fps.innerText = `fps: ${Math.round(10000 / (t1 - t0)) / 10}`; // mark performance
human.draw.tensor(rgba, dom.output); // draw raw output
human.tf.dispose(rgba); // dispose tensors
ctxMerge.globalCompositeOperation = 'source-over';
ctxMerge.drawImage(dom.background, 0, 0); // draw original video to first stacked canvas
ctxMerge.globalCompositeOperation = dom.composite.value;
ctxMerge.drawImage(dom.output, 0, 0); // draw processed output to second stacked canvas
if (numTensors !== human.tf.engine().state.numTensors) log({ leak: human.tf.engine().state.numTensors - numTensors }); // check for memory leaks
requestAnimationFrame(loop);
}
}
window.onload = main;


@ -1,28 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<meta name="viewport" content="width=device-width" id="viewport">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./index.js" type="module"></script>
<style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; text-align: center; }
body::-webkit-scrollbar { display: none; }
</style>
</head>
<body>
<canvas id="canvas" style="margin: 0 auto"></canvas>
<video id="video" playsinline style="display: none"></video>
<div id="fps" style="position: absolute; top: 20px; right: 20px; background-color: grey; padding: 8px"></div>
</body>
</html>


@ -1,107 +0,0 @@
/**
* Human demo for browsers
*
* @description Simple Human demo for browsers using WebCam or WebRTC
*
* @configuration
* config={}: contains all model configuration used by human
*/
import Human from '../../dist/human.esm.js'; // equivalent of @vladmandic/human
import webRTC from '../helpers/webrtc.js'; // handle webrtc handshake and connects to webrtc stream
const config = { // use default values for everything just specify models location
modelBasePath: '../../models',
};
const human = new Human(config);
const webrtc = {
enabled: false, // use webrtc or use webcam if disabled
server: 'http://human.local:8002',
stream: 'reowhite',
};
// eslint-disable-next-line no-console
const log = (...msg) => console.log(...msg);
/** @type {HTMLVideoElement} */
// @ts-ignore
const video = document.getElementById('video') || document.createElement('video'); // used as input
/** @type {HTMLCanvasElement} */
// @ts-ignore
const canvas = document.getElementById('canvas') || document.createElement('canvas'); // used as output
// @ts-ignore
const fps = { detect: 0, draw: 0 };
fps.el = document.getElementById('fps') || document.createElement('div'); // used as draw fps counter
async function webCam() {
const constraints = { audio: false, video: { facingMode: 'user', resizeMode: 'none', width: { ideal: document.body.clientWidth } } }; // set preferred camera options
const stream = await navigator.mediaDevices.getUserMedia(constraints); // get webcam stream that matches constraints
const ready = new Promise((resolve) => { video.onloadeddata = () => resolve(true); }); // resolve when stream is ready
video.srcObject = stream; // assign stream to video element
video.play(); // start stream
await ready; // wait until stream is ready
canvas.width = video.videoWidth; // resize output canvas to match input
canvas.height = video.videoHeight;
log('video stream:', video.srcObject, 'track state:', video.srcObject.getVideoTracks()[0].readyState, 'stream state:', video.readyState);
canvas.onclick = () => { // play or pause on mouse click
if (video.paused) video.play();
else video.pause();
};
}
// eslint-disable-next-line no-unused-vars
let result;
async function detectionLoop() {
const t0 = performance.now();
if (!video.paused) result = await human.detect(video); // updates result every time detection completes, skip if video is paused
const t1 = performance.now();
fps.detect = 1000 / (t1 - t0);
// eslint-disable-next-line @typescript-eslint/no-unused-vars
requestAnimationFrame(detectionLoop); // run in loop
}
// eslint-disable-next-line no-unused-vars
async function drawLoop() {
const t0 = performance.now();
if (!video.paused) { // skip redraw if video is paused
const interpolated = await human.next(result); // interpolates results based on last known results
await human.draw.canvas(video, canvas); // draw input video to output canvas
await human.draw.all(canvas, interpolated); // draw results as overlay on output canvas
}
const t1 = performance.now();
fps.draw = 1000 / (t1 - t0);
fps.el.innerText = video.paused ? 'paused' : `${fps.detect.toFixed(1)} / ${fps.draw.toFixed(1)}`;
// eslint-disable-next-line @typescript-eslint/no-unused-vars
requestAnimationFrame(drawLoop); // run in loop
}
// eslint-disable-next-line no-unused-vars
async function singleLoop() {
const t0 = performance.now();
result = await human.detect(video); // updates result every time detection completes
await human.draw.canvas(video, canvas); // draw input video to output canvas
await human.draw.all(canvas, result); // draw results as overlay on output canvas
const t1 = performance.now();
fps.detect = 1000 / (t1 - t0);
fps.el.innerText = video.paused ? 'paused' : `${fps.detect.toFixed(1)}`;
// eslint-disable-next-line @typescript-eslint/no-unused-vars
requestAnimationFrame(singleLoop); // run in loop
}
async function main() {
await human.load(); // not required, pre-loads all models
await human.warmup(); // not required, warms up all models
if (webrtc.enabled) await webRTC(webrtc.server, webrtc.stream, video); // setup webrtc as input stream, uses helper implementation in
else await webCam(); // setup webcam as input stream
// preferred: run in two loops, one for actual detection and one that draws interpolated results on screen so results appear much smoother
await detectionLoop();
await drawLoop();
// alternative run in single loop where we run detection and then draw results
// await singleLoop();
}
window.onload = main;

28
demo/tracker/README.md Normal file

@ -0,0 +1,28 @@
## Tracker
### Based on
<https://github.com/opendatacam/node-moving-things-tracker>
### Build
- remove reference to `lodash`:
> `isEqual` in <tracker.js>
- replace external lib:
> curl https://raw.githubusercontent.com/ubilabs/kd-tree-javascript/master/kdTree.js -o lib/kdTree-min.js
- build with `esbuild`:
> node_modules/.bin/esbuild --bundle tracker.js --format=esm --platform=browser --target=esnext --keep-names --tree-shaking=false --analyze --outfile=/home/vlado/dev/human/demo/tracker/tracker.js --banner:js="/* eslint-disable */"
### Usage
- `computeDistance(item1, item2)`
- `disableKeepInMemory()`
- `enableKeepInMemory()`
- `getAllTrackedItems()`
- `getJSONDebugOfTrackedItems(roundInt = true)`
- `getJSONOfAllTrackedItems()`
- `getJSONOfTrackedItems(roundInt = true)`
- `getTrackedItemsInMOTFormat(frameNb)`
- `reset()`
- `setParams(newParams)`
- `updateTrackedItemsWithNewFrame(detectionsOfThisFrame, frameNb)`
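
A minimal usage sketch assembled from the methods above (the detection item shape mirrors what `demo/tracker/index.ts` feeds the tracker; the option values here are placeholders, not recommendations):

```js
import tracker from './tracker.js'; // the bundle produced by the esbuild step above

tracker.setParams({ unMatchedFramesTolerance: 60, iouLimit: 0.1, distanceLimit: 1e4, matchingAlgorithm: 'kdTree' });
// one detection for frame 0: centroid x/y plus width/height, a label and a confidence score
const detections = [{ x: 320, y: 180, w: 64, h: 64, name: 'face', confidence: 0.9 }];
tracker.updateTrackedItemsWithNewFrame(detections, 0);
const tracked = tracker.getJSONOfTrackedItems(true); // each tracked item carries id, confidence, isZombie, name, x, y, w, h
```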

65
demo/tracker/index.html Normal file

@ -0,0 +1,65 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<meta name="viewport" content="width=device-width" id="viewport">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./index.js" type="module"></script>
<style>
html { font-family: 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
body::-webkit-scrollbar { display: none; }
input[type="file"] { font-family: 'Segoe UI'; font-size: 14px; font-variant: small-caps; }
::-webkit-file-upload-button { background: #333333; color: white; border: 0; border-radius: 0; padding: 6px 16px; box-shadow: 4px 4px 4px #222222; font-family: 'Segoe UI'; font-size: 14px; font-variant: small-caps; }
</style>
</head>
<body>
<div style="display: flex">
<video id="video" playsinline style="width: 25vw" controls controlslist="nofullscreen nodownload noremoteplayback" disablepictureinpicture loop></video>
<canvas id="canvas" style="width: 75vw"></canvas>
</div>
<div class="uploader" style="padding: 8px">
<input type="file" name="inputvideo" id="inputvideo" accept="video/*"></input>
<input type="checkbox" id="interpolation" name="interpolation"></input>
<label for="tracker">interpolation</label>
</div>
<form id="config" style="padding: 8px; line-height: 1.6rem;">
tracker |
<input type="checkbox" id="tracker" name="tracker" checked></input>
<label for="tracker">enabled</label> |
<input type="checkbox" id="keepInMemory" name="keepInMemory"></input>
<label for="keepInMemory">keepInMemory</label> |
<br>
tracker source |
<input type="radio" id="box-face" name="box" value="face" checked>
<label for="box-face">face</label> |
<input type="radio" id="box-body" name="box" value="body">
<label for="box-face">body</label> |
<input type="radio" id="box-object" name="box" value="object">
<label for="box-face">object</label> |
<br>
tracker config |
<input type="range" id="unMatchedFramesTolerance" name="unMatchedFramesTolerance" min="0" max="300" step="1", value="60"></input>
<label for="unMatchedFramesTolerance">unMatchedFramesTolerance</label> |
<input type="range" id="iouLimit" name="unMatchedFramesTolerance" min="0" max="1" step="0.01", value="0.1"></input>
<label for="iouLimit">iouLimit</label> |
<input type="range" id="distanceLimit" name="unMatchedFramesTolerance" min="0" max="1" step="0.01", value="0.1"></input>
<label for="distanceLimit">distanceLimit</label> |
<input type="radio" id="matchingAlgorithm-kdTree" name="matchingAlgorithm" value="kdTree" checked>
<label for="matchingAlgorithm-kdTree">kdTree</label> |
<input type="radio" id="matchingAlgorithm-munkres" name="matchingAlgorithm" value="munkres">
<label for="matchingAlgorithm-kdTree">munkres</label> |
</form>
<pre id="status" style="position: absolute; top: 12px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
<pre id="log" style="padding: 8px"></pre>
<div id="performance" style="position: absolute; bottom: 0; width: 100%; padding: 8px; font-size: 0.8rem;"></div>
</body>
</html>

10
demo/tracker/index.js Normal file

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

208
demo/tracker/index.ts Normal file

@ -0,0 +1,208 @@
/**
* Human demo for browsers
* @default Human Library
* @summary <https://github.com/vladmandic/human>
* @author <https://github.com/vladmandic>
* @copyright <https://github.com/vladmandic>
* @license MIT
*/
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
import tracker from './tracker.js';
const humanConfig: Partial<H.Config> = { // user configuration for human, used to fine-tune behavior
debug: true,
backend: 'webgl',
// cacheSensitivity: 0,
// cacheModels: false,
// warmup: 'none',
modelBasePath: 'https://vladmandic.github.io/human-models/models',
filter: { enabled: true, equalization: false, flip: false },
face: {
enabled: true,
detector: { rotation: false, maxDetected: 10, minConfidence: 0.3 },
mesh: { enabled: true },
attention: { enabled: false },
iris: { enabled: false },
description: { enabled: false },
emotion: { enabled: false },
antispoof: { enabled: false },
liveness: { enabled: false },
},
body: { enabled: false, maxDetected: 6, modelPath: 'movenet-multipose.json' },
hand: { enabled: false },
object: { enabled: false, maxDetected: 10 },
segmentation: { enabled: false },
gesture: { enabled: false },
};
interface TrackerConfig {
unMatchedFramesTolerance: number, // number of frames an object can remain unmatched before it is considered gone; ignored if fastDelete is set
iouLimit: number, // exclude detections from being matched if their IOU is less than this; 1 means total overlap; 0 means no overlap
fastDelete: boolean, // remove new objects immediately if they could not be matched in the next frames; if set, ignores unMatchedFramesTolerance
distanceLimit: number, // distance limit for matching; if values need to be excluded from matching set their distance to something greater than the distance limit
matchingAlgorithm: 'kdTree' | 'munkres', // algorithm used to match tracks with new detections
}
interface TrackerResult {
id: number,
confidence: number,
bearing: number,
isZombie: boolean,
name: string,
x: number,
y: number,
w: number,
h: number,
}
const trackerConfig: TrackerConfig = {
unMatchedFramesTolerance: 100,
iouLimit: 0.05,
fastDelete: false,
distanceLimit: 1e4,
matchingAlgorithm: 'kdTree',
};
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
const dom = { // grab instances of dom objects so we dont have to look them up later
video: document.getElementById('video') as HTMLVideoElement,
canvas: document.getElementById('canvas') as HTMLCanvasElement,
log: document.getElementById('log') as HTMLPreElement,
fps: document.getElementById('status') as HTMLPreElement,
tracker: document.getElementById('tracker') as HTMLInputElement,
interpolation: document.getElementById('interpolation') as HTMLInputElement,
config: document.getElementById('config') as HTMLFormElement,
ctx: (document.getElementById('canvas') as HTMLCanvasElement).getContext('2d') as CanvasRenderingContext2D,
};
const timestamp = { detect: 0, draw: 0, tensors: 0, start: 0 }; // holds information used to calculate performance and possible memory leaks
const fps = { detectFPS: 0, drawFPS: 0, frames: 0, averageMs: 0 }; // holds calculated fps information for both detect and screen refresh
const log = (...msg) => { // helper method to output messages
dom.log.innerText += msg.join(' ') + '\n';
console.log(...msg); // eslint-disable-line no-console
};
const status = (msg) => dom.fps.innerText = msg; // print status element
async function detectionLoop() { // main detection loop
if (!dom.video.paused && dom.video.readyState >= 2) {
if (timestamp.start === 0) timestamp.start = human.now();
// log('profiling data:', await human.profile(dom.video));
await human.detect(dom.video, humanConfig); // actual detection; we're not capturing output in a local variable as it can also be reached via human.result
const tensors = human.tf.memory().numTensors; // check current tensor usage for memory leaks
if (tensors - timestamp.tensors !== 0) log('allocated tensors:', tensors - timestamp.tensors); // printed on start and each time there is a tensor leak
timestamp.tensors = tensors;
fps.detectFPS = Math.round(1000 * 1000 / (human.now() - timestamp.detect)) / 1000;
fps.frames++;
fps.averageMs = Math.round(1000 * (human.now() - timestamp.start) / fps.frames) / 1000;
}
timestamp.detect = human.now();
requestAnimationFrame(detectionLoop); // start new frame immediately
}
function drawLoop() { // main screen refresh loop
if (!dom.video.paused && dom.video.readyState >= 2) {
const res: H.Result = dom.interpolation.checked ? human.next(human.result) : human.result; // interpolate results if enabled
let tracking: H.FaceResult[] | H.BodyResult[] | H.ObjectResult[] = [];
if (human.config.face.enabled) tracking = res.face;
else if (human.config.body.enabled) tracking = res.body;
else if (human.config.object.enabled) tracking = res.object;
else log('unknown object type');
let data: TrackerResult[] = [];
if (dom.tracker.checked) {
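// convert human box format [x, y, width, height] into the tracker's centroid-based item format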
const items = tracking.map((obj) => ({
x: obj.box[0] + obj.box[2] / 2,
y: obj.box[1] + obj.box[3] / 2,
w: obj.box[2],
h: obj.box[3],
name: obj.label || (human.config.face.enabled ? 'face' : 'body'),
confidence: obj.score,
}));
tracker.updateTrackedItemsWithNewFrame(items, fps.frames);
data = tracker.getJSONOfTrackedItems(true) as TrackerResult[];
}
human.draw.canvas(dom.video, dom.canvas); // copy input video frame to output canvas
for (let i = 0; i < tracking.length; i++) {
// @ts-ignore
const name = tracking[i].label || (human.config.face.enabled ? 'face' : 'body');
dom.ctx.strokeRect(tracking[i].box[0], tracking[i].box[1], tracking[i].box[2], tracking[i].box[3]);
dom.ctx.fillText(`id: ${tracking[i].id} ${Math.round(100 * tracking[i].score)}% ${name}`, tracking[i].box[0] + 4, tracking[i].box[1] + 16);
if (data[i]) {
dom.ctx.fillText(`t: ${data[i].id} ${Math.round(100 * data[i].confidence)}% ${data[i].name} ${data[i].isZombie ? 'zombie' : ''}`, tracking[i].box[0] + 4, tracking[i].box[1] + 34);
}
}
}
const now = human.now();
fps.drawFPS = Math.round(1000 * 1000 / (now - timestamp.draw)) / 1000;
timestamp.draw = now;
status(dom.video.paused ? 'paused' : `fps: ${fps.detectFPS.toFixed(1).padStart(5, ' ')} detect | ${fps.drawFPS.toFixed(1).padStart(5, ' ')} draw`); // write status
setTimeout(drawLoop, 30); // use to slow down refresh from max refresh rate to target of 30 fps
}
async function handleVideo(file: File) {
const url = URL.createObjectURL(file);
dom.video.src = url;
await dom.video.play();
log('loaded video:', file.name, 'resolution:', [dom.video.videoWidth, dom.video.videoHeight], 'duration:', dom.video.duration);
dom.canvas.width = dom.video.videoWidth;
dom.canvas.height = dom.video.videoHeight;
dom.ctx.strokeStyle = 'white';
dom.ctx.fillStyle = 'white';
dom.ctx.font = '16px Segoe UI';
dom.video.playbackRate = 0.25;
}
function initInput() {
document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
document.body.addEventListener('dragover', (evt) => evt.preventDefault());
document.body.addEventListener('drop', async (evt) => {
evt.preventDefault();
if (evt.dataTransfer) evt.dataTransfer.dropEffect = 'copy';
const file = evt.dataTransfer?.files?.[0];
if (file) await handleVideo(file);
log(dom.video.readyState);
});
(document.getElementById('inputvideo') as HTMLInputElement).onchange = async (evt) => {
evt.preventDefault();
const file = evt.target?.['files']?.[0];
if (file) await handleVideo(file);
};
dom.config.onchange = () => {
trackerConfig.distanceLimit = (document.getElementById('distanceLimit') as HTMLInputElement).valueAsNumber;
trackerConfig.iouLimit = (document.getElementById('iouLimit') as HTMLInputElement).valueAsNumber;
trackerConfig.unMatchedFramesTolerance = (document.getElementById('unMatchedFramesTolerance') as HTMLInputElement).valueAsNumber;
trackerConfig.matchingAlgorithm = (document.getElementById('matchingAlgorithm-kdTree') as HTMLInputElement).checked ? 'kdTree' : 'munkres';
tracker.setParams(trackerConfig);
if ((document.getElementById('keepInMemory') as HTMLInputElement).checked) tracker.enableKeepInMemory();
else tracker.disableKeepInMemory();
tracker.reset();
log('tracker config change', JSON.stringify(trackerConfig));
humanConfig.face!.enabled = (document.getElementById('box-face') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
humanConfig.body!.enabled = (document.getElementById('box-body') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
humanConfig.object!.enabled = (document.getElementById('box-object') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
};
dom.tracker.onchange = (evt) => {
log('tracker', (evt.target as HTMLInputElement).checked ? 'enabled' : 'disabled');
tracker.setParams(trackerConfig);
tracker.reset();
};
}
async function main() { // main entry point
log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
log('platform:', human.env.platform, '| agent:', human.env.agent);
status('loading...');
await human.load(); // preload all models
log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
log('models loaded:', human.models.loaded());
status('initializing...');
await human.warmup(); // warmup function to initialize backend for future faster detection
initInput(); // initialize input
await detectionLoop(); // start detection loop
drawLoop(); // start draw loop
}
window.onload = main;

1201
demo/tracker/tracker.js Normal file

File diff suppressed because it is too large


@ -0,0 +1,5 @@
# Human Demo in TypeScript for Browsers
Simple demo app that can be used as a quick-start guide for using `Human` in browser environments
- `index.ts` is compiled to `index.js` which is loaded from `index.html`
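
One way to produce `index.js` by hand (a sketch only: these esbuild flags are assumptions modeled on the tracker demo's build step; the repo's own build profiles normally handle compilation):
> `node_modules/.bin/esbuild --bundle index.ts --format=esm --platform=browser --target=esnext --outfile=index.js`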


@ -18,16 +18,13 @@
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; } html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; } body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
body::-webkit-scrollbar { display: none; } body::-webkit-scrollbar { display: none; }
.status { position: absolute; width: 100vw; bottom: 10%; text-align: center; font-size: 3rem; font-weight: 100; text-shadow: 2px 2px #303030; }
.log { position: absolute; bottom: 0; margin: 0.4rem 0.4rem 0 0.4rem; font-size: 0.9rem; }
.video { display: none; }
.canvas { margin: 0 auto; }
</style> </style>
</head> </head>
<body> <body>
<div id="status" class="status"></div> <canvas id="canvas" style="margin: 0 auto; width: 100vw"></canvas>
<canvas id="canvas" class="canvas"></canvas> <video id="video" playsinline style="display: none"></video>
<video id="video" playsinline class="video"></video> <pre id="status" style="position: absolute; top: 12px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
<div id="log" class="log"></div> <pre id="log" style="padding: 8px"></pre>
<div id="performance" style="position: absolute; bottom: 0; width: 100%; padding: 8px; font-size: 0.8rem;"></div>
</body> </body>
</html> </html>

9
demo/typescript/index.js Normal file

@ -0,0 +1,9 @@
/*
Human
homepage: <https://github.com/vladmandic/human>
author: <https://github.com/vladmandic>'
*/
import*as m from"../../dist/human.esm.js";var v=1920,b={debug:!0,backend:"webgl",modelBasePath:"https://vladmandic.github.io/human-models/models/",filter:{enabled:!0,equalization:!1,flip:!1},face:{enabled:!0,detector:{rotation:!1},mesh:{enabled:!0},attention:{enabled:!1},iris:{enabled:!0},description:{enabled:!0},emotion:{enabled:!0},antispoof:{enabled:!0},liveness:{enabled:!0}},body:{enabled:!1},hand:{enabled:!1},object:{enabled:!1},segmentation:{enabled:!1},gesture:{enabled:!0}},e=new m.Human(b);e.env.perfadd=!1;e.draw.options.font='small-caps 18px "Lato"';e.draw.options.lineHeight=20;e.draw.options.drawPoints=!0;var a={video:document.getElementById("video"),canvas:document.getElementById("canvas"),log:document.getElementById("log"),fps:document.getElementById("status"),perf:document.getElementById("performance")},n={detect:0,draw:0,tensors:0,start:0},s={detectFPS:0,drawFPS:0,frames:0,averageMs:0},o=(...t)=>{a.log.innerText+=t.join(" ")+`
`,console.log(...t)},i=t=>a.fps.innerText=t,g=t=>a.perf.innerText="tensors:"+e.tf.memory().numTensors.toString()+" | performance: "+JSON.stringify(t).replace(/"|{|}/g,"").replace(/,/g," | ");async function f(){if(!a.video.paused){n.start===0&&(n.start=e.now()),await e.detect(a.video);let t=e.tf.memory().numTensors;t-n.tensors!==0&&o("allocated tensors:",t-n.tensors),n.tensors=t,s.detectFPS=Math.round(1e3*1e3/(e.now()-n.detect))/1e3,s.frames++,s.averageMs=Math.round(1e3*(e.now()-n.start)/s.frames)/1e3,s.frames%100===0&&!a.video.paused&&o("performance",{...s,tensors:n.tensors})}n.detect=e.now(),requestAnimationFrame(f)}async function u(){var d,r,c;if(!a.video.paused){let l=e.next(e.result),w=await e.image(a.video);e.draw.canvas(w.canvas,a.canvas);let p={bodyLabels:`person confidence [score] and ${(c=(r=(d=e.result)==null?void 0:d.body)==null?void 0:r[0])==null?void 0:c.keypoints.length} keypoints`};await e.draw.all(a.canvas,l,p),g(l.performance)}let t=e.now();s.drawFPS=Math.round(1e3*1e3/(t-n.draw))/1e3,n.draw=t,i(a.video.paused?"paused":`fps: ${s.detectFPS.toFixed(1).padStart(5," ")} detect | ${s.drawFPS.toFixed(1).padStart(5," ")} draw`),setTimeout(u,30)}async function h(){let d=(await e.webcam.enumerate())[0].deviceId,r=await e.webcam.start({element:a.video,crop:!1,width:v,id:d});o(r),a.canvas.width=e.webcam.width,a.canvas.height=e.webcam.height,a.canvas.onclick=async()=>{e.webcam.paused?await e.webcam.play():e.webcam.pause()}}async function y(){o("human version:",e.version,"| tfjs version:",e.tf.version["tfjs-core"]),o("platform:",e.env.platform,"| agent:",e.env.agent),i("loading..."),await e.load(),o("backend:",e.tf.getBackend(),"| available:",e.env.backends),o("models stats:",e.models.stats()),o("models loaded:",e.models.loaded()),o("environment",e.env),i("initializing..."),await e.warmup(),await h(),await f(),await u()}window.onload=y;
//# sourceMappingURL=index.js.map

File diff suppressed because one or more lines are too long

119
demo/typescript/index.ts Normal file

@ -0,0 +1,119 @@
/**
* Human demo for browsers
* @default Human Library
* @summary <https://github.com/vladmandic/human>
* @author <https://github.com/vladmandic>
* @copyright <https://github.com/vladmandic>
* @license MIT
*/
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
const width = 1920; // used by webcam config as well as human maximum resolution // can be anything, but resolutions higher than 4k will disable internal optimizations
const humanConfig: Partial<H.Config> = { // user configuration for human, used to fine-tune behavior
debug: true,
backend: 'webgl',
// cacheSensitivity: 0,
// cacheModels: false,
// warmup: 'none',
// modelBasePath: '../../models',
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
filter: { enabled: true, equalization: false, flip: false },
face: { enabled: true, detector: { rotation: false }, mesh: { enabled: true }, attention: { enabled: false }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true }, antispoof: { enabled: true }, liveness: { enabled: true } },
body: { enabled: false },
hand: { enabled: false },
object: { enabled: false },
segmentation: { enabled: false },
gesture: { enabled: true },
};
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
human.env.perfadd = false; // is performance data showing instant or total values
human.draw.options.font = 'small-caps 18px "Lato"'; // set font used to draw labels when using draw methods
human.draw.options.lineHeight = 20;
human.draw.options.drawPoints = true; // draw points on face mesh
// human.draw.options.fillPolygons = true;
const dom = { // grab instances of dom objects so we dont have to look them up later
video: document.getElementById('video') as HTMLVideoElement,
canvas: document.getElementById('canvas') as HTMLCanvasElement,
log: document.getElementById('log') as HTMLPreElement,
fps: document.getElementById('status') as HTMLPreElement,
perf: document.getElementById('performance') as HTMLDivElement,
};
const timestamp = { detect: 0, draw: 0, tensors: 0, start: 0 }; // holds information used to calculate performance and possible memory leaks
const fps = { detectFPS: 0, drawFPS: 0, frames: 0, averageMs: 0 }; // holds calculated fps information for both detect and screen refresh
const log = (...msg) => { // helper method to output messages
dom.log.innerText += msg.join(' ') + '\n';
console.log(...msg); // eslint-disable-line no-console
};
const status = (msg) => dom.fps.innerText = msg; // print status element
const perf = (msg) => dom.perf.innerText = 'tensors:' + human.tf.memory().numTensors.toString() + ' | performance: ' + JSON.stringify(msg).replace(/"|{|}/g, '').replace(/,/g, ' | '); // print performance element
async function detectionLoop() { // main detection loop
if (!dom.video.paused) {
if (timestamp.start === 0) timestamp.start = human.now();
// log('profiling data:', await human.profile(dom.video));
await human.detect(dom.video); // actual detection; we're not capturing output in a local variable as it can also be reached via human.result
const tensors = human.tf.memory().numTensors; // check current tensor usage for memory leaks
if (tensors - timestamp.tensors !== 0) log('allocated tensors:', tensors - timestamp.tensors); // printed on start and each time there is a tensor leak
timestamp.tensors = tensors;
fps.detectFPS = Math.round(1000 * 1000 / (human.now() - timestamp.detect)) / 1000;
fps.frames++;
fps.averageMs = Math.round(1000 * (human.now() - timestamp.start) / fps.frames) / 1000;
if (fps.frames % 100 === 0 && !dom.video.paused) log('performance', { ...fps, tensors: timestamp.tensors });
}
timestamp.detect = human.now();
requestAnimationFrame(detectionLoop); // start new frame immediately
}
async function drawLoop() { // main screen refresh loop
if (!dom.video.paused) {
const interpolated = human.next(human.result); // smoothen result using last-known results
const processed = await human.image(dom.video); // get current video frame, but enhanced with human.filters
human.draw.canvas(processed.canvas as HTMLCanvasElement, dom.canvas);
const opt: Partial<H.DrawOptions> = { bodyLabels: `person confidence [score] and ${human.result?.body?.[0]?.keypoints.length} keypoints` };
await human.draw.all(dom.canvas, interpolated, opt); // draw labels, boxes, lines, etc.
perf(interpolated.performance); // write performance data
}
const now = human.now();
fps.drawFPS = Math.round(1000 * 1000 / (now - timestamp.draw)) / 1000;
timestamp.draw = now;
status(dom.video.paused ? 'paused' : `fps: ${fps.detectFPS.toFixed(1).padStart(5, ' ')} detect | ${fps.drawFPS.toFixed(1).padStart(5, ' ')} draw`); // write status
setTimeout(drawLoop, 30); // use to slow down refresh from max refresh rate to target of 30 fps
}
async function webCam() {
const devices = await human.webcam.enumerate();
const id = devices[0].deviceId; // use first available video source
const webcamStatus = await human.webcam.start({ element: dom.video, crop: false, width, id }); // use human webcam helper methods and associate webcam stream with a dom element
log(webcamStatus);
dom.canvas.width = human.webcam.width;
dom.canvas.height = human.webcam.height;
dom.canvas.onclick = async () => { // pause when clicked on screen and resume on next click
if (human.webcam.paused) await human.webcam.play();
else human.webcam.pause();
};
}
async function main() { // main entry point
log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
log('platform:', human.env.platform, '| agent:', human.env.agent);
status('loading...');
await human.load(); // preload all models
log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
log('models stats:', human.models.stats());
log('models loaded:', human.models.loaded());
log('environment', human.env);
status('initializing...');
await human.warmup(); // warmup function to initialize backend for future faster detection
await webCam(); // start webcam
await detectionLoop(); // start detection loop
await drawLoop(); // start draw loop
}
window.onload = main;

58
demo/video/index.html Normal file

@ -0,0 +1,58 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<meta name="viewport" content="width=device-width" id="viewport">
<meta name="keywords" content="Human">
<meta name="description" content="Human: Demo; Author: Vladimir Mandic <https://github.com/vladmandic>">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
body { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; margin: 0; background: black; color: white; overflow: hidden; width: 100vw; height: 100vh; }
</style>
</head>
<body>
<canvas id="canvas" style="margin: 0 auto; width: 100%"></canvas>
<pre id="log" style="padding: 8px; position: fixed; bottom: 0"></pre>
<script type="module">
import * as H from '../../dist/human.esm.js'; // equivalent of import @vladmandic/Human
const humanConfig = { // user configuration for human, used to fine-tune behavior
modelBasePath: '../../models', // models can be loaded directly from cdn as well
filter: { enabled: true, equalization: true, flip: false },
face: { enabled: true, detector: { rotation: false }, mesh: { enabled: true }, attention: { enabled: false }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true } },
body: { enabled: true },
hand: { enabled: true },
gesture: { enabled: true },
object: { enabled: false },
segmentation: { enabled: false },
};
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
const canvas = document.getElementById('canvas'); // output canvas to draw both webcam and detection results
async function drawLoop() { // main screen refresh loop
const interpolated = human.next(); // get smoothed result using last-known results which are continuously updated based on input webcam video
human.draw.canvas(human.webcam.element, canvas); // draw webcam video to screen canvas // better than using processed image as this loop happens faster than processing loop
await human.draw.all(canvas, interpolated); // draw labels, boxes, lines, etc.
setTimeout(drawLoop, 30); // use to slow down refresh from max refresh rate to target of 1000/30 ~ 30 fps
}
async function main() { // main entry point
document.getElementById('log').innerHTML = `human version: ${human.version} | tfjs version: ${human.tf.version['tfjs-core']}<br>platform: ${human.env.platform} | agent ${human.env.agent}`;
await human.webcam.start({ crop: true }); // find webcam and start it
human.video(human.webcam.element); // instruct human to continuously detect video frames
canvas.width = human.webcam.width; // set canvas resolution to input webcam native resolution
canvas.height = human.webcam.height;
canvas.onclick = async () => { // pause when clicked on screen and resume on next click
if (human.webcam.paused) await human.webcam.play();
else human.webcam.pause();
};
await drawLoop(); // start draw loop
}
window.onload = main;
</script>
</body>
</html>


@ -1,276 +0,0 @@
/**
* Human demo for browsers
*
* @description Experimental Demo app for Human using WebGPU
*
*/
// @ts-nocheck // typescript checks disabled as this is pure javascript
import Human from '../../dist/human.esm.js';
import GLBench from '../helpers/gl-bench.js';
const workerJS = './worker.js';
const backend = 'webgpu';
const config = {
main: { // processes input and runs gesture analysis
warmup: 'none',
backend,
modelBasePath: '../../models/',
async: false,
filter: { enabled: true },
face: { enabled: false },
object: { enabled: false },
gesture: { enabled: true },
hand: { enabled: false },
body: { enabled: false },
segmentation: { enabled: false },
},
face: { // runs all face models
warmup: 'none',
backend,
modelBasePath: '../../models/',
async: false,
filter: { enabled: false },
face: { enabled: true,
detector: { return: false, rotation: false },
mesh: { enabled: true },
iris: { enabled: false },
description: { enabled: true },
emotion: { enabled: false },
},
object: { enabled: false },
gesture: { enabled: false },
hand: { enabled: false },
body: { enabled: false },
segmentation: { enabled: false },
},
body: { // runs body model
warmup: 'none',
backend,
modelBasePath: '../../models/',
async: false,
filter: { enabled: false },
face: { enabled: false },
object: { enabled: false },
gesture: { enabled: false },
hand: { enabled: false },
body: { enabled: true },
segmentation: { enabled: false },
},
hand: { // runs hands model
warmup: 'none',
backend,
modelBasePath: '../../models/',
async: false,
filter: { enabled: false },
face: { enabled: false },
object: { enabled: false },
gesture: { enabled: false },
hand: { enabled: true, rotation: false },
body: { enabled: false },
segmentation: { enabled: false },
},
object: { // runs object model
warmup: 'none',
backend,
modelBasePath: '../../models/',
async: false,
filter: { enabled: false },
face: { enabled: false },
object: { enabled: true },
gesture: { enabled: false },
hand: { enabled: false },
body: { enabled: false },
segmentation: { enabled: false },
},
};
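// note: each worker config above enables exactly one model family, so every worker loads and
// warms up only the weights it needs, while the main-thread instance runs gesture analysis only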
let human;
let canvas;
let video;
let bench;
const busy = {
face: false,
hand: false,
body: false,
object: false,
};
const workers = {
face: null,
body: null,
hand: null,
object: null,
};
const time = {
main: 0,
draw: 0,
face: '[warmup]',
body: '[warmup]',
hand: '[warmup]',
object: '[warmup]',
};
const start = {
main: 0,
draw: 0,
face: 0,
body: 0,
hand: 0,
object: 0,
};
const result = { // initialize empty result object which will be partially filled with results from each thread
performance: {},
hand: [],
body: [],
face: [],
object: [],
};
function log(...msg) {
const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
// eslint-disable-next-line no-console
console.log(ts, ...msg);
}
function status(msg) { // minimal helper, assumed here since the demo calls status() but never defines it: surface status text on the page
document.getElementById('log').innerText = msg;
}
async function drawResults() {
start.draw = performance.now();
const interpolated = human.next(result);
await human.draw.all(canvas, interpolated);
time.draw = Math.round(1 + performance.now() - start.draw);
const fps = Math.round(10 * 1000 / time.main) / 10;
const draw = Math.round(10 * 1000 / time.draw) / 10;
document.getElementById('log').innerText = `Human: version ${human.version} | Performance: Main ${time.main}ms Face: ${time.face}ms Body: ${time.body}ms Hand: ${time.hand}ms Object ${time.object}ms | FPS: ${fps} / ${draw}`;
requestAnimationFrame(drawResults);
}
async function receiveMessage(msg) {
result[msg.data.type] = msg.data.result;
busy[msg.data.type] = false;
time[msg.data.type] = Math.round(performance.now() - start[msg.data.type]);
}
async function runDetection() {
start.main = performance.now();
if (!bench) {
bench = new GLBench(null, { trackGPU: false, chartHz: 20, chartLen: 20 });
bench.begin();
}
const ctx = canvas.getContext('2d');
// const image = await human.image(video);
// ctx.drawImage(image.canvas, 0, 0, canvas.width, canvas.height);
ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
if (!busy.face) {
busy.face = true;
start.face = performance.now();
const buffer = imageData.data.buffer.slice(0); // copy pixel data so each worker receives its own transferable buffer
workers.face.postMessage({ image: buffer, width: canvas.width, height: canvas.height, config: config.face, type: 'face' }, [buffer]);
}
if (!busy.body) {
busy.body = true;
start.body = performance.now();
const buffer = imageData.data.buffer.slice(0);
workers.body.postMessage({ image: buffer, width: canvas.width, height: canvas.height, config: config.body, type: 'body' }, [buffer]);
}
if (!busy.hand) {
busy.hand = true;
start.hand = performance.now();
const buffer = imageData.data.buffer.slice(0);
workers.hand.postMessage({ image: buffer, width: canvas.width, height: canvas.height, config: config.hand, type: 'hand' }, [buffer]);
}
if (!busy.object) {
busy.object = true;
start.object = performance.now();
const buffer = imageData.data.buffer.slice(0);
workers.object.postMessage({ image: buffer, width: canvas.width, height: canvas.height, config: config.object, type: 'object' }, [buffer]);
}
time.main = Math.round(performance.now() - start.main);
bench.nextFrame();
requestAnimationFrame(runDetection);
}
async function setupCamera() {
video = document.getElementById('video');
canvas = document.getElementById('canvas');
const output = document.getElementById('log');
let stream;
const constraints = {
audio: false,
video: {
facingMode: 'user',
resizeMode: 'crop-and-scale',
width: { ideal: document.body.clientWidth },
// height: { ideal: document.body.clientHeight }, // not set as we're using aspectRatio to derive height instead
aspectRatio: document.body.clientWidth / document.body.clientHeight,
},
};
// enumerate devices for diagnostic purposes
navigator.mediaDevices.enumerateDevices().then((devices) => log('enumerated devices:', devices));
log('camera constraints', constraints);
try {
stream = await navigator.mediaDevices.getUserMedia(constraints);
} catch (err) {
output.innerText += `\n${err.name}: ${err.message}`;
status(err.name);
log('camera error:', err);
return null; // no stream means there is nothing to attach, so bail out instead of crashing below
}
const tracks = stream.getVideoTracks();
log('enumerated viable tracks:', tracks);
const track = tracks[0];
const settings = track.getSettings();
log('selected video source:', track, settings);
const promise = new Promise((resolve) => {
video.onloadeddata = () => {
if (settings.width > settings.height) canvas.style.width = '100vw';
else canvas.style.height = '100vh';
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
video.play();
resolve();
};
});
// attach input to video element
video.srcObject = stream;
return promise;
}
async function startWorkers() {
if (!workers.face) workers.face = new Worker(workerJS);
if (!workers.body) workers.body = new Worker(workerJS);
if (!workers.hand) workers.hand = new Worker(workerJS);
if (!workers.object) workers.object = new Worker(workerJS);
workers.face.onmessage = receiveMessage;
workers.body.onmessage = receiveMessage;
workers.hand.onmessage = receiveMessage;
workers.object.onmessage = receiveMessage;
}
async function main() {
window.addEventListener('unhandledrejection', (evt) => {
// eslint-disable-next-line no-console
console.error(evt.reason || evt);
document.getElementById('log').innerHTML = (evt.reason && evt.reason.message) || evt.reason || evt;
status('exception error');
evt.preventDefault();
});
if (typeof Worker === 'undefined' || typeof OffscreenCanvas === 'undefined') {
status('workers are not supported');
return;
}
human = new Human(config.main);
document.getElementById('log').innerText = `Human: version ${human.version}`;
await startWorkers();
await setupCamera();
runDetection();
drawResults();
}
window.onload = main;
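
The demo above fans a single captured frame out to four dedicated workers, one per model family, and uses per-worker busy flags so a slow model never blocks the others. The fan-out pattern in isolation, as a sketch with generic names rather than the demo's identifiers:

function fanOut(imageData, workerMap, configMap) { // post one frame to every worker in the map
for (const [type, worker] of Object.entries(workerMap)) {
const buffer = imageData.data.buffer.slice(0); // independent copy per worker so each transfer detaches only its own buffer
worker.postMessage({ image: buffer, width: imageData.width, height: imageData.height, config: configMap[type], type }, [buffer]);
}
}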

View File

@ -1,21 +0,0 @@
/// <reference lib="webworker" />
// import Human from '../../dist/human.esm'; // load Human using IIFE script as Chrome Mobile does not support Modules as Workers
self.importScripts('../../assets/tf.es2017.js');
self.importScripts('../../assets/tf-backend-webgpu.es2017.js');
self.importScripts('../../dist/human.js');
let human;
onmessage = async (msg) => {
// received from index.js using:
// worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
// @ts-ignore // Human is registered as global namespace using IIFE script
// eslint-disable-next-line no-undef, new-cap
if (!human) human = new Human.default(msg.data.config);
const image = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
const result = await human.detect(image, msg.data.config);
postMessage({ result: result[msg.data.type], type: msg.data.type });
};
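
The worker above reconstructs an ImageData from the transferred pixels, runs a full detect() pass, and posts back only the result slice named by msg.data.type. A minimal main-thread counterpart, with hypothetical element ids and worker path used purely for illustration:

const worker = new Worker('./worker.js'); // hypothetical path to the worker script above
worker.onmessage = (msg) => console.log(msg.data.type, msg.data.result); // one result slice per request
const ctx = document.getElementById('canvas').getContext('2d');
const image = ctx.getImageData(0, 0, ctx.canvas.width, ctx.canvas.height);
const buffer = image.data.buffer.slice(0); // copy so the transfer does not detach the canvas pixel data
worker.postMessage({ image: buffer, width: image.width, height: image.height, config: { backend: 'webgpu' }, type: 'face' }, [buffer]);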

1
dist/human.d.ts vendored Normal file
View File

@ -0,0 +1 @@
export * from '../types/human';

1
dist/human.esm-nobundle.d.ts vendored Normal file
View File

@ -0,0 +1 @@
export * from '../types/human';

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

1
dist/human.esm.d.ts vendored Normal file
View File

@ -0,0 +1 @@
export * from '../types/human';

96349
dist/human.esm.js vendored

File diff suppressed because one or more lines are too long

Some files were not shown because too many files have changed in this diff