Compare commits

..

No commits in common. "1.0.1" and "main" have entirely different histories.
1.0.1 ... main

709 changed files with 118263 additions and 81184 deletions

27
.api-extractor.json Normal file
View File

@ -0,0 +1,27 @@
{
"$schema": "https://developer.microsoft.com/json-schemas/api-extractor/v7/api-extractor.schema.json",
"mainEntryPointFilePath": "types/lib/src/human.d.ts",
"compiler": {
"skipLibCheck": true
},
"newlineKind": "lf",
"dtsRollup": {
"enabled": true,
"untrimmedFilePath": "types/human.d.ts"
},
"docModel": { "enabled": false },
"tsdocMetadata": { "enabled": false },
"apiReport": { "enabled": false },
"messages": {
"compilerMessageReporting": {
"default": { "logLevel": "warning" }
},
"extractorMessageReporting": {
"default": { "logLevel": "warning" },
"ae-missing-release-tag": { "logLevel": "none" }
},
"tsdocMessageReporting": {
"default": { "logLevel": "warning" }
}
}
}

181
.build.json Normal file
View File

@ -0,0 +1,181 @@
{
"log": {
"enabled": true,
"debug": false,
"console": true,
"output": "test/build.log"
},
"profiles": {
"production": ["clean", "compile", "typings", "typedoc", "lint", "changelog"],
"development": ["serve", "watch", "compile"],
"serve": ["serve"],
"clean": ["clean"]
},
"clean": {
"locations": ["dist/*", "types/*", "typedoc/*"]
},
"lint": {
"locations": [ "**/*.json", "src/**/*.ts", "test/**/*.js", "demo/**/*.js", "**/*.md" ],
"rules": { }
},
"changelog": {
"log": "CHANGELOG.md"
},
"serve": {
"sslKey": "node_modules/@vladmandic/build/cert/https.key",
"sslCrt": "node_modules/@vladmandic/build/cert/https.crt",
"httpPort": 8000,
"httpsPort": 8001,
"documentRoot": ".",
"defaultFolder": "demo",
"defaultFile": "index.html"
},
"build": {
"global": {
"target": "es2018",
"sourcemap": false,
"treeShaking": true,
"ignoreAnnotations": true,
"banner": { "js": "/*\n Human\n homepage: <https://github.com/vladmandic/human>\n author: <https://github.com/vladmandic>'\n*/\n" }
},
"targets": [
{
"name": "tfjs/browser/version",
"platform": "browser",
"format": "esm",
"input": "tfjs/tf-version.ts",
"output": "dist/tfjs.version.js"
},
{
"name": "tfjs/nodejs/cpu",
"platform": "node",
"format": "cjs",
"input": "tfjs/tf-node.ts",
"output": "dist/tfjs.esm.js",
"external": ["@tensorflow"]
},
{
"name": "human/nodejs/cpu",
"platform": "node",
"format": "cjs",
"input": "src/human.ts",
"output": "dist/human.node.js",
"external": ["@tensorflow"]
},
{
"name": "tfjs/nodejs/gpu",
"platform": "node",
"format": "cjs",
"input": "tfjs/tf-node-gpu.ts",
"output": "dist/tfjs.esm.js",
"external": ["@tensorflow"]
},
{
"name": "human/nodejs/gpu",
"platform": "node",
"format": "cjs",
"input": "src/human.ts",
"output": "dist/human.node-gpu.js",
"external": ["@tensorflow"]
},
{
"name": "tfjs/nodejs/wasm",
"platform": "node",
"format": "cjs",
"input": "tfjs/tf-node-wasm.ts",
"output": "dist/tfjs.esm.js",
"minify": false,
"external": ["@tensorflow"]
},
{
"name": "human/nodejs/wasm",
"platform": "node",
"format": "cjs",
"input": "src/human.ts",
"output": "dist/human.node-wasm.js",
"external": ["@tensorflow"]
},
{
"name": "tfjs/browser/esm/nobundle",
"platform": "browser",
"format": "esm",
"input": "tfjs/tf-browser.ts",
"output": "dist/tfjs.esm.js",
"external": ["@tensorflow"]
},
{
"name": "human/browser/esm/nobundle",
"platform": "browser",
"format": "esm",
"input": "src/human.ts",
"output": "dist/human.esm-nobundle.js",
"sourcemap": false,
"external": ["@tensorflow"]
},
{
"name": "tfjs/browser/esm/bundle",
"platform": "browser",
"format": "esm",
"input": "tfjs/tf-browser.ts",
"output": "dist/tfjs.esm.js",
"sourcemap": false,
"minify": true
},
{
"name": "human/browser/iife/bundle",
"platform": "browser",
"format": "iife",
"input": "src/human.ts",
"output": "dist/human.js",
"minify": true,
"globalName": "Human",
"external": ["@tensorflow"]
},
{
"name": "human/browser/esm/bundle",
"platform": "browser",
"format": "esm",
"input": "src/human.ts",
"output": "dist/human.esm.js",
"sourcemap": true,
"minify": false,
"external": ["@tensorflow"],
"typings": "types/lib",
"typedoc": "typedoc"
},
{
"name": "demo/typescript",
"platform": "browser",
"format": "esm",
"input": "demo/typescript/index.ts",
"output": "demo/typescript/index.js",
"sourcemap": true,
"external": ["*/human.esm.js"]
},
{
"name": "demo/faceid",
"platform": "browser",
"format": "esm",
"input": "demo/faceid/index.ts",
"output": "demo/faceid/index.js",
"sourcemap": true,
"external": ["*/human.esm.js"]
},
{
"name": "demo/tracker",
"platform": "browser",
"format": "esm",
"input": "demo/tracker/index.ts",
"output": "demo/tracker/index.js",
"sourcemap": true,
"external": ["*/human.esm.js"]
}
]
},
"watch": {
"locations": [ "src/**/*", "tfjs/**/*", "demo/**/*.ts" ]
},
"typescript": {
"allowJs": false
}
}

View File

@ -1,73 +1,221 @@
{ {
"globals": {}, "globals": {
"env": {
"browser": true,
"commonjs": true,
"es6": true,
"node": true,
"jquery": true,
"es2020": true
}, },
"parser": "@typescript-eslint/parser",
"parserOptions": { "ecmaVersion": 2020 },
"plugins": [
"@typescript-eslint"
],
"extends": [
"eslint:recommended",
"plugin:import/errors",
"plugin:import/warnings",
"plugin:node/recommended",
"plugin:promise/recommended",
"plugin:json/recommended-with-comments",
"plugin:@typescript-eslint/eslint-recommended",
"plugin:@typescript-eslint/recommended",
"airbnb-base"
],
"ignorePatterns": [ "dist", "assets", "media", "models", "node_modules" ],
"rules": { "rules": {
"@typescript-eslint/ban-ts-comment": "off", "@typescript-eslint/no-require-imports":"off"
"@typescript-eslint/ban-types": "off", },
"@typescript-eslint/explicit-module-boundary-types": "off", "overrides": [
"@typescript-eslint/no-explicit-any": "off", {
"@typescript-eslint/no-var-requires": "off", "files": ["**/*.ts"],
"camelcase": "off", "parser": "@typescript-eslint/parser",
"dot-notation": "off", "parserOptions": { "ecmaVersion": "latest", "project": ["./tsconfig.json"] },
"func-names": "off", "plugins": ["@typescript-eslint"],
"guard-for-in": "off", "env": {
"import/extensions": "off", "browser": true,
"import/no-absolute-path": "off", "commonjs": false,
"import/no-extraneous-dependencies": "off", "node": false,
"import/no-unresolved": "off", "es2021": true
"import/prefer-default-export": "off", },
"lines-between-class-members": "off", "extends": [
"max-len": [1, 275, 3], "airbnb-base",
"newline-per-chained-call": "off", "eslint:recommended",
"no-async-promise-executor": "off", "plugin:@typescript-eslint/eslint-recommended",
"no-await-in-loop": "off", "plugin:@typescript-eslint/recommended",
"no-bitwise": "off", "plugin:@typescript-eslint/recommended-requiring-type-checking",
"no-case-declarations":"off", "plugin:@typescript-eslint/strict",
"no-continue": "off", "plugin:import/recommended",
"no-loop-func": "off", "plugin:promise/recommended"
"no-mixed-operators": "off", ],
"no-param-reassign":"off", "rules": {
"no-plusplus": "off", "@typescript-eslint/ban-ts-comment":"off",
"no-regex-spaces": "off", "@typescript-eslint/dot-notation":"off",
"no-restricted-globals": "off", "@typescript-eslint/no-empty-interface":"off",
"no-restricted-syntax": "off", "@typescript-eslint/no-inferrable-types":"off",
"no-return-assign": "off", "@typescript-eslint/no-misused-promises":"off",
"no-underscore-dangle": "off", "@typescript-eslint/no-unnecessary-condition":"off",
"node/no-missing-import": ["error", { "tryExtensions": [".js", ".json", ".ts"] }], "@typescript-eslint/no-unsafe-argument":"off",
"node/no-unpublished-import": "off", "@typescript-eslint/no-unsafe-assignment":"off",
"node/no-unpublished-require": "off", "@typescript-eslint/no-unsafe-call":"off",
"node/no-unsupported-features/es-syntax": "off", "@typescript-eslint/no-unsafe-member-access":"off",
"node/shebang": "off", "@typescript-eslint/no-unsafe-return":"off",
"object-curly-newline": "off", "@typescript-eslint/no-require-imports":"off",
"prefer-destructuring": "off", "@typescript-eslint/no-empty-object-type":"off",
"prefer-template":"off", "@typescript-eslint/non-nullable-type-assertion-style":"off",
"promise/always-return": "off", "@typescript-eslint/prefer-for-of":"off",
"promise/catch-or-return": "off", "@typescript-eslint/prefer-nullish-coalescing":"off",
"promise/no-nesting": "off", "@typescript-eslint/prefer-ts-expect-error":"off",
"radix": "off" "@typescript-eslint/restrict-plus-operands":"off",
} "@typescript-eslint/restrict-template-expressions":"off",
} "dot-notation":"off",
"guard-for-in":"off",
"import/extensions": ["off", "always"],
"import/no-unresolved":"off",
"import/prefer-default-export":"off",
"lines-between-class-members":"off",
"max-len": [1, 275, 3],
"no-async-promise-executor":"off",
"no-await-in-loop":"off",
"no-bitwise":"off",
"no-continue":"off",
"no-lonely-if":"off",
"no-mixed-operators":"off",
"no-param-reassign":"off",
"no-plusplus":"off",
"no-regex-spaces":"off",
"no-restricted-syntax":"off",
"no-return-assign":"off",
"no-void":"off",
"object-curly-newline":"off",
"prefer-destructuring":"off",
"prefer-template":"off",
"radix":"off"
}
},
{
"files": ["**/*.d.ts"],
"parser": "@typescript-eslint/parser",
"parserOptions": { "ecmaVersion": "latest", "project": ["./tsconfig.json"] },
"plugins": ["@typescript-eslint"],
"env": {
"browser": true,
"commonjs": false,
"node": false,
"es2021": true
},
"extends": [
"airbnb-base",
"eslint:recommended",
"plugin:@typescript-eslint/eslint-recommended",
"plugin:@typescript-eslint/recommended",
"plugin:@typescript-eslint/recommended-requiring-type-checking",
"plugin:@typescript-eslint/strict",
"plugin:import/recommended",
"plugin:promise/recommended"
],
"rules": {
"@typescript-eslint/array-type":"off",
"@typescript-eslint/ban-types":"off",
"@typescript-eslint/consistent-indexed-object-style":"off",
"@typescript-eslint/consistent-type-definitions":"off",
"@typescript-eslint/no-empty-interface":"off",
"@typescript-eslint/no-explicit-any":"off",
"@typescript-eslint/no-invalid-void-type":"off",
"@typescript-eslint/no-unnecessary-type-arguments":"off",
"@typescript-eslint/no-unnecessary-type-constraint":"off",
"comma-dangle":"off",
"indent":"off",
"lines-between-class-members":"off",
"max-classes-per-file":"off",
"max-len":"off",
"no-multiple-empty-lines":"off",
"no-shadow":"off",
"no-use-before-define":"off",
"quotes":"off",
"semi":"off"
}
},
{
"files": ["**/*.js"],
"parserOptions": { "sourceType": "module", "ecmaVersion": "latest" },
"plugins": [],
"env": {
"browser": true,
"commonjs": true,
"node": true,
"es2021": true
},
"extends": [
"airbnb-base",
"eslint:recommended",
"plugin:node/recommended",
"plugin:promise/recommended"
],
"rules": {
"dot-notation":"off",
"import/extensions": ["error", "always"],
"import/no-extraneous-dependencies":"off",
"max-len": [1, 275, 3],
"no-await-in-loop":"off",
"no-bitwise":"off",
"no-continue":"off",
"no-mixed-operators":"off",
"no-param-reassign":"off",
"no-plusplus":"off",
"no-regex-spaces":"off",
"no-restricted-syntax":"off",
"no-return-assign":"off",
"node/no-unsupported-features/es-syntax":"off",
"object-curly-newline":"off",
"prefer-destructuring":"off",
"prefer-template":"off",
"radix":"off"
}
},
{
"files": ["**/*.json"],
"parserOptions": { "ecmaVersion": "latest" },
"plugins": ["json"],
"env": {
"browser": false,
"commonjs": false,
"node": false,
"es2021": false
},
"extends": []
},
{
"files": ["**/*.html"],
"parserOptions": { "sourceType": "module", "ecmaVersion": "latest" },
"parser": "@html-eslint/parser",
"plugins": ["html", "@html-eslint"],
"env": {
"browser": true,
"commonjs": false,
"node": false,
"es2021": false
},
"extends": ["plugin:@html-eslint/recommended"],
"rules": {
"@html-eslint/element-newline":"off",
"@html-eslint/attrs-newline":"off",
"@html-eslint/indent": ["error", 2]
}
},
{
"files": ["**/*.md"],
"plugins": ["markdown"],
"processor": "markdown/markdown",
"rules": {
"no-undef":"off"
}
},
{
"files": ["**/*.md/*.js"],
"rules": {
"@typescript-eslint/no-unused-vars":"off",
"@typescript-eslint/triple-slash-reference":"off",
"import/newline-after-import":"off",
"import/no-unresolved":"off",
"no-console":"off",
"no-global-assign":"off",
"no-multi-spaces":"off",
"no-restricted-globals":"off",
"no-undef":"off",
"no-unused-vars":"off",
"node/no-missing-import":"off",
"node/no-missing-require":"off",
"promise/catch-or-return":"off"
}
}
],
"ignorePatterns": [
"node_modules",
"assets",
"dist",
"demo/helpers/*.js",
"demo/typescript/*.js",
"demo/faceid/*.js",
"demo/tracker/*.js",
"typedoc"
]
}

11
.github/FUNDING.yml vendored Normal file
View File

@ -0,0 +1,11 @@
github: [vladmandic]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']

View File

@ -13,11 +13,23 @@ assignees: vladmandic
**Expected Behavior** **Expected Behavior**
**Environment **Environment**
- Browser or NodeJS and version (e.g. NodeJS 14.15 or Chrome 86)
- OS and Hardware platform (e.g. Windows 10, Ubuntu Linux on x64, Android 10) - Human library version?
- Packager (if any) (e.g, webpack, rollup, parcel, esbuild, etc.) - Built-in demo or custom code?
- Type of module used (e.g. `js`, `esm`, `esm-nobundle`)?
- TensorFlow/JS version (if not using bundled module)?
- Browser or NodeJS and version (e.g. *NodeJS 14.15* or *Chrome 89*)?
- OS and Hardware platform (e.g. *Windows 10*, *Ubuntu Linux on x64*, *Android 10*)?
- Packager (if any) (e.g, *webpack*, *rollup*, *parcel*, *esbuild*, etc.)?
- Framework (if any) (e.g. *React*, *NextJS*, etc.)?
**Diagnostics**
- Check out any applicable [diagnostic steps](https://github.com/vladmandic/human/wiki/Diag)
**Additional** **Additional**
- For installation or startup issues include your `package.json` - For installation or startup issues include your `package.json`
- For usage issues, it is recommended to post your code as [gist](https://gist.github.com/) - For usage issues, it is recommended to post your code as [gist](https://gist.github.com/)
- For general questions, create a [discussion topic](https://github.com/vladmandic/human/discussions)

View File

@ -1,3 +1,3 @@
# Human Library: Pull Request Template # Pull Request Template
<br> <br>

11
.gitignore vendored
View File

@ -1,2 +1,9 @@
node_modules node_modules/
alternative types/lib
pnpm-lock.yaml
package-lock.json
*.swp
samples/**/*.mp4
samples/**/*.webm
temp
tmp

16
.hintrc Normal file
View File

@ -0,0 +1,16 @@
{
"extends": [
"web-recommended"
],
"browserslist": [
"chrome >= 90",
"edge >= 90",
"firefox >= 100",
"android >= 90",
"safari >= 15"
],
"hints": {
"no-inline-styles": "off",
"meta-charset-utf-8": "off"
}
}

View File

@ -1,6 +1,7 @@
{ {
"MD012": false, "MD012": false,
"MD013": false, "MD013": false,
"MD029": false,
"MD033": false, "MD033": false,
"MD036": false, "MD036": false,
"MD041": false "MD041": false

7
.npmignore Normal file
View File

@ -0,0 +1,7 @@
node_modules
pnpm-lock.yaml
samples
typedoc
test
wiki
types/lib

5
.npmrc Normal file
View File

@ -0,0 +1,5 @@
force=true
omit=dev
legacy-peer-deps=true
strict-peer-dependencies=false
node-options='--no-deprecation'

10
.vscode/settings.json vendored Normal file
View File

@ -0,0 +1,10 @@
{
"search.exclude": {
"dist/*": true,
"node_modules/*": true,
"types": true,
"typedoc": true,
},
"search.useGlobalIgnoreFiles": true,
"search.useParentIgnoreFiles": true
}

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,33 @@
# Human Library: Code of Conduct # Code of Conduct
Use your best judgement
If it will possibly make others uncomfortable, do not post it
- Be respectful
Disagreement is not an opportunity to attack someone else's thoughts or opinions
Although views may differ, remember to approach every situation with patience and care
- Be considerate
Think about how your contribution will affect others in the community
- Be open minded
Embrace new people and new ideas. Our community is continually evolving and we welcome positive change
Be mindful of your language
Any of the following behavior is unacceptable:
- Offensive comments of any kind
- Threats or intimidation
- Sexually explicit material
- Or any other kinds of harassment
If you believe someone is violating the code of conduct, we ask that you report it
Participants asked to stop any harassing behavior are expected to comply immediately
<br> <br>
## Usage Restrictions
`Human` library does not allow for usage in the following scenarios:
- Any life-critical decisions
- Any form of surveillance without consent of the user is explicitly out of scope

View File

@ -1,3 +1,22 @@
# Human Library: Contributing Guidelines # Contributing Guidelines
<br> Pull requests from everyone are welcome
Procedure for contributing:
- Create a fork of the repository on github
In a top right corner of a GitHub, select "Fork"
It's recommended to fork the latest version from the main branch to avoid any possible conflicting code updates
- Clone your forked repository to your local system
`git clone https://github.com/<your-username>/<your-fork>`
- Make your changes
- Test your changes against code guidelines
`npm run lint`
- Test your changes in Browser and NodeJS
`npm run dev` and navigate to https://localhost:10031
`node test/test-node.js`
- Push changes to your fork
Exclude files in `/dist`, `/types`, `/typedoc` from the commit as they are dynamically generated during build
- Submit a PR (pull request)
Your pull request will be reviewed and pending review results, merged into main branch

View File

@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2020 Vladimir Mandic Copyright (c) Vladimir Mandic
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal

461
README.md
View File

@ -1,80 +1,144 @@
![Version](https://img.shields.io/github/package-json/v/vladmandic/human?style=flat-square) [![](https://img.shields.io/static/v1?label=Sponsor&message=%E2%9D%A4&logo=GitHub&color=%23fe8e86)](https://github.com/sponsors/vladmandic)
![Last Commit](https://img.shields.io/github/last-commit/vladmandic/human?style=flat-square) ![Git Version](https://img.shields.io/github/package-json/v/vladmandic/human?style=flat-square&svg=true&label=git)
![License](https://img.shields.io/github/license/vladmandic/human?style=flat-square) ![NPM Version](https://img.shields.io/npm/v/@vladmandic/human.png?style=flat-square)
![GitHub Status Checks](https://img.shields.io/github/checks-status/vladmandic/human/main?style=flat-square]) ![Last Commit](https://img.shields.io/github/last-commit/vladmandic/human?style=flat-square&svg=true)
![Vulnerabilities](https://img.shields.io/snyk/vulnerabilities/github/vladmandic/human?style=flat-square) ![License](https://img.shields.io/github/license/vladmandic/human?style=flat-square&svg=true)
![GitHub Status Checks](https://img.shields.io/github/checks-status/vladmandic/human/main?style=flat-square&svg=true)
# Human Library # Human Library
**3D Face Detection & Rotation Tracking, Face Embedding & Recognition,** **AI-powered 3D Face Detection & Rotation Tracking, Face Description & Recognition,**
**Body Pose Tracking, Hand & Finger Tracking,** **Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis,**
**Iris Analysis, Age & Gender & Emotion Prediction** **Age & Gender & Emotion Prediction, Gaze Tracking, Gesture Recognition, Body Segmentation**
**& Gesture Recognition**
<br> <br>
Native JavaScript module using TensorFlow/JS Machine Learning library ## Highlights
Compatible with *Browser*, *WebWorker* and *NodeJS* execution on both Windows and Linux
- Browser/WebWorker: Compatible with *CPU*, *WebGL*, *WASM* and *WebGPU* backends - Compatible with most server-side and client-side environments and frameworks
- NodeJS: Compatible with software *tfjs-node* and CUDA accelerated backends *tfjs-node-gpu* - Combines multiple machine learning models which can be switched on-demand depending on the use-case
- Related models are executed in an attention pipeline to provide details when needed
- Optimized input pre-processing that can enhance image quality of any type of inputs
- Detection of frame changes to trigger only required models for improved performance
- Intelligent temporal interpolation to provide smooth results regardless of processing performance
- Simple unified API
- Built-in Image, Video and WebCam handling
Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) for processing of live WebCam video or static images [*Jump to Quick Start*](#quick-start)
<br> <br>
## Compatibility
**Browser**:
- Compatible with both desktop and mobile platforms
- Compatible with *WebGPU*, *WebGL*, *WASM*, *CPU* backends
- Compatible with *WebWorker* execution
- Compatible with *WebView*
- Primary platform: *Chromium*-based browsers
- Secondary platform: *Firefox*, *Safari*
**NodeJS**:
- Compatible with *WASM* backend for executions on architectures where *tensorflow* binaries are not available
- Compatible with *tfjs-node* using software execution via *tensorflow* shared libraries
- Compatible with *tfjs-node* using GPU-accelerated execution via *tensorflow* shared libraries and nVidia CUDA
- Supported versions are from **14.x** to **22.x**
- NodeJS version **23.x** is not supported due to breaking changes and issues with `@tensorflow/tfjs`
<br>
## Releases
- [Release Notes](https://github.com/vladmandic/human/releases)
- [NPM Link](https://www.npmjs.com/package/@vladmandic/human)
## Demos
*Check out [**Simple Live Demo**](https://vladmandic.github.io/human/demo/typescript/index.html) fully annotated app as a good starting point ([html](https://github.com/vladmandic/human/blob/main/demo/typescript/index.html))([code](https://github.com/vladmandic/human/blob/main/demo/typescript/index.ts))*
*Check out [**Main Live Demo**](https://vladmandic.github.io/human/demo/index.html) app for advanced processing of webcam, video stream or static images with all possible tunable options*
- To start video detection, simply press *Play*
- To process images, simply drag & drop in your Browser window
- Note: For optimal performance, select only models you'd like to use
- Note: If you have modern GPU, *WebGL* (default) backend is preferred, otherwise select *WASM* backend
<br>
- [**List of all Demo applications**](https://github.com/vladmandic/human/wiki/Demos)
- [**Live Examples gallery**](https://vladmandic.github.io/human/samples/index.html)
### Browser Demos
*All browser demos are self-contained without any external dependencies*
- **Full** [[*Live*]](https://vladmandic.github.io/human/demo/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo): Main browser demo app that showcases all Human capabilities
- **Simple** [[*Live*]](https://vladmandic.github.io/human/demo/typescript/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/typescript): Simple WebCam processing demo in TypeScript
- **Embedded** [[*Live*]](https://vladmandic.github.io/human/demo/video/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/video/index.html): Even simpler demo with tiny code embedded in HTML file
- **Face Detect** [[*Live*]](https://vladmandic.github.io/human/demo/facedetect/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facedetect): Extract faces from images and processes details
- **Face Match** [[*Live*]](https://vladmandic.github.io/human/demo/facematch/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Extract faces from images, calculates face descriptors and similarities and matches them to known database
- **Face ID** [[*Live*]](https://vladmandic.github.io/human/demo/faceid/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/faceid): Runs multiple checks to validate webcam input before performing face match to faces in IndexedDB
- **Multi-thread** [[*Live*]](https://vladmandic.github.io/human/demo/multithread/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread): Runs each Human module in a separate web worker for highest possible performance
- **NextJS** [[*Live*]](https://vladmandic.github.io/human-next/out/index.html) [[*Details*]](https://github.com/vladmandic/human-next): Use Human with TypeScript, NextJS and ReactJS
- **ElectronJS** [[*Details*]](https://github.com/vladmandic/human-electron): Use Human with TypeScript and ElectronJS to create standalone cross-platform apps
- **3D Analysis with BabylonJS** [[*Live*]](https://vladmandic.github.io/human-motion/src/index.html) [[*Details*]](https://github.com/vladmandic/human-motion): 3D tracking and visualization of head, face, eye, body and hand
- **VRM Virtual Model Tracking with Three.JS** [[*Live*]](https://vladmandic.github.io/human-three-vrm/src/human-vrm.html) [[*Details*]](https://github.com/vladmandic/human-three-vrm): VR model with head, face, eye, body and hand tracking
- **VRM Virtual Model Tracking with BabylonJS** [[*Live*]](https://vladmandic.github.io/human-bjs-vrm/src/index.html) [[*Details*]](https://github.com/vladmandic/human-bjs-vrm): VR model with head, face, eye, body and hand tracking
### NodeJS Demos
*NodeJS demos may require extra dependencies which are used to decode inputs*
*See header of each demo to see its dependencies as they are not automatically installed with `Human`*
- **Main** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node.js): Process images from files, folders or URLs using native methods
- **Canvas** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-canvas.js): Process image from file or URL and draw results to a new image file using `node-canvas`
- **Video** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-video.js): Processing of video input using `ffmpeg`
- **WebCam** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-webcam.js): Processing of webcam screenshots using `fswebcam`
- **Events** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-event.js): Showcases usage of `Human` eventing to get notifications on processing
- **Similarity** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-similarity.js): Compares two input images for similarity of detected faces
- **Face Match** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch/node-match.js): Parallel processing of face **match** in multiple child worker threads
- **Multiple Workers** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread/node-multiprocess.js): Runs multiple parallel `human` by dispatching them to pool of pre-created worker processes
- **Dynamic Load** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Loads Human dynamically with multiple different desired backends
## Project pages ## Project pages
- [**Live Demo**](https://vladmandic.github.io/human/demo/index.html)
- [**Code Repository**](https://github.com/vladmandic/human) - [**Code Repository**](https://github.com/vladmandic/human)
- [**NPM Package**](https://www.npmjs.com/package/@vladmandic/human) - [**NPM Package**](https://www.npmjs.com/package/@vladmandic/human)
- [**Issues Tracker**](https://github.com/vladmandic/human/issues) - [**Issues Tracker**](https://github.com/vladmandic/human/issues)
- [**Change Log**](https://github.com/vladmandic/human/CHANGELOG.md) - [**TypeDoc API Specification - Main class**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
- [**TypeDoc API Specification - Full**](https://vladmandic.github.io/human/typedoc/)
<br> - [**Change Log**](https://github.com/vladmandic/human/blob/main/CHANGELOG.md)
- [**Current To-do List**](https://github.com/vladmandic/human/blob/main/TODO.md)
## Wiki pages ## Wiki pages
- [**Home**](https://github.com/vladmandic/human/wiki) - [**Home**](https://github.com/vladmandic/human/wiki)
- [**Demos**](https://github.com/vladmandic/human/wiki/Demos)
- [**Installation**](https://github.com/vladmandic/human/wiki/Install) - [**Installation**](https://github.com/vladmandic/human/wiki/Install)
- [**Usage & Functions**](https://github.com/vladmandic/human/wiki/Usage) - [**Usage & Functions**](https://github.com/vladmandic/human/wiki/Usage)
- [**Configuration Details**](https://github.com/vladmandic/human/wiki/Configuration) - [**Configuration Details**](https://github.com/vladmandic/human/wiki/Config)
- [**Output Details**](https://github.com/vladmandic/human/wiki/Outputs) - [**Result Details**](https://github.com/vladmandic/human/wiki/Result)
- [**Face Embedding and Recognition**](https://github.com/vladmandic/human/wiki/Embedding) - [**Customizing Draw Methods**](https://github.com/vladmandic/human/wiki/Draw)
- [**Caching & Smoothing**](https://github.com/vladmandic/human/wiki/Caching)
- [**Input Processing**](https://github.com/vladmandic/human/wiki/Image)
- [**Face Recognition & Face Description**](https://github.com/vladmandic/human/wiki/Embedding)
- [**Gesture Recognition**](https://github.com/vladmandic/human/wiki/Gesture) - [**Gesture Recognition**](https://github.com/vladmandic/human/wiki/Gesture)
- [**Common Issues**](https://github.com/vladmandic/human/wiki/Issues)
<br> - [**Background and Benchmarks**](https://github.com/vladmandic/human/wiki/Background)
## Additional notes ## Additional notes
- [**Notes on Backends**](https://github.com/vladmandic/human/wiki/Backends) - [**Comparing Backends**](https://github.com/vladmandic/human/wiki/Backends)
- [**Development Server**](https://github.com/vladmandic/human/wiki/Development-Server) - [**Development Server**](https://github.com/vladmandic/human/wiki/Development-Server)
- [**Build Process**](https://github.com/vladmandic/human/wiki/Build-Process) - [**Build Process**](https://github.com/vladmandic/human/wiki/Build-Process)
- [**Adding Custom Modules**](https://github.com/vladmandic/human/wiki/Module)
- [**Performance Notes**](https://github.com/vladmandic/human/wiki/Performance) - [**Performance Notes**](https://github.com/vladmandic/human/wiki/Performance)
- [**Performance Profiling**](https://github.com/vladmandic/human/wiki/Profiling) - [**Performance Profiling**](https://github.com/vladmandic/human/wiki/Profiling)
- [**Platform Support**](https://github.com/vladmandic/human/wiki/Platforms) - [**Platform Support**](https://github.com/vladmandic/human/wiki/Platforms)
- [**Diagnostic and Performance trace information**](https://github.com/vladmandic/human/wiki/Diag)
- [**Dockerize Human applications**](https://github.com/vladmandic/human/wiki/Docker)
- [**List of Models & Credits**](https://github.com/vladmandic/human/wiki/Models) - [**List of Models & Credits**](https://github.com/vladmandic/human/wiki/Models)
- [**Models Download Repository**](https://github.com/vladmandic/human-models)
<br> - [**Security & Privacy Policy**](https://github.com/vladmandic/human/blob/main/SECURITY.md)
- [**License & Usage Restrictions**](https://github.com/vladmandic/human/blob/main/LICENSE)
## Default models
Default models in Human library are:
- **Face Detection**: MediaPipe BlazeFace-Back
- **Face Mesh**: MediaPipe FaceMesh
- **Face Iris Analysis**: MediaPipe Iris
- **Emotion Detection**: Oarriaga Emotion
- **Gender Detection**: Oarriaga Gender
- **Age Detection**: SSR-Net Age IMDB
- **Body Analysis**: PoseNet
- **Face Embedding**: Sirius-AI MobileFaceNet Embedding
Note that alternative models are provided and can be enabled via configuration
For example, `PoseNet` model can be switched for `BlazePose` model depending on the use case
For more info, see [**Configuration Details**](https://github.com/vladmandic/human/wiki/Configuration) and [**List of Models**](https://github.com/vladmandic/human/wiki/Models)
<br> <br>
@ -82,48 +146,111 @@ For more info, see [**Configuration Details**](https://github.com/vladmandic/hum
*Suggestions are welcome!* *Suggestions are welcome!*
<br><hr><br> <hr><br>
## Options ## App Examples
As presented in the demo application... Visit [Examples gallery](https://vladmandic.github.io/human/samples/index.html) for more examples
[<img src="assets/samples.jpg" width="640"/>](assets/samples.jpg)
![Options visible in demo](assets/screenshot-menu.png)
<br><hr><br>
## Examples
<br> <br>
**Training image:** ## Options
![Example Training Image](assets/screenshot-sample.png) All options as presented in the demo application...
[demo/index.html](demo/index.html)
[<img src="assets/screenshot-menu.png"/>](assets/screenshot-menu.png)
**Using static images:** <br>
![Example Using Image](assets/screenshot-images.jpg) **Results Browser:**
[ *Demo -> Display -> Show Results* ]<br>
[<img src="assets/screenshot-results.png"/>](assets/screenshot-results.png)
**Live WebCam view:** <br>
![Example Using WebCam](assets/screenshot-webcam.jpg) ## Advanced Examples
1. **Face Similarity Matching:**
Extracts all faces from provided input images,
sorts them by similarity to selected face
and optionally matches detected face with database of known people to guess their names
> [demo/facematch](demo/facematch/index.html)
[<img src="assets/screenshot-facematch.jpg" width="640"/>](assets/screenshot-facematch.jpg)
2. **Face Detect:**
Extracts all detected faces from loaded images on-demand and highlights face details on a selected face
> [demo/facedetect](demo/facedetect/index.html)
[<img src="assets/screenshot-facedetect.jpg" width="640"/>](assets/screenshot-facedetect.jpg)
3. **Face ID:**
Performs validation check on a webcam input to detect a real face and matches it to known faces stored in database
> [demo/faceid](demo/faceid/index.html)
[<img src="assets/screenshot-faceid.jpg" width="640"/>](assets/screenshot-faceid.jpg)
<br>
4. **3D Rendering:**
> [human-motion](https://github.com/vladmandic/human-motion)
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-face.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-face.jpg)
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-body.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-body.jpg)
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-hand.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-hand.jpg)
<br>
5. **VR Model Tracking:**
> [human-three-vrm](https://github.com/vladmandic/human-three-vrm)
> [human-bjs-vrm](https://github.com/vladmandic/human-bjs-vrm)
[<img src="https://github.com/vladmandic/human-three-vrm/raw/main/assets/human-vrm-screenshot.jpg" width="640"/>](https://github.com/vladmandic/human-three-vrm/raw/main/assets/human-vrm-screenshot.jpg)
6. **Human as OS native application:**
> [human-electron](https://github.com/vladmandic/human-electron)
<br>
**468-Point Face Mesh Details:**
(view in full resolution to see keypoints)
[<img src="assets/facemesh.png" width="400"/>](assets/facemesh.png)
<br><hr><br> <br><hr><br>
Example simple app that uses Human to process video input and ## Quick Start
Simply load `Human` (*IIFE version*) directly from a cloud CDN in your HTML file:
(pick one: `jsdelivr`, `unpkg` or `cdnjs`)
```html
<!DOCTYPE HTML>
<script src="https://cdn.jsdelivr.net/npm/@vladmandic/human/dist/human.js"></script>
<script src="https://unpkg.dev/@vladmandic/human/dist/human.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/human/3.0.0/human.js"></script>
```
For details, including how to use `Browser ESM` version or `NodeJS` version of `Human`, see [**Installation**](https://github.com/vladmandic/human/wiki/Install)
<br>
## Code Examples
Simple app that uses Human to process video input and
draw output on screen using internal draw helper functions draw output on screen using internal draw helper functions
```js ```js
import Human from '@vladmandic/human';
// create instance of human with simple configuration using default values // create instance of human with simple configuration using default values
const config = { backend: 'webgl' }; const config = { backend: 'webgl' };
const human = new Human(config); const human = new Human.Human(config);
// select input HTMLVideoElement and output HTMLCanvasElement from page
const inputVideo = document.getElementById('video-id');
const outputCanvas = document.getElementById('canvas-id');
function detectVideo() { function detectVideo() {
// select input HTMLVideoElement and output HTMLCanvasElement from page
const inputVideo = document.getElementById('video-id');
const outputCanvas = document.getElementById('canvas-id');
// perform processing using default configuration // perform processing using default configuration
human.detect(inputVideo).then((result) => { human.detect(inputVideo).then((result) => {
// result object will contain detected details // result object will contain detected details
@ -135,16 +262,208 @@ function detectVideo() {
human.draw.body(outputCanvas, result.body); human.draw.body(outputCanvas, result.body);
human.draw.hand(outputCanvas, result.hand); human.draw.hand(outputCanvas, result.hand);
human.draw.gesture(outputCanvas, result.gesture); human.draw.gesture(outputCanvas, result.gesture);
// loop immediate to next frame // and loop immediate to the next frame
requestAnimationFrame(detectVideo); requestAnimationFrame(detectVideo);
return result;
}); });
} }
detectVideo(); detectVideo();
``` ```
or using `async/await`:
```js
// create instance of human with simple configuration using default values
const config = { backend: 'webgl' };
const human = new Human(config); // create instance of Human
const inputVideo = document.getElementById('video-id');
const outputCanvas = document.getElementById('canvas-id');
async function detectVideo() {
const result = await human.detect(inputVideo); // run detection
human.draw.all(outputCanvas, result); // draw all results
requestAnimationFrame(detectVideo); // run loop
}
detectVideo(); // start loop
```
or using `Events`:
```js
// create instance of human with simple configuration using default values
const config = { backend: 'webgl' };
const human = new Human(config); // create instance of Human
const inputVideo = document.getElementById('video-id');
const outputCanvas = document.getElementById('canvas-id');
human.events.addEventListener('detect', () => { // event gets triggered when detect is complete
human.draw.all(outputCanvas, human.result); // draw all results
});
function detectVideo() {
human.detect(inputVideo) // run detection
.then(() => requestAnimationFrame(detectVideo)); // upon detect complete start processing of the next frame
}
detectVideo(); // start loop
```
or using interpolated results for smooth video processing by separating detection and drawing loops:
```js
const human = new Human(); // create instance of Human
const inputVideo = document.getElementById('video-id');
const outputCanvas = document.getElementById('canvas-id');
let result;
async function detectVideo() {
result = await human.detect(inputVideo); // run detection
requestAnimationFrame(detectVideo); // run detect loop
}
async function drawVideo() {
if (result) { // check if result is available
const interpolated = human.next(result); // get smoothened result using last-known results
human.draw.all(outputCanvas, interpolated); // draw the frame
}
requestAnimationFrame(drawVideo); // run draw loop
}
detectVideo(); // start detection loop
drawVideo(); // start draw loop
```
or same, but using built-in full video processing instead of running manual frame-by-frame loop:
```js
const human = new Human(); // create instance of Human
const inputVideo = document.getElementById('video-id');
const outputCanvas = document.getElementById('canvas-id');
async function drawResults() {
const interpolated = human.next(); // get smoothened result using last-known results
human.draw.all(outputCanvas, interpolated); // draw the frame
requestAnimationFrame(drawResults); // run draw loop
}
human.video(inputVideo); // start detection loop which continuously updates results
drawResults(); // start draw loop
```
or using built-in webcam helper methods that take care of video handling completely:
```js
const human = new Human(); // create instance of Human
const outputCanvas = document.getElementById('canvas-id');
async function drawResults() {
const interpolated = human.next(); // get smoothened result using last-known results
human.draw.canvas(outputCanvas, human.webcam.element); // draw current webcam frame
human.draw.all(outputCanvas, interpolated); // draw the frame detection results
requestAnimationFrame(drawResults); // run draw loop
}
await human.webcam.start({ crop: true });
human.video(human.webcam.element); // start detection loop which continuously updates results
drawResults(); // start draw loop
```
And for even better results, you can run detection in a separate web worker thread
<br><hr><br>
## Inputs
`Human` library can process all known input types:
- `Image`, `ImageData`, `ImageBitmap`, `Canvas`, `OffscreenCanvas`, `Tensor`,
- `HTMLImageElement`, `HTMLCanvasElement`, `HTMLVideoElement`, `HTMLMediaElement`
Additionally, `HTMLVideoElement`, `HTMLMediaElement` can be a standard `<video>` tag that links to:
- WebCam on user's system
- Any supported video type
e.g. `.mp4`, `.avi`, etc.
- Additional video types supported via *HTML5 Media Source Extensions*
e.g.: **HLS** (*HTTP Live Streaming*) using `hls.js` or **DASH** (*Dynamic Adaptive Streaming over HTTP*) using `dash.js`
- **WebRTC** media track using built-in support
<br><hr><br>
## Detailed Usage
- [**Wiki Home**](https://github.com/vladmandic/human/wiki)
- [**List of all available methods, properies and namespaces**](https://github.com/vladmandic/human/wiki/Usage)
- [**TypeDoc API Specification - Main class**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
- [**TypeDoc API Specification - Full**](https://vladmandic.github.io/human/typedoc/)
![typedoc](assets/screenshot-typedoc.png)
<br><hr><br>
## TypeDefs
`Human` is written using TypeScript strong typing and ships with full **TypeDefs** for all classes defined by the library bundled in `types/human.d.ts` and enabled by default
*Note*: This does not include embedded `tfjs`
If you want to use embedded `tfjs` inside `Human` (`human.tf` namespace) and still full **typedefs**, add this code:
> import type * as tfjs from '@vladmandic/human/dist/tfjs.esm';
> const tf = human.tf as typeof tfjs;
This is not enabled by default as `Human` does not ship with full **TFJS TypeDefs** due to size considerations
Enabling `tfjs` TypeDefs as above creates additional project (dev-only as only types are required) dependencies as defined in `@vladmandic/human/dist/tfjs.esm.d.ts`:
> @tensorflow/tfjs-core, @tensorflow/tfjs-converter, @tensorflow/tfjs-backend-wasm, @tensorflow/tfjs-backend-webgl
<br><hr><br>
## Default models
Default models in Human library are:
- **Face Detection**: *MediaPipe BlazeFace Back variation*
- **Face Mesh**: *MediaPipe FaceMesh*
- **Face Iris Analysis**: *MediaPipe Iris*
- **Face Description**: *HSE FaceRes*
- **Emotion Detection**: *Oarriaga Emotion*
- **Body Analysis**: *MoveNet Lightning variation*
- **Hand Analysis**: *HandTrack & MediaPipe HandLandmarks*
- **Body Segmentation**: *Google Selfie*
- **Object Detection**: *CenterNet with MobileNet v3*
Note that alternative models are provided and can be enabled via configuration
For example, body pose detection by default uses *MoveNet Lightning*, but can be switched to *MultiNet Thunder* for higher precision or *Multinet MultiPose* for multi-person detection or even *PoseNet*, *BlazePose* or *EfficientPose* depending on the use case
For more info, see [**Configuration Details**](https://github.com/vladmandic/human/wiki/Configuration) and [**List of Models**](https://github.com/vladmandic/human/wiki/Models)
<br><hr><br>
## Diagnostics
- [How to get diagnostic information or performance trace information](https://github.com/vladmandic/human/wiki/Diag)
<br><hr><br>
`Human` library is written in [TypeScript](https://www.typescriptlang.org/docs/handbook/intro.html) **5.1** using [TensorFlow/JS](https://www.tensorflow.org/js/) **4.10** and conforming to latest `JavaScript` [ECMAScript version 2022](https://262.ecma-international.org/) standard
Build target for distributables is `JavaScript` [ECMAScript version 2018](https://262.ecma-international.org/9.0/)
<br> <br>
![Downloads](https://img.shields.io/npm/dm/@vladmandic/human?style=flat-square) For details see [**Wiki Pages**](https://github.com/vladmandic/human/wiki)
![Stars](https://img.shields.io/github/stars/vladmandic/human?style=flat-square) and [**API Specification**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
![Code Size](https://img.shields.io/github/languages/code-size/vladmandic/human?style=flat-square)
<br>
[![](https://img.shields.io/static/v1?label=Sponsor&message=%E2%9D%A4&logo=GitHub&color=%23fe8e86)](https://github.com/sponsors/vladmandic)
![Stars](https://img.shields.io/github/stars/vladmandic/human?style=flat-square&svg=true)
![Forks](https://badgen.net/github/forks/vladmandic/human)
![Code Size](https://img.shields.io/github/languages/code-size/vladmandic/human?style=flat-square&svg=true)
![CDN](https://data.jsdelivr.com/v1/package/npm/@vladmandic/human/badge)<br>
![Downloads](https://img.shields.io/npm/dw/@vladmandic/human.png?style=flat-square)
![Downloads](https://img.shields.io/npm/dm/@vladmandic/human.png?style=flat-square)
![Downloads](https://img.shields.io/npm/dy/@vladmandic/human.png?style=flat-square)

View File

@ -1,3 +1,32 @@
# Human Library: Security Policy # Security & Privacy Policy
<br> <br>
## Issues
All issues are tracked publicly on GitHub: <https://github.com/vladmandic/human/issues>
<br>
## Vulnerabilities
`Human` library code base and included dependencies are automatically scanned against known security vulnerabilities
Any code commit is validated before merge
- [Dependencies](https://github.com/vladmandic/human/security/dependabot)
- [Scanning Alerts](https://github.com/vladmandic/human/security/code-scanning)
<br>
## Privacy
`Human` library and included demo apps:
- Are fully self-contained and do not send or share data of any kind with external targets
- Do not store any user or system data tracking, user provided inputs (images, video) or detection results
- Do not utilize any analytic services (such as Google Analytics)
`Human` library can establish external connections *only* for following purposes and *only* when explicitly configured by user:
- Load models from externally hosted site (e.g. CDN)
- Load inputs for detection from *http & https* sources

41
TODO.md
View File

@ -1,7 +1,38 @@
# To-Do list for Human library # To-Do list for Human library
- Fix BlazeFace NodeJS missing ops ## Work-in-Progress
- Prune pre-packaged models
- Build Face embedding database <hr><br>
- Dynamic sample processing
- Optimize for v1 release ## Known Issues & Limitations
### Face with Attention
`FaceMesh-Attention` is not supported when using `WASM` backend due to missing kernel op in **TFJS**
No issues with default model `FaceMesh`
### Object Detection
`NanoDet` model is not supported when using `WASM` backend due to missing kernel op in **TFJS**
No issues with default model `MB3-CenterNet`
### Body Detection using MoveNet-MultiPose
Model does not return valid detection scores (all other functionality is not impacted)
### Firefox
Running in **web workers** requires `OffscreenCanvas` which is still disabled by default in **Firefox**
Enable via `about:config` -> `gfx.offscreencanvas.enabled`
[Details](https://developer.mozilla.org/en-US/docs/Web/API/OffscreenCanvas#browser_compatibility)
### Safari
No support for running in **web workers** as Safari still does not support `OffscreenCanvas`
[Details](https://developer.mozilla.org/en-US/docs/Web/API/OffscreenCanvas#browser_compatibility)
### React-Native
`Human` support for **React-Native** is best-effort, but not part of the main development focus
<hr><br>

4
assets/README.md Normal file
View File

@ -0,0 +1,4 @@
# Human Library: Static Assets
Static assets used by `Human` library demos and/or referenced by Wiki pages

Binary file not shown.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.1 MiB

After

Width:  |  Height:  |  Size: 595 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 121 KiB

After

Width:  |  Height:  |  Size: 139 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 152 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 141 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 178 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 216 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 206 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 162 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 295 KiB

BIN
assets/samples.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 261 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 70 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 47 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 321 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 434 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 58 KiB

After

Width:  |  Height:  |  Size: 22 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 14 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 113 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 38 KiB

BIN
assets/screenshot-vrm.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 177 KiB

Binary file not shown.

Binary file not shown.

153
build.js Normal file
View File

@ -0,0 +1,153 @@
// Build script dependencies and build-wide constants.
const fs = require('fs');
const path = require('path');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const Build = require('@vladmandic/build').Build; // eslint-disable-line node/no-unpublished-require
const APIExtractor = require('@microsoft/api-extractor'); // eslint-disable-line node/no-unpublished-require
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
const packageJSON = require('./package.json');

const logFile = 'test/build.log'; // destination of the build log, also reported at end of main()
const modelsOut = 'models/models.json'; // generated manifest mapping model name -> weight size in bytes
// folders scanned for model manifest files; folders that do not exist are silently skipped
const modelsFolders = [
  './models',
  '../human-models/models',
  '../blazepose/model/',
  '../anti-spoofing/model',
  '../efficientpose/models',
  '../insightface/models',
  '../movenet/models',
  '../nanodet/models',
];
// api-extractor message ids considered external noise; kept for reference but not currently applied
const apiExtractorIgnoreList = [ // eslint-disable-line no-unused-vars
  'ae-missing-release-tag',
  'tsdoc-param-tag-missing-hyphen',
  'tsdoc-escape-right-brace',
  'tsdoc-undefined-tag',
  'tsdoc-escape-greater-than',
  'ae-unresolved-link',
  'ae-forgotten-export',
  'tsdoc-malformed-inline-tag',
  'tsdoc-unnecessary-backslash',
];
// typedef reference rewrites applied to generated .d.ts files via regExFile()
const regEx = [
  { search: 'types="@webgpu/types/dist"', replace: 'path="../src/types/webgpu.d.ts"' },
  { search: 'types="offscreencanvas"', replace: 'path="../src/types/offscreencanvas.d.ts"' },
];
/** Copy file `src` to `dst`; if the source is missing, log a warning and skip the copy. */
function copyFile(src, dst) {
  const details = { input: src, output: dst };
  if (!fs.existsSync(src)) {
    log.warn('Copy:', details);
    return;
  }
  log.state('Copy:', details);
  fs.writeFileSync(dst, fs.readFileSync(src));
}
/** Write text content to a destination file, logging the target path first. */
function writeFile(content, target) {
  log.state('Write:', { output: target });
  fs.writeFileSync(target, content);
}
/**
 * Apply a list of { search, replace } substitutions to a text file in place.
 * Per-line semantics match String.prototype.replace with a string pattern:
 * at most the first occurrence of each search string is replaced on any line.
 * Improvement over the original: the file is read and written once instead of
 * once per entry (the original re-read and rewrote the file for every entry).
 * Missing files are logged as a warning and skipped.
 */
function regExFile(src, entries) {
  if (!fs.existsSync(src)) {
    log.warn('Filter:', { src });
    return;
  }
  log.state('Filter:', { input: src });
  if (entries.length === 0) return; // nothing to do; avoid rewriting the file (original never wrote in this case)
  let lines = fs.readFileSync(src, 'UTF-8').split(/\r?\n/);
  for (const entry of entries) {
    lines = lines.map((line) => (line.includes(entry.search) ? line.replace(entry.search, entry.replace) : line));
  }
  fs.writeFileSync(src, lines.join('\n'));
}
/**
 * Enumerate model manifests (*.json) in all known model folders, load each via
 * tfjs to determine its weight size, and write a name -> byte-size map to modelsOut.
 * Fix: the original tested `!stat.isDirectory` (a method reference, always truthy),
 * so the non-directory filter never fired; it must be called as `stat.isDirectory()`.
 */
async function analyzeModels() {
  log.info('Analyze models:', { folders: modelsFolders.length, result: modelsOut });
  let totalSize = 0;
  const models = {};
  const allModels = [];
  for (const folder of modelsFolders) {
    try {
      if (!fs.existsSync(folder)) continue;
      const stat = fs.statSync(folder);
      if (!stat.isDirectory()) continue; // fixed: call the method; bare `stat.isDirectory` is always truthy
      const dir = fs.readdirSync(folder);
      const found = dir.map((f) => `file://${folder}/${f}`).filter((f) => f.endsWith('.json')); // manifest files only
      log.state('Models', { folder, models: found.length });
      allModels.push(...found);
    } catch {
      // best-effort: folders that cannot be enumerated are skipped silently
    }
  }
  for (const url of allModels) {
    // create model prototype and let tfjs decide whether to load from cache or from the original model url
    const model = new tf.GraphModel(url);
    model.findIOHandler();
    const artifacts = await model.handler.load();
    const size = artifacts?.weightData?.byteLength || 0;
    totalSize += size; // note: duplicate model names across folders still count toward the total
    const name = path.basename(url, '.json');
    if (!models[name]) models[name] = size; // first folder wins on duplicate model names
  }
  const json = JSON.stringify(models, null, 2);
  fs.writeFileSync(modelsOut, json);
  log.state('Models:', { count: Object.keys(models).length, totalSize });
}
/**
 * Build entry point: runs the production build, patches tfjs typedefs,
 * rolls up typedefs via api-extractor, emits per-bundle .d.ts re-export
 * stubs, and generates the model-size manifest.
 */
async function main() {
  log.logFile(logFile);
  log.data('Build', { name: packageJSON.name, version: packageJSON.version });
  // run production build
  const build = new Build();
  await build.run('production');
  // patch tfjs typedefs
  copyFile('node_modules/@vladmandic/tfjs/types/tfjs-core.d.ts', 'types/tfjs-core.d.ts');
  copyFile('node_modules/@vladmandic/tfjs/types/tfjs.d.ts', 'types/tfjs.esm.d.ts');
  copyFile('src/types/tsconfig.json', 'types/tsconfig.json');
  copyFile('src/types/eslint.json', 'types/.eslintrc.json');
  copyFile('src/types/tfjs.esm.d.ts', 'dist/tfjs.esm.d.ts');
  regExFile('types/tfjs-core.d.ts', regEx);
  // run api-extractor to create typedef rollup
  const extractorConfig = APIExtractor.ExtractorConfig.loadFileAndPrepare('.api-extractor.json');
  try {
    const extractorResult = APIExtractor.Extractor.invoke(extractorConfig, {
      localBuild: true,
      showVerboseMessages: false,
      messageCallback: (msg) => {
        msg.handled = true;
        if (msg.logLevel === 'none' || msg.logLevel === 'verbose' || msg.logLevel === 'info') return;
        if (msg.sourceFilePath?.includes('/node_modules/')) return; // external issues outside of human control
        log.data('API', { level: msg.logLevel, category: msg.category, id: msg.messageId, file: msg.sourceFilePath, line: msg.sourceFileLine, text: msg.text });
      },
    });
    log.state('API-Extractor:', { succeeded: extractorResult.succeeded, errors: extractorResult.errorCount, warnings: extractorResult.warningCount }); // fixed log key typo: succeeeded -> succeeded
  } catch (err) {
    log.error('API-Extractor:', err);
  }
  regExFile('types/human.d.ts', regEx);
  // emit re-export stubs so each dist bundle resolves to the rolled-up typedefs
  const distStubs = ['human.esm-nobundle', 'human.esm', 'human', 'human.node-gpu', 'human.node', 'human.node-wasm'];
  for (const stub of distStubs) writeFile('export * from \'../types/human\';', `dist/${stub}.d.ts`);
  // generate model signature
  await analyzeModels();
  log.info('Human Build complete...', { logFile });
}

main().catch((err) => log.error('Build failed:', err)); // surface async failures instead of an unhandled rejection

179
config.js
View File

@ -1,179 +0,0 @@
/* eslint-disable indent */
/* eslint-disable no-multi-spaces */

// Default configuration object for the Human library:
// backend selection, image pre-processing filters and per-model settings.
export default {
  backend: 'webgl', // select tfjs backend to use
  // can be 'webgl', 'wasm', 'cpu', or 'humangl' which is a custom version of webgl
  // leave as empty string to continue using default backend
  // when backend is set outside of Human library
  wasmPath: '../assets/', // path for wasm binaries
  // only used for backend: wasm
  debug: true, // print additional status messages to console
  async: true, // execute enabled models in parallel
  // this disables per-model performance data but
  // slightly increases performance
  // cannot be used if profiling is enabled
  profile: false, // enable tfjs profiling
  // this has significant performance impact
  // only enable for debugging purposes
  // currently only implemented for age,gender,emotion models
  deallocate: false, // aggressively deallocate gpu memory after each usage
  // only valid for webgl backend and only during first call
  // cannot be changed unless library is reloaded
  // this has significant performance impact
  // only enable on low-memory devices
  scoped: false, // enable scoped runs
  // some models *may* have memory leaks,
  // this wraps everything in a local scope at a cost of performance
  // typically not needed
  videoOptimized: true, // perform additional optimizations when input is video,
  // must be disabled for images
  // basically this skips object box boundary detection for every n frames
  // while maintaining in-box detection since objects cannot move that fast
  warmup: 'face', // what to use for human.warmup(), can be 'none', 'face', 'full'
  // warmup pre-initializes all models for faster inference but can take
  // significant time on startup
  filter: {
    enabled: true, // enable image pre-processing filters
    width: 0, // resize input width
    height: 0, // resize input height
    // if both width and height are set to 0, there is no resizing
    // if just one is set, second one is scaled automatically
    // if both are set, values are used as-is
    return: true, // return processed canvas imagedata in result
    brightness: 0, // range: -1 (darken) to 1 (lighten)
    contrast: 0, // range: -1 (reduce contrast) to 1 (increase contrast)
    sharpness: 0, // range: 0 (no sharpening) to 1 (maximum sharpening)
    blur: 0, // range: 0 (no blur) to N (blur radius in pixels)
    saturation: 0, // range: -1 (reduce saturation) to 1 (increase saturation)
    hue: 0, // range: 0 (no change) to 360 (hue rotation in degrees)
    negative: false, // image negative
    sepia: false, // image sepia colors
    vintage: false, // image vintage colors
    kodachrome: false, // image kodachrome colors
    technicolor: false, // image technicolor colors
    polaroid: false, // image polaroid camera effect
    pixelate: 0, // range: 0 (no pixelate) to N (number of pixels to pixelate)
  },
  gesture: {
    enabled: true, // enable simple gesture recognition
  },
  face: {
    enabled: true, // controls if specified module is enabled
    // face.enabled is required for all face models:
    // detector, mesh, iris, age, gender, emotion
    // (note: module is not loaded until it is required)
    detector: {
      modelPath: '../models/blazeface-back.json', // can be 'blazeface-front', 'blazeface-back' or 'faceboxes'
      // 'blazeface-front' is blazeface model optimized for large faces such as front-facing camera
      // 'blazeface-back' is blazeface model optimized for smaller and/or distant faces
      // 'faceboxes' is alternative model to 'blazeface'
      inputSize: 256, // fixed value: 128 for front and 256 for 'back'
      rotation: true, // use best-guess rotated face image or just box with rotation as-is
      // false means higher performance, but incorrect mesh mapping if face angle is above 20 degrees
      maxFaces: 10, // maximum number of faces detected in the input
      // should be set to the minimum number for performance
      skipFrames: 21, // how many frames to go without re-running the face bounding box detector
      // only used for video inputs
      // e.g., if model is running at 25 FPS, we can re-use existing bounding
      // box for updated face analysis as the head probably hasn't moved much
      // in short time (21 * 1/25 = 0.84 sec)
      skipInitial: false, // if previous detection resulted in no faces detected,
      // should skipFrames be reset immediately
      minConfidence: 0.1, // threshold for discarding a prediction
      iouThreshold: 0.1, // threshold for deciding whether boxes overlap too much in
      // non-maximum suppression (0.1 means drop if overlap 10%)
      scoreThreshold: 0.1, // threshold for deciding when to remove boxes based on score
      // in non-maximum suppression,
      // this is applied on detection objects only and before minConfidence
    },
    mesh: {
      enabled: true,
      modelPath: '../models/facemesh.json',
      inputSize: 192, // fixed value
    },
    iris: {
      enabled: true,
      modelPath: '../models/iris.json',
      inputSize: 64, // fixed value
    },
    age: {
      enabled: true,
      modelPath: '../models/age-ssrnet-imdb.json',
      inputSize: 64, // fixed value
      skipFrames: 31, // how many frames to go without re-running the detector
      // only used for video inputs
    },
    gender: {
      enabled: true,
      minConfidence: 0.1, // threshold for discarding a prediction
      modelPath: '../models/gender.json', // can be 'gender' or 'gender-ssrnet-imdb'
      inputSize: 64, // fixed value
      skipFrames: 32, // how many frames to go without re-running the detector
      // only used for video inputs
    },
    emotion: {
      enabled: true,
      inputSize: 64, // fixed value
      minConfidence: 0.1, // threshold for discarding a prediction
      skipFrames: 33, // how many frames to go without re-running the detector
      modelPath: '../models/emotion.json',
    },
    embedding: {
      enabled: false,
      inputSize: 112, // fixed value
      modelPath: '../models/mobilefacenet.json',
    },
  },
  body: {
    enabled: true,
    modelPath: '../models/posenet.json', // can be 'posenet', 'blazepose' or 'blazepose-upper'
    inputSize: 257, // fixed value, 257 for posenet and 256 for blazepose
    maxDetections: 10, // maximum number of people detected in the input
    // should be set to the minimum number for performance
    // only valid for posenet as blazepose only detects single pose
    scoreThreshold: 0.3, // threshold for deciding when to remove boxes based on score
    // in non-maximum suppression
    // only valid for posenet as blazepose only detects single pose
    nmsRadius: 20, // radius for deciding points are too close in non-maximum suppression
    // only valid for posenet as blazepose only detects single pose
    modelType: 'posenet-mobilenet', // can be 'posenet-mobilenet', 'posenet-resnet', 'blazepose'
  },
  hand: {
    enabled: true,
    rotation: false, // use best-guess rotated hand image or just box with rotation as-is
    // false means higher performance, but incorrect finger mapping if hand is inverted
    inputSize: 256, // fixed value
    skipFrames: 12, // how many frames to go without re-running the hand bounding box detector
    // only used for video inputs
    // e.g., if model is running at 25 FPS, we can re-use existing bounding
    // box for updated hand skeleton analysis as the hand probably
    // hasn't moved much in short time (12 * 1/25 = 0.48 sec)
    skipInitial: false, // if previous detection resulted in no hands detected,
    // should skipFrames be reset immediately
    minConfidence: 0.1, // threshold for discarding a prediction
    iouThreshold: 0.1, // threshold for deciding whether boxes overlap too much
    // in non-maximum suppression
    scoreThreshold: 0.5, // threshold for deciding when to remove boxes based on
    // score in non-maximum suppression
    maxHands: 1, // maximum number of hands detected in the input
    // should be set to the minimum number for performance
    landmarks: true, // detect hand landmarks or just hand boundary box
    detector: {
      modelPath: '../models/handdetect.json',
    },
    skeleton: {
      modelPath: '../models/handskeleton.json',
    },
  },
};

67
demo/README.md Normal file
View File

@ -0,0 +1,67 @@
# Human Library: Demos
For details on other demos see Wiki: [**Demos**](https://github.com/vladmandic/human/wiki/Demos)
## Main Demo
`index.html`: Full demo using `Human` ESM module running in Browsers,
Includes:
- Selectable inputs:
- Sample images
- Image via drag & drop
- Image via URL param
- WebCam input
- Video stream
- WebRTC stream
- Selectable active `Human` modules
- With interactive module params
- Interactive `Human` image filters
- Selectable interactive `results` browser
- Selectable `backend`
- Multiple execution methods:
- Sync vs Async
- in main thread or web worker
- live on git pages, on user-hosted web server or via included [**micro http2 server**](https://github.com/vladmandic/human/wiki/Development-Server)
### Demo Options
- General `Human` library options
in `index.js:userConfig`
- General `Human` `draw` options
in `index.js:drawOptions`
- Demo PWA options
in `index.js:pwa`
- Demo specific options
in `index.js:ui`
```js
const ui = {
console: true, // log messages to browser console
useWorker: true, // use web workers for processing
buffered: true, // should output be buffered between frames
interpolated: true, // should output be interpolated for smoothness between frames
results: false, // show results tree
useWebRTC: false, // use webrtc as camera source instead of local webcam
};
```
The demo implements several ways to use the `Human` library.
### URL Params
Demo app can use URL parameters to override configuration values
For example:
- Force using `WASM` as backend: <https://vladmandic.github.io/human/demo/index.html?backend=wasm>
- Enable `WebWorkers`: <https://vladmandic.github.io/human/demo/index.html?worker=true>
- Skip pre-loading and warming up: <https://vladmandic.github.io/human/demo/index.html?preload=false&warmup=false>
### WebRTC
Note that WebRTC connection requires a WebRTC server that provides a compatible media track such as H.264 video track
For such a WebRTC server implementation see <https://github.com/vladmandic/stream-rtsp> project
that implements a connection to IP Security camera using RTSP protocol and transcodes it to WebRTC
ready to be consumed by a client such as `Human`

View File

@ -1,554 +0,0 @@
import Human from '../dist/human.esm.js'; // equivalent of @vladmandic/human
import Menu from './menu.js';
import GLBench from './gl-bench.js';
const userConfig = { backend: 'webgl' }; // add any user configuration overrides
/*
const userConfig = {
backend: 'wasm',
async: false,
warmup: 'face',
videoOptimized: false,
face: { enabled: true, mesh: { enabled: false }, iris: { enabled: false }, age: { enabled: false }, gender: { enabled: false }, emotion: { enabled: false }, embedding: { enabled: false } },
hand: { enabled: false },
gesture: { enabled: false },
body: { enabled: false, modelType: 'blazepose', modelPath: '../models/blazepose.json' },
};
*/
const human = new Human(userConfig);
// ui options
const ui = {
baseBackground: 'rgba(50, 50, 50, 1)', // 'grey'
crop: true, // video mode crop to size or leave full frame
columns: 2, // when processing sample images create this many columns
facing: true, // camera facing front or back
useWorker: false, // use web workers for processing
worker: 'worker.js',
samples: ['../assets/sample6.jpg', '../assets/sample1.jpg', '../assets/sample4.jpg', '../assets/sample5.jpg', '../assets/sample3.jpg', '../assets/sample2.jpg'],
compare: '../assets/sample-me.jpg',
console: true, // log messages to browser console
maxFPSframes: 10, // keep fps history for how many frames
modelsPreload: true, // preload human models on startup
busy: false, // internal camera busy flag
menuWidth: 0, // internal
menuHeight: 0, // internal
camera: {}, // internal, holds details of webcam details
detectFPS: [], // internal, holds fps values for detection performance
drawFPS: [], // internal, holds fps values for draw performance
buffered: false, // experimental, should output be buffered between frames
drawWarmup: false, // debug only, should warmup image processing be displayed on startup
drawThread: null, // internl, perform draw operations in a separate thread
detectThread: null, // internl, perform detect operations in a separate thread
framesDraw: 0, // internal, statistics on frames drawn
framesDetect: 0, // internal, statistics on frames detected
bench: true, // show gl fps benchmark window
lastFrame: 0, // time of last frame processing
};
// global variables
const menu = {};
let worker;
let bench;
let lastDetectedResult = {};
// helper function: translates json to human readable string
function str(...msg) {
if (!Array.isArray(msg)) return msg;
let line = '';
for (const entry of msg) {
if (typeof entry === 'object') line += JSON.stringify(entry).replace(/{|}|"|\[|\]/g, '').replace(/,/g, ', ');
else line += entry;
}
return line;
}
// helper function: wrapper around console output
function log(...msg) {
const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
// eslint-disable-next-line no-console
if (ui.console) console.log(ts, ...msg);
}
function status(msg) {
// eslint-disable-next-line no-console
const div = document.getElementById('status');
if (div) div.innerText = msg;
}
let original;
async function calcSimmilariry(result) {
document.getElementById('compare-container').style.display = human.config.face.embedding.enabled ? 'block' : 'none';
if (!human.config.face.embedding.enabled) return;
if ((result?.face?.length > 0) && (result?.face[0].embedding?.length !== 192)) return;
if (!original) {
original = result;
document.getElementById('compare-canvas').getContext('2d').drawImage(original.canvas, 0, 0, 200, 200);
}
const simmilarity = human.simmilarity(original?.face[0]?.embedding, result?.face[0]?.embedding);
document.getElementById('simmilarity').innerText = `simmilarity: ${Math.trunc(1000 * simmilarity) / 10}%`;
}
// draws processed results and starts processing of a next frame
let lastDraw = performance.now();
async function drawResults(input) {
const result = lastDetectedResult;
const canvas = document.getElementById('canvas');
// update draw fps data
ui.drawFPS.push(1000 / (performance.now() - lastDraw));
if (ui.drawFPS.length > ui.maxFPSframes) ui.drawFPS.shift();
lastDraw = performance.now();
// enable for continous performance monitoring
// console.log(result.performance);
// draw fps chart
await menu.process.updateChart('FPS', ui.detectFPS);
// get updated canvas
if (ui.buffered || !result.canvas) {
const image = await human.image(input);
result.canvas = image.canvas;
human.tf.dispose(image.tensor);
}
// draw image from video
const ctx = canvas.getContext('2d');
ctx.fillStyle = ui.baseBackground;
ctx.fillRect(0, 0, canvas.width, canvas.height);
if (result.canvas) {
if (result.canvas.width !== canvas.width) canvas.width = result.canvas.width;
if (result.canvas.height !== canvas.height) canvas.height = result.canvas.height;
ctx.drawImage(result.canvas, 0, 0, result.canvas.width, result.canvas.height, 0, 0, result.canvas.width, result.canvas.height);
} else {
ctx.drawImage(input, 0, 0, input.width, input.height, 0, 0, canvas.width, canvas.height);
}
// draw all results
human.draw.face(canvas, result.face);
human.draw.body(canvas, result.body);
human.draw.hand(canvas, result.hand);
human.draw.gesture(canvas, result.gesture);
await calcSimmilariry(result);
// update log
const engine = human.tf.engine();
const gpu = engine.backendInstance ? `gpu: ${(engine.backendInstance.numBytesInGPU ? engine.backendInstance.numBytesInGPU : 0).toLocaleString()} bytes` : '';
const memory = `system: ${engine.state.numBytes.toLocaleString()} bytes ${gpu} | tensors: ${engine.state.numTensors.toLocaleString()}`;
const processing = result.canvas ? `processing: ${result.canvas.width} x ${result.canvas.height}` : '';
const avgDetect = Math.trunc(10 * ui.detectFPS.reduce((a, b) => a + b, 0) / ui.detectFPS.length) / 10;
const avgDraw = Math.trunc(10 * ui.drawFPS.reduce((a, b) => a + b, 0) / ui.drawFPS.length) / 10;
const warning = (ui.detectFPS.length > 5) && (avgDetect < 5) ? '<font color="lightcoral">warning: your performance is low: try switching to higher performance backend, lowering resolution or disabling some models</font>' : '';
document.getElementById('log').innerHTML = `
video: ${ui.camera.name} | facing: ${ui.camera.facing} | screen: ${window.innerWidth} x ${window.innerHeight} camera: ${ui.camera.width} x ${ui.camera.height} ${processing}<br>
backend: ${human.tf.getBackend()} | ${memory}<br>
performance: ${str(result.performance)}ms FPS process:${avgDetect} refresh:${avgDraw}<br>
${warning}<br>
`;
ui.framesDraw++;
ui.lastFrame = performance.now();
// if buffered, immediate loop but limit frame rate although it's going to run slower as JS is singlethreaded
if (ui.buffered) {
ui.drawThread = requestAnimationFrame(() => drawResults(input, canvas));
} else if (!ui.buffered && ui.drawThread) {
log('stopping buffered refresh');
cancelAnimationFrame(ui.drawThread);
ui.drawThread = null;
}
}
// setup webcam
async function setupCamera() {
if (ui.busy) return null;
ui.busy = true;
const video = document.getElementById('video');
const canvas = document.getElementById('canvas');
const output = document.getElementById('log');
const live = video.srcObject ? ((video.srcObject.getVideoTracks()[0].readyState === 'live') && (video.readyState > 2) && (!video.paused)) : false;
let msg = '';
status('setting up camera');
// setup webcam. note that navigator.mediaDevices requires that page is accessed via https
if (!navigator.mediaDevices) {
msg = 'camera access not supported';
output.innerText += `\n${msg}`;
log(msg);
status(msg);
ui.busy = false;
return msg;
}
let stream;
const constraints = {
audio: false,
video: { facingMode: ui.facing ? 'user' : 'environment', resizeMode: ui.crop ? 'crop-and-scale' : 'none' },
};
if (window.innerWidth > window.innerHeight) constraints.video.width = { ideal: window.innerWidth };
else constraints.video.height = { ideal: (window.innerHeight - document.getElementById('menubar').offsetHeight) };
try {
stream = await navigator.mediaDevices.getUserMedia(constraints);
} catch (err) {
if (err.name === 'PermissionDeniedError' || err.name === 'NotAllowedError') msg = 'camera permission denied';
else if (err.name === 'SourceUnavailableError') msg = 'camera not available';
else msg = `camera error: ${err.message || err}`;
output.innerText += `\n${msg}`;
status(msg);
log('camera error:', err);
ui.busy = false;
return msg;
}
if (stream) video.srcObject = stream;
else {
ui.busy = false;
return 'camera stream empty';
}
const track = stream.getVideoTracks()[0];
const settings = track.getSettings();
// log('camera constraints:', constraints, 'window:', { width: window.innerWidth, height: window.innerHeight }, 'settings:', settings, 'track:', track);
ui.camera = { name: track.label?.toLowerCase(), width: settings.width, height: settings.height, facing: settings.facingMode === 'user' ? 'front' : 'back' };
return new Promise((resolve) => {
video.onloadeddata = async () => {
video.width = video.videoWidth;
video.height = video.videoHeight;
canvas.width = video.width;
canvas.height = video.height;
canvas.style.width = canvas.width > canvas.height ? '100vw' : '';
canvas.style.height = canvas.width > canvas.height ? '' : '100vh';
ui.menuWidth.input.setAttribute('value', video.width);
ui.menuHeight.input.setAttribute('value', video.height);
// silly font resizing for paint-on-canvas since viewport can be zoomed
if (live) video.play();
// eslint-disable-next-line no-use-before-define
if (live && !ui.detectThread) runHumanDetect(video, canvas);
ui.busy = false;
// do once more because onresize events can be delayed or skipped
// if (video.width > window.innerWidth) await setupCamera();
status('');
resolve();
};
});
}
function initPerfMonitor() {
if (!bench) {
const gl = null;
// cosnt gl = human.tf.engine().backend.gpgpu.gl;
// if (!gl) log('bench cannot get tensorflow webgl context');
bench = new GLBench(gl, {
trackGPU: false, // this is really slow
chartHz: 20,
chartLen: 20,
});
bench.begin();
}
}
// wrapper for worker.postmessage that creates worker if one does not exist
function webWorker(input, image, canvas, timestamp) {
if (!worker) {
// create new webworker and add event handler only once
log('creating worker thread');
worker = new Worker(ui.worker, { type: 'module' });
// after receiving message from webworker, parse&draw results and send new frame for processing
worker.addEventListener('message', (msg) => {
if (msg.data.result.performance && msg.data.result.performance.total) ui.detectFPS.push(1000 / msg.data.result.performance.total);
if (ui.detectFPS.length > ui.maxFPSframes) ui.detectFPS.shift();
if (ui.bench) {
if (!bench) initPerfMonitor();
bench.nextFrame(timestamp);
}
if (document.getElementById('gl-bench')) document.getElementById('gl-bench').style.display = ui.bench ? 'block' : 'none';
lastDetectedResult = msg.data.result;
ui.framesDetect++;
if (!ui.drawThread) drawResults(input);
// eslint-disable-next-line no-use-before-define
ui.detectThread = requestAnimationFrame((now) => runHumanDetect(input, canvas, now));
});
}
// pass image data as arraybuffer to worker by reference to avoid copy
worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, userConfig }, [image.data.buffer]);
}
// main processing function when input is webcam, can use direct invocation or web worker
function runHumanDetect(input, canvas, timestamp) {
// if live video
const live = input.srcObject && (input.srcObject.getVideoTracks()[0].readyState === 'live') && (input.readyState > 2) && (!input.paused);
if (!live && input.srcObject) {
// stop ui refresh
if (ui.drawThread) cancelAnimationFrame(ui.drawThread);
if (ui.detectThread) cancelAnimationFrame(ui.detectThread);
ui.drawThread = null;
ui.detectThread = null;
// if we want to continue and camera not ready, retry in 0.5sec, else just give up
if (input.paused) log('camera paused');
else if ((input.srcObject.getVideoTracks()[0].readyState === 'live') && (input.readyState <= 2)) setTimeout(() => runHumanDetect(input, canvas), 500);
else log(`camera not ready: track state: ${input.srcObject?.getVideoTracks()[0].readyState} stream state: ${input.readyState}`);
clearTimeout(ui.drawThread);
ui.drawThread = null;
log('frame statistics: process:', ui.framesDetect, 'refresh:', ui.framesDraw);
log('memory', human.tf.engine().memory());
return;
}
status('');
if (ui.useWorker) {
// get image data from video as we cannot send html objects to webworker
const offscreen = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(canvas.width, canvas.height) : document.createElement('canvas');
offscreen.width = canvas.width;
offscreen.height = canvas.height;
const ctx = offscreen.getContext('2d');
ctx.drawImage(input, 0, 0, input.width, input.height, 0, 0, canvas.width, canvas.height);
const data = ctx.getImageData(0, 0, canvas.width, canvas.height);
// perform detection in worker
webWorker(input, data, canvas, userConfig, timestamp);
} else {
human.detect(input, userConfig).then((result) => {
if (result.performance && result.performance.total) ui.detectFPS.push(1000 / result.performance.total);
if (ui.detectFPS.length > ui.maxFPSframes) ui.detectFPS.shift();
if (ui.bench) {
if (!bench) initPerfMonitor();
bench.nextFrame(timestamp);
}
if (document.getElementById('gl-bench')) document.getElementById('gl-bench').style.display = ui.bench ? 'block' : 'none';
if (result.error) {
log(result.error);
document.getElementById('log').innerText += `\nHuman error: ${result.error}`;
} else {
lastDetectedResult = result;
if (!ui.drawThread) drawResults(input);
ui.framesDetect++;
ui.detectThread = requestAnimationFrame((now) => runHumanDetect(input, canvas, now));
}
});
}
}
// main processing function when input is image, can use direct invocation or web worker
async function processImage(input) {
return new Promise((resolve) => {
const image = new Image();
image.onload = async () => {
log('Processing image:', encodeURI(image.src));
const canvas = document.getElementById('canvas');
image.width = image.naturalWidth;
image.height = image.naturalHeight;
canvas.width = human.config.filter.width && human.config.filter.width > 0 ? human.config.filter.width : image.naturalWidth;
canvas.height = human.config.filter.height && human.config.filter.height > 0 ? human.config.filter.height : image.naturalHeight;
const result = await human.detect(image, userConfig);
lastDetectedResult = result;
await drawResults(image);
const thumb = document.createElement('canvas');
thumb.className = 'thumbnail';
thumb.width = window.innerWidth / (ui.columns + 0.1);
thumb.height = thumb.width * canvas.height / canvas.width;
if (result.face && result.face.length > 0) {
thumb.title = result.face.map((a, i) => `#${i} face: ${Math.trunc(100 * a.faceConfidence)}% box: ${Math.trunc(100 * a.boxConfidence)}% age: ${Math.trunc(a.age)} gender: ${Math.trunc(100 * a.genderConfidence)}% ${a.gender}`).join(' | ');
} else {
thumb.title = 'no face detected';
}
const ctx = thumb.getContext('2d');
ctx.drawImage(canvas, 0, 0, canvas.width, canvas.height, 0, 0, thumb.width, thumb.height);
document.getElementById('samples-container').appendChild(thumb);
image.src = '';
resolve(true);
};
image.src = input;
});
}
// just initialize everything and call main function
async function detectVideo() {
userConfig.videoOptimized = true;
document.getElementById('samples-container').style.display = 'none';
document.getElementById('canvas').style.display = 'block';
const video = document.getElementById('video');
const canvas = document.getElementById('canvas');
if ((video.srcObject !== null) && !video.paused) {
document.getElementById('play').style.display = 'block';
document.getElementById('btnStart').className = 'button button-start';
document.getElementById('btnStart').innerHTML = 'start<br>video';
status('paused');
video.pause();
} else {
const cameraError = await setupCamera();
if (!cameraError) {
document.getElementById('play').style.display = 'none';
for (const m of Object.values(menu)) m.hide();
status('');
document.getElementById('btnStart').className = 'button button-stop';
document.getElementById('btnStart').innerHTML = 'pause<br>video';
await video.play();
if (!ui.detectThread) runHumanDetect(video, canvas);
} else {
status(cameraError);
}
}
}
// just initialize everything and call main function
async function detectSampleImages() {
document.getElementById('play').style.display = 'none';
userConfig.videoOptimized = false;
document.getElementById('canvas').style.display = 'none';
document.getElementById('samples-container').style.display = 'block';
log('Running detection of sample images');
status('processing images');
document.getElementById('samples-container').innerHTML = '';
for (const m of Object.values(menu)) m.hide();
for (const image of ui.samples) await processImage(image);
status('');
}
function setupMenu() {
let x = [];
if (window.innerWidth > 800) {
// initial position of menu items, later it's calculated based on mouse coordinates
x = [`${document.getElementById('btnDisplay').offsetLeft - 50}px`, `${document.getElementById('btnImage').offsetLeft - 50}px`, `${document.getElementById('btnProcess').offsetLeft - 50}px`, `${document.getElementById('btnModel').offsetLeft - 50}px`];
} else {
// absolute minimum spacing for menus
x = ['0rem', '11rem', '21.1rem', '33rem'];
}
menu.display = new Menu(document.body, '', { top: `${document.getElementById('menubar').offsetHeight}px`, left: x[0] });
menu.display.addBool('perf monitor', ui, 'bench', (val) => ui.bench = val);
menu.display.addBool('buffered output', ui, 'buffered', (val) => ui.buffered = val);
menu.display.addBool('crop & scale', ui, 'crop', (val) => {
ui.crop = val;
setupCamera();
});
menu.display.addBool('camera facing', ui, 'facing', (val) => {
ui.facing = val;
setupCamera();
});
menu.display.addHTML('<hr style="border-style: inset; border-color: dimgray">');
menu.display.addBool('use 3D depth', human.draw.options, 'useDepth');
menu.display.addBool('draw with curves', human.draw.options, 'useCurves');
menu.display.addBool('print labels', human.draw.options, 'drawLabels');
menu.display.addBool('draw points', human.draw.options, 'drawPoints');
menu.display.addBool('draw boxes', human.draw.options, 'drawBoxes');
menu.display.addBool('draw polygons', human.draw.options, 'drawPolygons');
menu.display.addBool('fill polygons', human.draw.options, 'fillPolygons');
menu.image = new Menu(document.body, '', { top: `${document.getElementById('menubar').offsetHeight}px`, left: x[1] });
menu.image.addBool('enabled', human.config.filter, 'enabled', (val) => human.config.filter.enabled = val);
ui.menuWidth = menu.image.addRange('image width', human.config.filter, 'width', 0, 3840, 10, (val) => human.config.filter.width = parseInt(val));
ui.menuHeight = menu.image.addRange('image height', human.config.filter, 'height', 0, 2160, 10, (val) => human.config.filter.height = parseInt(val));
menu.image.addHTML('<hr style="border-style: inset; border-color: dimgray">');
menu.image.addRange('brightness', human.config.filter, 'brightness', -1.0, 1.0, 0.05, (val) => human.config.filter.brightness = parseFloat(val));
menu.image.addRange('contrast', human.config.filter, 'contrast', -1.0, 1.0, 0.05, (val) => human.config.filter.contrast = parseFloat(val));
menu.image.addRange('sharpness', human.config.filter, 'sharpness', 0, 1.0, 0.05, (val) => human.config.filter.sharpness = parseFloat(val));
menu.image.addRange('blur', human.config.filter, 'blur', 0, 20, 1, (val) => human.config.filter.blur = parseInt(val));
menu.image.addRange('saturation', human.config.filter, 'saturation', -1.0, 1.0, 0.05, (val) => human.config.filter.saturation = parseFloat(val));
menu.image.addRange('hue', human.config.filter, 'hue', 0, 360, 5, (val) => human.config.filter.hue = parseInt(val));
menu.image.addRange('pixelate', human.config.filter, 'pixelate', 0, 32, 1, (val) => human.config.filter.pixelate = parseInt(val));
menu.image.addHTML('<hr style="border-style: inset; border-color: dimgray">');
menu.image.addBool('negative', human.config.filter, 'negative', (val) => human.config.filter.negative = val);
menu.image.addBool('sepia', human.config.filter, 'sepia', (val) => human.config.filter.sepia = val);
menu.image.addBool('vintage', human.config.filter, 'vintage', (val) => human.config.filter.vintage = val);
menu.image.addBool('kodachrome', human.config.filter, 'kodachrome', (val) => human.config.filter.kodachrome = val);
menu.image.addBool('technicolor', human.config.filter, 'technicolor', (val) => human.config.filter.technicolor = val);
menu.image.addBool('polaroid', human.config.filter, 'polaroid', (val) => human.config.filter.polaroid = val);
menu.process = new Menu(document.body, '', { top: `${document.getElementById('menubar').offsetHeight}px`, left: x[2] });
menu.process.addList('backend', ['cpu', 'webgl', 'wasm', 'humangl'], human.config.backend, (val) => human.config.backend = val);
menu.process.addBool('async operations', human.config, 'async', (val) => human.config.async = val);
// menu.process.addBool('enable profiler', human.config, 'profile', (val) => human.config.profile = val);
// menu.process.addBool('memory shield', human.config, 'deallocate', (val) => human.config.deallocate = val);
menu.process.addBool('use web worker', ui, 'useWorker');
menu.process.addHTML('<hr style="border-style: inset; border-color: dimgray">');
menu.process.addLabel('model parameters');
menu.process.addRange('max objects', human.config.face.detector, 'maxFaces', 1, 50, 1, (val) => {
human.config.face.detector.maxFaces = parseInt(val);
human.config.body.maxDetections = parseInt(val);
human.config.hand.maxHands = parseInt(val);
});
menu.process.addRange('skip frames', human.config.face.detector, 'skipFrames', 0, 50, 1, (val) => {
human.config.face.detector.skipFrames = parseInt(val);
human.config.face.emotion.skipFrames = parseInt(val);
human.config.face.age.skipFrames = parseInt(val);
human.config.hand.skipFrames = parseInt(val);
});
menu.process.addRange('min confidence', human.config.face.detector, 'minConfidence', 0.0, 1.0, 0.05, (val) => {
human.config.face.detector.minConfidence = parseFloat(val);
human.config.face.gender.minConfidence = parseFloat(val);
human.config.face.emotion.minConfidence = parseFloat(val);
human.config.hand.minConfidence = parseFloat(val);
});
menu.process.addRange('score threshold', human.config.face.detector, 'scoreThreshold', 0.1, 1.0, 0.05, (val) => {
human.config.face.detector.scoreThreshold = parseFloat(val);
human.config.hand.scoreThreshold = parseFloat(val);
human.config.body.scoreThreshold = parseFloat(val);
});
menu.process.addRange('overlap', human.config.face.detector, 'iouThreshold', 0.1, 1.0, 0.05, (val) => {
human.config.face.detector.iouThreshold = parseFloat(val);
human.config.hand.iouThreshold = parseFloat(val);
});
menu.process.addBool('detection rotation', human.config.face.detector, 'rotation', (val) => {
human.config.face.detector.rotation = val;
human.config.hand.rotation = val;
});
menu.process.addHTML('<hr style="border-style: inset; border-color: dimgray">');
menu.process.addButton('process sample images', 'process images', () => detectSampleImages());
menu.process.addHTML('<hr style="border-style: inset; border-color: dimgray">');
menu.process.addChart('FPS', 'FPS');
menu.models = new Menu(document.body, '', { top: `${document.getElementById('menubar').offsetHeight}px`, left: x[3] });
menu.models.addBool('face detect', human.config.face, 'enabled', (val) => human.config.face.enabled = val);
menu.models.addBool('face mesh', human.config.face.mesh, 'enabled', (val) => human.config.face.mesh.enabled = val);
menu.models.addBool('face iris', human.config.face.iris, 'enabled', (val) => human.config.face.iris.enabled = val);
menu.models.addBool('face age', human.config.face.age, 'enabled', (val) => human.config.face.age.enabled = val);
menu.models.addBool('face gender', human.config.face.gender, 'enabled', (val) => human.config.face.gender.enabled = val);
menu.models.addBool('face emotion', human.config.face.emotion, 'enabled', (val) => human.config.face.emotion.enabled = val);
menu.models.addHTML('<hr style="border-style: inset; border-color: dimgray">');
menu.models.addBool('body pose', human.config.body, 'enabled', (val) => human.config.body.enabled = val);
menu.models.addBool('hand pose', human.config.hand, 'enabled', (val) => human.config.hand.enabled = val);
menu.models.addHTML('<hr style="border-style: inset; border-color: dimgray">');
menu.models.addBool('gestures', human.config.gesture, 'enabled', (val) => human.config.gesture.enabled = val);
menu.models.addHTML('<hr style="border-style: inset; border-color: dimgray">');
menu.models.addBool('face compare', human.config.face.embedding, 'enabled', (val) => {
human.config.face.embedding.enabled = val;
original = null;
});
document.getElementById('btnDisplay').addEventListener('click', (evt) => menu.display.toggle(evt));
document.getElementById('btnImage').addEventListener('click', (evt) => menu.image.toggle(evt));
document.getElementById('btnProcess').addEventListener('click', (evt) => menu.process.toggle(evt));
document.getElementById('btnModel').addEventListener('click', (evt) => menu.models.toggle(evt));
document.getElementById('btnStart').addEventListener('click', () => detectVideo());
document.getElementById('play').addEventListener('click', () => detectVideo());
}
async function drawWarmup(res) {
const canvas = document.getElementById('canvas');
canvas.width = res.canvas.width;
canvas.height = res.canvas.height;
const ctx = canvas.getContext('2d');
ctx.drawImage(res.canvas, 0, 0, res.canvas.width, res.canvas.height, 0, 0, canvas.width, canvas.height);
await human.draw.all(canvas, res);
}
async function main() {
log('Demo starting ...');
setupMenu();
document.getElementById('log').innerText = `Human: version ${human.version}`;
if (ui.modelsPreload && !ui.useWorker) {
status('loading');
await human.load(userConfig); // this is not required, just pre-loads all models
const loaded = Object.keys(human.models).filter((a) => human.models[a]);
log('Demo loaded models:', loaded);
}
if (!ui.useWorker) {
status('initializing');
const res = await human.warmup(userConfig); // this is not required, just pre-warms all models for faster initial inference
if (res && res.canvas && ui.drawWarmup) await drawWarmup(res);
}
status('human: ready');
document.getElementById('loader').style.display = 'none';
document.getElementById('play').style.display = 'block';
log('Demo ready...');
}
window.onload = main;
window.onresize = setupCamera;

View File

@ -0,0 +1,160 @@
/**
* Human demo for browsers
*
* Demo for face detection
*/
/** @type {Human} */
import { Human } from '../../dist/human.esm.js';
let loader;
const humanConfig = { // user configuration for human, used to fine-tune behavior
cacheSensitivity: 0,
debug: true,
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
filter: { enabled: true, equalization: false, flip: false },
face: {
enabled: true,
detector: { rotation: false, maxDetected: 100, minConfidence: 0.2, return: true, square: false },
iris: { enabled: true },
description: { enabled: true },
emotion: { enabled: true },
antispoof: { enabled: true },
liveness: { enabled: true },
},
body: { enabled: false },
hand: { enabled: false },
object: { enabled: false },
gesture: { enabled: false },
segmentation: { enabled: false },
};
const human = new Human(humanConfig); // new instance of human
export const showLoader = (msg) => { loader.setAttribute('msg', msg); loader.style.display = 'block'; };
export const hideLoader = () => loader.style.display = 'none';
class ComponentLoader extends HTMLElement { // watch for attributes
message = document.createElement('div');
static get observedAttributes() { return ['msg']; }
attributeChangedCallback(_name, _prevVal, currVal) {
this.message.innerHTML = currVal;
}
connectedCallback() { // triggered on insert
this.attachShadow({ mode: 'open' });
const css = document.createElement('style');
css.innerHTML = `
.loader-container { top: 450px; justify-content: center; position: fixed; width: 100%; }
.loader-message { font-size: 1.5rem; padding: 1rem; }
.loader { width: 300px; height: 300px; border: 3px solid transparent; border-radius: 50%; border-top: 4px solid #f15e41; animation: spin 4s linear infinite; position: relative; }
.loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: 4px solid transparent; }
.loader::before { border-top-color: #bad375; animation: 3s spin linear infinite; }
.loader::after { border-top-color: #26a9e0; animation: spin 1.5s linear infinite; }
@keyframes spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } }
`;
const container = document.createElement('div');
container.id = 'loader-container';
container.className = 'loader-container';
loader = document.createElement('div');
loader.id = 'loader';
loader.className = 'loader';
this.message.id = 'loader-message';
this.message.className = 'loader-message';
this.message.innerHTML = '';
container.appendChild(this.message);
container.appendChild(loader);
this.shadowRoot?.append(css, container);
loader = this; // eslint-disable-line @typescript-eslint/no-this-alias
}
}
customElements.define('component-loader', ComponentLoader);
function addFace(face, source) {
const deg = (rad) => Math.round((rad || 0) * 180 / Math.PI);
const canvas = document.createElement('canvas');
const emotion = face.emotion?.map((e) => `${Math.round(100 * e.score)}% ${e.emotion}`) || [];
const rotation = `pitch ${deg(face.rotation?.angle.pitch)}° | roll ${deg(face.rotation?.angle.roll)}° | yaw ${deg(face.rotation?.angle.yaw)}°`;
const gaze = `direction ${deg(face.rotation?.gaze.bearing)}° strength ${Math.round(100 * (face.rotation.gaze.strength || 0))}%`;
canvas.title = `
source: ${source}
score: ${Math.round(100 * face.boxScore)}% detection ${Math.round(100 * face.faceScore)}% analysis
age: ${face.age} years | gender: ${face.gender} score ${Math.round(100 * face.genderScore)}%
emotion: ${emotion.join(' | ')}
head rotation: ${rotation}
eyes gaze: ${gaze}
camera distance: ${face.distance}m | ${Math.round(100 * face.distance / 2.54)}in
check: ${Math.round(100 * face.real)}% real ${Math.round(100 * face.live)}% live
`.replace(/ /g, ' ');
canvas.onclick = (e) => {
e.preventDefault();
document.getElementById('description').innerHTML = canvas.title;
};
human.draw.tensor(face.tensor, canvas);
human.tf.dispose(face.tensor);
return canvas;
}
async function addFaces(imgEl) {
  // Run full detection on the clicked image and render one canvas per detected face.
  showLoader('human: busy');
  const facesContainer = document.getElementById('faces');
  facesContainer.innerHTML = '';
  const result = await human.detect(imgEl);
  console.log(result); // eslint-disable-line no-console
  document.getElementById('description').innerHTML = `detected ${result.face.length} faces`;
  result.face.forEach((face) => facesContainer.appendChild(addFace(face, imgEl.src.substring(0, 64))));
  hideLoader();
}
function addImage(imageUri) {
  // Create a 256x256 thumbnail; it is only attached to the page once it loads successfully.
  const img = new Image(256, 256);
  img.onload = () => {
    const strip = document.getElementById('images');
    strip.appendChild(img); // add image if loaded ok
    strip.scroll(strip?.offsetWidth, 0); // keep the newest image in view
  };
  img.onerror = () => console.error('addImage', { imageUri }); // eslint-disable-line no-console
  img.onclick = () => addFaces(img);
  img.title = imageUri.substring(0, 64);
  img.src = encodeURI(imageUri);
}
async function initDragAndDrop() {
  // Wire up whole-page drag & drop: dropped image files are read as data-uris and added to the strip.
  const readFile = (file) => {
    // fix: use a fresh FileReader per file; the original reused a single reader across the
    // loop, and calling readAsDataURL while a previous read is in progress throws
    // InvalidStateError, so only one of several dropped files was processed
    const reader = new FileReader();
    reader.onload = async (e) => {
      if (e.target.result.startsWith('data:image')) await addImage(e.target.result);
    };
    reader.readAsDataURL(file);
  };
  document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
  document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
  document.body.addEventListener('dragover', (evt) => evt.preventDefault());
  document.body.addEventListener('drop', async (evt) => {
    evt.preventDefault();
    evt.dataTransfer.dropEffect = 'copy';
    for (const f of evt.dataTransfer.files) readFile(f);
  });
  document.body.onclick = (e) => { // clicking outside a face canvas clears the description
    if (e.target.localName !== 'canvas') document.getElementById('description').innerHTML = '';
  };
}
async function main() {
  // Page entry point: prepare models, then seed the gallery with the bundled sample images.
  showLoader('loading models');
  await human.load();
  showLoader('compiling models');
  await human.warmup();
  showLoader('loading images');
  const samples = ['group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg', 'solvay1927.jpg', 'stock-group-1.jpg', 'stock-group-2.jpg', 'stock-models-6.jpg', 'stock-models-7.jpg'];
  for (const sample of samples) addImage(`../../samples/in/${sample}`);
  initDragAndDrop();
  hideLoader();
}
window.onload = main; // bootstrap once the page has fully loaded

View File

@ -0,0 +1,43 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<!-- <meta http-equiv="content-type" content="text/html; charset=utf-8"> -->
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./facedetect.js" type="module"></script>
<style>
img { object-fit: contain; }
img:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 24px; font-variant: small-caps; }
body { margin: 24px; background: black; color: white; overflow: hidden; text-align: -webkit-center; width: 100vw; height: 100vh; }
::-webkit-scrollbar { height: 8px; border: 0; border-radius: 0; }
::-webkit-scrollbar-thumb { background: grey }
::-webkit-scrollbar-track { margin: 3px; }
canvas { width: 192px; height: 192px; margin: 2px; padding: 2px; cursor: grab; transform: scale(1.00); transition : all 0.3s ease; }
canvas:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
</style>
</head>
<body>
<component-loader></component-loader>
<div style="display: flex">
<div>
<div style="margin: 24px">select image to show detected faces<br>drag & drop to add your images</div>
<div id="images" style="display: flex; width: 98vw; overflow-x: auto; overflow-y: hidden; scroll-behavior: smooth"></div>
</div>
</div>
<div id="list" style="height: 10px"></div>
<div style="margin: 24px">hover or click on face to show details</div>
<div id="faces" style="overflow-y: auto"></div>
<div id="description" style="white-space: pre;"></div>
</body>
</html>

42
demo/faceid/README.md Normal file
View File

@ -0,0 +1,42 @@
# Human Face Recognition: FaceID
`faceid` runs multiple checks to validate webcam input before performing face match
Detected face image and descriptor are stored in client-side IndexedDB
## Workflow
- Starts webcam
- Waits until input video contains validated face or timeout is reached
- Number of people
- Face size
- Face and gaze direction
- Detection scores
- Blink detection (including temporal check for blink speed) to verify live input
- Runs `antispoofing` optional module
- Runs `liveness` optional module
- Runs match against database of registered faces and presents best match with scores
## Notes
Both `antispoof` and `liveness` models are tiny and
designed to serve as a quick check when used together with other indicators:
- size below 1MB
- very quick inference times as they are very simple (11 ops for antispoof and 23 ops for liveness)
- trained on low-resolution inputs
### Anti-spoofing Module
- Checks if input is realistic (e.g. computer generated faces)
- Configuration: `human.config.face.antispoof.enabled`
- Result: `human.result.face[0].real` as score
### Liveness Module
- Checks if input has obvious artifacts due to recording (e.g. playing back phone recording of a face)
- Configuration: `human.config.face.liveness.enabled`
- Result: `human.result.face[0].live` as score
### Models
**FaceID** is compatible with
- `faceres.json` (default) performs combined age/gender/descriptor analysis
- `faceres-deep.json` higher resolution variation of `faceres`
- `insightface` alternative model for face descriptor analysis
- `mobilefacenet` alternative model for face descriptor analysis

49
demo/faceid/index.html Normal file
View File

@ -0,0 +1,49 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human: Face Recognition</title>
<meta name="viewport" content="width=device-width" id="viewport">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./index.js" type="module"></script>
<style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; padding: 16px; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
body::-webkit-scrollbar { display: none; }
.button { padding: 2px; cursor: pointer; box-shadow: 2px 2px black; width: 64px; text-align: center; place-content: center; margin-left: 16px; height: 16px; display: none }
.ok { position: absolute; top: 64px; right: 20px; width: 150px; background-color: grey; padding: 4px; color: black; font-size: 14px }
</style>
</head>
<body>
<div style="padding: 8px">
<h1 style="margin: 0">faceid demo using human library</h1>
look directly at camera and make sure that detection passes all of the required tests noted on the right hand side of the screen<br>
if input does not satisfy the tests within the specified timeout, no image will be selected<br>
once face image is approved, it will be compared with existing face database<br>
you can also store face descriptor with label in a browser's indexdb for future usage<br>
<br>
<i>note: this is not equivalent to full faceid methods as used by modern mobile phones or windows hello<br>
as they rely on additional infrared sensors and depth-sensing and not just camera image for additional levels of security</i>
</div>
<canvas id="canvas" style="padding: 8px"></canvas>
<canvas id="source" style="padding: 8px"></canvas>
<video id="video" playsinline style="display: none"></video>
<pre id="log" style="padding: 8px"></pre>
<div id="match" style="display: none; padding: 8px">
<label for="name">name:</label>
<input id="name" type="text" value="" style="height: 16px; border: none; padding: 2px; margin-left: 8px">
<span id="save" class="button" style="background-color: royalblue">save</span>
<span id="delete" class="button" style="background-color: lightcoral">delete</span>
</div>
<div id="retry" class="button" style="background-color: darkslategray; width: 93%; margin-top: 32px; padding: 12px">retry</div>
<div id="ok"></div>
</body>
</html>

9
demo/faceid/index.js Normal file

File diff suppressed because one or more lines are too long

7
demo/faceid/index.js.map Normal file

File diff suppressed because one or more lines are too long

318
demo/faceid/index.ts Normal file
View File

@ -0,0 +1,318 @@
/**
* Human demo for browsers
* @default Human Library
* @summary <https://github.com/vladmandic/human>
* @author <https://github.com/vladmandic>
* @copyright <https://github.com/vladmandic>
* @license MIT
*/
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
import * as indexDb from './indexdb'; // methods to deal with indexdb
const humanConfig = { // user configuration for human, used to fine-tune behavior
  cacheSensitivity: 0.01,
  modelBasePath: '../../models',
  filter: { enabled: true, equalization: true }, // lets run with histogram equalizer
  debug: true,
  face: {
    enabled: true,
    detector: { rotation: true, return: true, mask: false }, // return tensor is used to get detected face image
    description: { enabled: true }, // default model for face descriptor extraction is faceres
    // mobilefacenet: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/mobilefacenet.json' }, // alternative model
    // insightface: { enabled: true, modelPath: 'https://vladmandic.github.io/insightface/models/insightface-mobilenet-swish.json' }, // alternative model
    iris: { enabled: true }, // needed to determine gaze direction
    emotion: { enabled: false }, // not needed
    antispoof: { enabled: true }, // enable optional antispoof module
    liveness: { enabled: true }, // enable optional liveness module
  },
  body: { enabled: false }, // body/hand/object detection not needed for faceid
  hand: { enabled: false },
  object: { enabled: false },
  gesture: { enabled: true }, // parses face and iris gestures
};
// const matchOptions = { order: 2, multiplier: 1000, min: 0.0, max: 1.0 }; // for embedding model
const matchOptions = { order: 2, multiplier: 25, min: 0.2, max: 0.8 }; // for faceres model
const options = {
  minConfidence: 0.6, // overall face confidence for box, face, gender, real, live
  minSize: 224, // min input to face descriptor model before degradation
  maxTime: 30000, // max time before giving up
  blinkMin: 10, // minimum duration of a valid blink
  blinkMax: 800, // maximum duration of a valid blink
  threshold: 0.5, // minimum similarity
  distanceMin: 0.4, // closest that face is allowed to be to the camera, in meters (face.distance is reported in meters)
  distanceMax: 1.0, // farthest that face is allowed to be to the camera, in meters
  mask: humanConfig.face.detector.mask,
  rotation: humanConfig.face.detector.rotation,
  ...matchOptions,
};
const ok: Record<string, { status: boolean | undefined, val: number }> = { // must meet all rules
  // entries with status=undefined are informational only and never gate the result
  faceCount: { status: false, val: 0 },
  faceConfidence: { status: false, val: 0 },
  facingCenter: { status: false, val: 0 },
  lookingCenter: { status: false, val: 0 },
  blinkDetected: { status: false, val: 0 },
  faceSize: { status: false, val: 0 },
  antispoofCheck: { status: false, val: 0 },
  livenessCheck: { status: false, val: 0 },
  distance: { status: false, val: 0 },
  age: { status: false, val: 0 },
  gender: { status: false, val: 0 },
  timeout: { status: true, val: 0 }, // starts true; flips false once options.maxTime is exceeded
  descriptor: { status: false, val: 0 },
  elapsedMs: { status: undefined, val: 0 }, // total time while waiting for valid face
  detectFPS: { status: undefined, val: 0 }, // mark detection fps performance
  drawFPS: { status: undefined, val: 0 }, // mark redraw fps performance
};
const allOk = () => [ // true only when every gating validation check currently passes
  ok.faceCount,
  ok.faceSize,
  ok.blinkDetected,
  ok.facingCenter,
  ok.lookingCenter,
  ok.faceConfidence,
  ok.antispoofCheck,
  ok.livenessCheck,
  ok.distance,
  ok.descriptor,
  ok.age,
  ok.gender,
].every((check) => check.status);
const current: { face: H.FaceResult | null, record: indexDb.FaceRecord | null } = { face: null, record: null }; // current face record and matched database record
const blink = { // internal timers for blink start/end/duration
  start: 0,
  end: 0,
  time: 0,
};
// let db: Array<{ name: string, source: string, embedding: number[] }> = []; // holds loaded face descriptor database
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
human.env.perfadd = false; // is performance data showing instant or total values
human.draw.options.font = 'small-caps 18px "Lato"'; // set font used to draw labels when using draw methods
human.draw.options.lineHeight = 20; // vertical spacing between drawn label lines
const dom = { // grab instances of dom objects so we dont have to look them up later
  video: document.getElementById('video') as HTMLVideoElement,
  canvas: document.getElementById('canvas') as HTMLCanvasElement,
  log: document.getElementById('log') as HTMLPreElement,
  fps: document.getElementById('fps') as HTMLPreElement,
  match: document.getElementById('match') as HTMLDivElement,
  name: document.getElementById('name') as HTMLInputElement,
  save: document.getElementById('save') as HTMLSpanElement,
  delete: document.getElementById('delete') as HTMLSpanElement,
  retry: document.getElementById('retry') as HTMLDivElement,
  source: document.getElementById('source') as HTMLCanvasElement,
  ok: document.getElementById('ok') as HTMLDivElement,
};
const timestamp = { detect: 0, draw: 0 }; // holds information used to calculate performance and possible memory leaks
let startTime = 0; // set when the validation loop starts; used to compute elapsedMs for the timeout check
const log = (...msg) => { // append a message to the on-screen log element and mirror it to the console
  const line = msg.join(' ');
  dom.log.innerText += `${line}\n`;
  console.log(...msg); // eslint-disable-line no-console
};
async function webCam() { // acquire the webcam stream, bind it to the video element, size overlay canvas
  // @ts-ignore resizeMode is not yet defined in tslib
  const constraints: MediaStreamConstraints = { audio: false, video: { facingMode: 'user', resizeMode: 'none', width: { ideal: document.body.clientWidth } } };
  const stream: MediaStream = await navigator.mediaDevices.getUserMedia(constraints);
  const loaded = new Promise((resolve) => { dom.video.onloadeddata = () => resolve(true); });
  dom.video.srcObject = stream;
  void dom.video.play();
  await loaded; // wait until the first frame is available before reading video dimensions
  dom.canvas.width = dom.video.videoWidth;
  dom.canvas.height = dom.video.videoHeight;
  dom.canvas.style.width = '50%';
  dom.canvas.style.height = '50%';
  if (human.env.initial) log('video:', dom.video.videoWidth, dom.video.videoHeight, '|', stream.getVideoTracks()[0].label);
  dom.canvas.onclick = () => { // pause when clicked on screen and resume on next click
    if (dom.video.paused) void dom.video.play();
    else dom.video.pause();
  };
}
async function detectionLoop() { // main detection loop; reschedules itself while video is playing
  if (dom.video.paused) return; // paused video ends the loop
  if (current.face?.tensor) human.tf.dispose(current.face.tensor); // release tensor kept from previous frame
  await human.detect(dom.video); // result is not captured locally; it is reachable via human.result
  const t = human.now();
  ok.detectFPS.val = Math.round(10000 / (t - timestamp.detect)) / 10;
  timestamp.detect = t;
  requestAnimationFrame(detectionLoop); // schedule next frame immediately
}
function drawValidationTests() { // render or refresh the per-check status badges along the screen edge
  let offset = 32;
  for (const [key, val] of Object.entries(ok)) {
    let badge = document.getElementById(`ok-${key}`);
    if (!badge) { // create the badge element on first use
      badge = document.createElement('div');
      badge.id = `ok-${key}`;
      badge.innerText = key;
      badge.className = 'ok';
      badge.style.top = `${offset}px`;
      dom.ok.appendChild(badge);
    }
    // only gating checks (boolean status) get a pass/fail background color
    if (typeof val.status === 'boolean') badge.style.backgroundColor = val.status ? 'lightgreen' : 'lightcoral';
    badge.innerText = `${key}: ${val.val === 0 ? (val.status ? 'ok' : 'fail') : val.val}`;
    offset += 28;
  }
}
async function validationLoop(): Promise<H.FaceResult> { // main screen refresh loop
  // Draws the current frame and overlays, evaluates every gating rule against the latest
  // detection result, then re-schedules itself (~30 fps) until all rules pass or timeout.
  const interpolated = human.next(human.result); // smoothen result using last-known results
  human.draw.canvas(dom.video, dom.canvas); // draw canvas to screen
  await human.draw.all(dom.canvas, interpolated); // draw labels, boxes, lines, etc.
  const now = human.now();
  ok.drawFPS.val = Math.round(10000 / (now - timestamp.draw)) / 10;
  timestamp.draw = now;
  ok.faceCount.val = human.result.face.length;
  ok.faceCount.status = ok.faceCount.val === 1; // must be exactly detected face
  if (ok.faceCount.status) { // skip the rest if no face
    const gestures: string[] = Object.values(human.result.gesture).map((gesture: H.GestureResult) => gesture.gesture); // flatten all gestures
    if (gestures.includes('blink left eye') || gestures.includes('blink right eye')) blink.start = human.now(); // blink starts when eyes get closed
    if (blink.start > 0 && !gestures.includes('blink left eye') && !gestures.includes('blink right eye')) blink.end = human.now(); // if blink started how long until eyes are back open
    // latches to true once a blink of valid duration has been observed
    ok.blinkDetected.status = ok.blinkDetected.status || (Math.abs(blink.end - blink.start) > options.blinkMin && Math.abs(blink.end - blink.start) < options.blinkMax);
    if (ok.blinkDetected.status && blink.time === 0) blink.time = Math.trunc(blink.end - blink.start);
    ok.facingCenter.status = gestures.includes('facing center');
    ok.lookingCenter.status = gestures.includes('looking center'); // must face camera and look at camera
    ok.faceConfidence.val = human.result.face[0].faceScore || human.result.face[0].boxScore || 0;
    ok.faceConfidence.status = ok.faceConfidence.val >= options.minConfidence;
    ok.antispoofCheck.val = human.result.face[0].real || 0;
    ok.antispoofCheck.status = ok.antispoofCheck.val >= options.minConfidence;
    ok.livenessCheck.val = human.result.face[0].live || 0;
    ok.livenessCheck.status = ok.livenessCheck.val >= options.minConfidence;
    ok.faceSize.val = Math.min(human.result.face[0].box[2], human.result.face[0].box[3]);
    ok.faceSize.status = ok.faceSize.val >= options.minSize;
    ok.distance.val = human.result.face[0].distance || 0;
    ok.distance.status = (ok.distance.val >= options.distanceMin) && (ok.distance.val <= options.distanceMax);
    ok.descriptor.val = human.result.face[0].embedding?.length || 0;
    ok.descriptor.status = ok.descriptor.val > 0;
    ok.age.val = human.result.face[0].age || 0;
    ok.age.status = ok.age.val > 0;
    ok.gender.val = human.result.face[0].genderScore || 0;
    ok.gender.status = ok.gender.val >= options.minConfidence;
  }
  // run again
  ok.timeout.status = ok.elapsedMs.val <= options.maxTime;
  drawValidationTests();
  if (allOk() || !ok.timeout.status) { // all criteria met (or timed out): stop video and hand back the face
    dom.video.pause();
    return human.result.face[0];
  }
  ok.elapsedMs.val = Math.trunc(human.now() - startTime);
  return new Promise((resolve) => {
    setTimeout(async () => {
      await validationLoop(); // run validation loop until conditions are met
      resolve(human.result.face[0]); // recursive promise resolve
    }, 30); // use to slow down refresh from max refresh rate to target of 30 fps
  });
}
async function saveRecords() { // persist current face descriptor and canvas snapshot under the entered name
  if (dom.name.value.length === 0) { // guard: a non-empty name is required
    log('invalid name');
    return;
  }
  const image = dom.canvas.getContext('2d')?.getImageData(0, 0, dom.canvas.width, dom.canvas.height) as ImageData;
  const rec = { id: 0, name: dom.name.value, descriptor: current.face?.embedding as number[], image };
  await indexDb.save(rec);
  log('saved face record:', rec.name, 'descriptor length:', current.face?.embedding?.length);
  log('known face records:', await indexDb.count());
}
async function deleteRecord() { // remove the currently matched record from indexdb, if one exists
  if (!current.record || current.record.id <= 0) return;
  await indexDb.remove(current.record);
}
async function detectFace() {
  // Render the captured face, then compare its descriptor against all stored records.
  // Returns true when the best match exceeds options.threshold.
  dom.canvas.style.height = '';
  dom.canvas.getContext('2d')?.clearRect(0, 0, options.minSize, options.minSize);
  if (!current?.face?.tensor || !current?.face?.embedding) return false; // nothing was captured
  console.log('face record:', current.face); // eslint-disable-line no-console
  log(`detected face: ${current.face.gender} ${current.face.age || 0}y distance ${100 * (current.face.distance || 0)}cm/${Math.round(100 * (current.face.distance || 0) / 2.54)}in`);
  await human.draw.tensor(current.face.tensor, dom.canvas);
  if (await indexDb.count() === 0) { // no stored faces to compare against
    log('face database is empty: nothing to compare face with');
    document.body.style.background = 'black';
    dom.delete.style.display = 'none';
    return false;
  }
  // fix: filter the record list itself rather than only the descriptor list; the original
  // filtered descriptors but then indexed the unfiltered db with res.index, which maps to
  // the wrong record whenever any stored record has an empty descriptor
  const db = (await indexDb.load()).filter((rec) => (rec.descriptor?.length || 0) > 0);
  const descriptors = db.map((rec) => rec.descriptor);
  const res = human.match.find(current.face.embedding, descriptors, matchOptions);
  current.record = db[res.index] || null;
  if (current.record) {
    log(`best match: ${current.record.name} | id: ${current.record.id} | similarity: ${Math.round(1000 * res.similarity) / 10}%`);
    dom.name.value = current.record.name;
    dom.source.style.display = '';
    dom.source.getContext('2d')?.putImageData(current.record.image, 0, 0); // show stored snapshot of the matched person
  }
  document.body.style.background = res.similarity > options.threshold ? 'darkgreen' : 'maroon'; // green = accepted, red = rejected
  return res.similarity > options.threshold;
}
async function main() { // main entry point
  // Resets all validation flags and UI state, captures one validated face from the
  // webcam, then runs a match against the stored face records.
  ok.faceCount.status = false;
  ok.faceConfidence.status = false;
  ok.facingCenter.status = false;
  ok.blinkDetected.status = false;
  ok.faceSize.status = false;
  ok.antispoofCheck.status = false;
  ok.livenessCheck.status = false;
  ok.age.status = false;
  ok.gender.status = false;
  ok.elapsedMs.val = 0;
  dom.match.style.display = 'none';
  dom.retry.style.display = 'none';
  dom.source.style.display = 'none';
  dom.canvas.style.height = '50%';
  document.body.style.background = 'black';
  await webCam();
  await detectionLoop(); // start detection loop
  startTime = human.now();
  current.face = await validationLoop(); // start validation loop
  // size both canvases to the captured face tensor dimensions
  dom.canvas.width = current.face?.tensor?.shape[1] || options.minSize;
  dom.canvas.height = current.face?.tensor?.shape[0] || options.minSize;
  dom.source.width = dom.canvas.width;
  dom.source.height = dom.canvas.height;
  dom.canvas.style.width = '';
  dom.match.style.display = 'flex';
  dom.save.style.display = 'flex';
  dom.delete.style.display = 'flex';
  dom.retry.style.display = 'block';
  if (!allOk()) { // is all criteria met?
    log('did not find valid face');
    return false;
  }
  return detectFace();
}
async function init() {
  // One-time page initialization: log environment, start webcam, preload models,
  // wire up buttons, then run the first capture/match cycle.
  log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
  log('options:', JSON.stringify(options).replace(/{|}|"|\[|\]/g, '').replace(/,/g, ' '));
  log('initializing webcam...');
  await webCam(); // start webcam
  log('loading human models...');
  await human.load(); // preload all models
  log('initializing human...');
  log('face embedding model:', humanConfig.face.description.enabled ? 'faceres' : '', humanConfig.face['mobilefacenet']?.enabled ? 'mobilefacenet' : '', humanConfig.face['insightface']?.enabled ? 'insightface' : '');
  log('loading face database...');
  log('known face records:', await indexDb.count());
  dom.retry.addEventListener('click', main); // retry re-runs the whole capture/match cycle
  dom.save.addEventListener('click', saveRecords);
  dom.delete.addEventListener('click', deleteRecord);
  await human.warmup(); // warmup function to initialize backend for future faster detection
  await main();
}
window.onload = init; // bootstrap once the page has fully loaded

65
demo/faceid/indexdb.ts Normal file
View File

@ -0,0 +1,65 @@
let db: IDBDatabase; // instance of indexdb; set lazily by open()
const database = 'human'; // indexdb database name
const table = 'person'; // object store name
export interface FaceRecord { id: number, name: string, descriptor: number[], image: ImageData } // stored face: autoincrement id, user label, embedding vector, canvas snapshot
const log = (...msg) => console.log('indexdb', ...msg); // eslint-disable-line no-console
export async function open() {
  // Open (or create on first use) the indexdb database; resolves true once ready.
  if (db) return true;
  return new Promise((resolve) => {
    const request: IDBOpenDBRequest = indexedDB.open(database, 1);
    request.onerror = (evt) => { // fix: settle the promise on failure; original only logged, leaving every awaiting caller hung forever
      log('error:', evt);
      resolve(false);
    };
    request.onupgradeneeded = (evt: IDBVersionChangeEvent) => { // create object store if it doesnt exist yet
      log('create:', evt.target);
      db = (evt.target as IDBOpenDBRequest).result;
      db.createObjectStore(table, { keyPath: 'id', autoIncrement: true });
    };
    request.onsuccess = (evt) => { // open existing database
      db = (evt.target as IDBOpenDBRequest).result;
      log('open:', db);
      resolve(true);
    };
  });
}
export async function load(): Promise<FaceRecord[]> {
  // Read all face records by iterating a cursor over the object store.
  const faceDB: FaceRecord[] = [];
  if (!db) await open(); // open or create if not already done
  return new Promise((resolve) => {
    // fix: a read-only cursor does not need a 'readwrite' transaction lock
    const cursor: IDBRequest = db.transaction([table], 'readonly').objectStore(table).openCursor(null, 'next');
    cursor.onerror = (evt) => { // fix: resolve with whatever was read so callers do not hang on error
      log('load error:', evt);
      resolve(faceDB);
    };
    cursor.onsuccess = (evt) => {
      if ((evt.target as IDBRequest).result) { // accumulate current value and advance cursor
        faceDB.push((evt.target as IDBRequest).result.value);
        (evt.target as IDBRequest).result.continue();
      } else {
        resolve(faceDB); // cursor exhausted
      }
    };
  });
}
export async function count(): Promise<number> {
  // Return the number of stored face records.
  if (!db) await open(); // open or create if not already done
  return new Promise((resolve) => {
    // fix: counting is read-only, so a 'readwrite' transaction lock is unnecessary
    const store: IDBRequest = db.transaction([table], 'readonly').objectStore(table).count();
    store.onerror = (evt) => { // fix: resolve 0 on error instead of leaving the promise pending forever
      log('count error:', evt);
      resolve(0);
    };
    store.onsuccess = () => resolve(store.result);
  });
}
export async function save(faceRecord: FaceRecord) {
  // Persist a face record; id is omitted so indexdb assigns an autoincrement key.
  if (!db) await open(); // open or create if not already done
  const record = { name: faceRecord.name, descriptor: faceRecord.descriptor, image: faceRecord.image };
  db.transaction([table], 'readwrite').objectStore(table).put(record);
  log('save:', record);
}
export async function remove(faceRecord: FaceRecord) {
  // Delete the stored record whose key matches faceRecord.id.
  if (!db) await open(); // open or create if not already done
  db.transaction([table], 'readwrite').objectStore(table).delete(faceRecord.id);
  log('delete:', faceRecord);
}

84
demo/facematch/README.md Normal file
View File

@ -0,0 +1,84 @@
# Human Face Recognition & Matching
- **Browser** demo: `index.html` & `facematch.js`:
Loads sample images, extracts faces and runs match and similarity analysis
- **NodeJS** demo `node-match.js` and `node-match-worker.js`
Advanced multithreading demo that runs number of worker threads to process high number of matches
- Sample face database: `faces.json`
<br>
## Browser Face Recognition Demo
- `demo/facematch`: Demo for Browsers that uses all face description and embedding features to
detect, extract and identify all faces plus calculate similarity between them
It highlights functionality such as:
- Loading images
- Extracting faces from images
- Calculating face embedding descriptors
- Finding face similarity and sorting them by similarity
- Finding best face match based on a known list of faces and printing matches
<br>
## NodeJS Multi-Threading Match Solution
### Methods and Properties in `node-match`
- `createBuffer`: create shared buffer array
single copy of data regardless of number of workers
fixed size based on `options.dbMax`
- `appendRecord`: add additional batch of descriptors to buffer
can append batch of records to buffer at anytime
workers are informed of the new content after append has been completed
- `workersStart`: start or expand pool of `threadPoolSize` workers
each worker runs `node-match-worker` and listens for messages from main thread
can shutdown workers or create additional worker threads on-the-fly
safe against workers that exit
- `workersClose`: close workers in a pool
first request workers to exit then terminate after timeout
- `match`: dispatch a match job to a worker
returns first match that satisfies `minThreshold`
assignment to workers using round-robin
since timing for each job is near-fixed and predictable
- `getDescriptor`: get descriptor array for a given id from a buffer
- `fuzDescriptor`: small randomize descriptor content for harder match
- `getLabel`: fetch label for resolved descriptor index
- `loadDB`: load face database from a JSON file `dbFile`
extracts descriptors and adds them to buffer
extracts labels and maintains them in main thread
for test purposes loads same database `dbFact` times to create a very large database
`node-match` runs in a loop and listens for messages from workers until `maxJobs` have been reached
### Performance
Linear performance decrease that depends on number of records in database
Non-linear performance that increases with number of worker threads due to communication overhead
- Face database with 10k records:
> threadPoolSize: 1 => ~60 ms / match job
> threadPoolSize: 6 => ~25 ms / match job
- Face database with 50k records:
> threadPoolSize: 1 => ~300 ms / match job
> threadPoolSize: 6 => ~100 ms / match job
- Face database with 100k records:
> threadPoolSize: 1 => ~600 ms / match job
> threadPoolSize: 6 => ~200 ms / match job
### Example
> node node-match
<!-- eslint-skip -->
```js
INFO: options: { dbFile: './faces.json', dbMax: 10000, threadPoolSize: 6, workerSrc: './node-match-worker.js', debug: false, minThreshold: 0.9, descLength: 1024 }
DATA: created shared buffer: { maxDescriptors: 10000, totalBytes: 40960000, totalElements: 10240000 }
DATA: db loaded: { existingRecords: 0, newRecords: 5700 }
INFO: starting worker thread pool: { totalWorkers: 6, alreadyActive: 0 }
STATE: submitted: { matchJobs: 100, poolSize: 6, activeWorkers: 6 }
STATE: { matchJobsFinished: 100, totalTimeMs: 1769, averageTimeMs: 17.69 }
INFO: closing workers: { poolSize: 6, activeWorkers: 6 }
```

257
demo/facematch/facematch.js Normal file
View File

@ -0,0 +1,257 @@
/**
* Human demo for browsers
*
* Demo for face descriptor analysis and face similarity analysis
*/
/** @type {Human} */
import { Human } from '../../dist/human.esm.js';
const userConfig = { // human configuration used for multi-face still-image matching
  backend: 'humangl',
  async: true,
  warmup: 'none',
  cacheSensitivity: 0.01,
  debug: true,
  modelBasePath: '../../models/',
  deallocate: true,
  filter: {
    enabled: true,
    equalization: true,
    width: 0,
  },
  face: {
    enabled: true,
    detector: { return: true, rotation: true, maxDetected: 50, iouThreshold: 0.01, minConfidence: 0.2 }, // allow up to 50 faces per image with permissive thresholds
    mesh: { enabled: true },
    iris: { enabled: false },
    emotion: { enabled: true },
    description: { enabled: true }, // descriptor embedding is required for similarity/matching
  },
  hand: { enabled: false },
  gesture: { enabled: false },
  body: { enabled: false },
  segmentation: { enabled: false },
};
const human = new Human(userConfig); // new instance of human
const all = []; // array that will hold all detected faces
let db = []; // array that holds all known faces
const minScore = 0.4; // minimum similarity score for a db match to be shown
function log(...msg) {
  // Prefix console output with a hh:mm:ss.mmm timestamp.
  const now = new Date();
  const pad = (value, width) => value.toString().padStart(width, '0');
  const ts = `${pad(now.getHours(), 2)}:${pad(now.getMinutes(), 2)}:${pad(now.getSeconds(), 2)}.${pad(now.getMilliseconds(), 3)}`;
  console.log(ts, ...msg); // eslint-disable-line no-console
}
function title(msg) {
  // Update the page heading element.
  const el = document.getElementById('title');
  el.innerHTML = msg;
}
async function loadFaceMatchDB() {
  // Download db with known faces: try the local path first, fall back to the hosted path.
  try {
    const primary = await fetch('/demo/facematch/faces.json');
    const res = (primary && primary.ok) ? primary : await fetch('/human/demo/facematch/faces.json');
    db = (res && res.ok) ? await res.json() : [];
    log('Loaded Faces DB:', db);
  } catch (err) {
    log('Could not load faces database', err);
  }
}
/**
 * Handle selection of a face: display the enhanced face crop, show its best db match,
 * copy a ready-to-paste db record (name/source/embedding) to the clipboard, and
 * re-sort every face canvas on the page by similarity to the selected face.
 * @param {object} face single face record previously stored in `all`
 */
async function selectFaceCanvas(face) {
  // if we have face image tensor, enhance it and display it
  let embedding;
  document.getElementById('orig').style.filter = 'blur(16px)';
  if (face.tensor) {
    title('Sorting Faces by Similarity');
    const c = document.getElementById('orig');
    await human.draw.tensor(face.tensor, c);
    const arr = db.map((rec) => rec.embedding);
    const res = await human.match.find(face.embedding, arr);
    log('Match:', db[res.index].name);
    const emotion = face.emotion[0] ? `${Math.round(100 * face.emotion[0].score)}% ${face.emotion[0].emotion}` : 'N/A';
    document.getElementById('desc').innerHTML = `
      source: ${face.fileName}<br>
      match: ${Math.round(1000 * res.similarity) / 10}% ${db[res.index].name}<br>
      score: ${Math.round(100 * face.boxScore)}% detection ${Math.round(100 * face.faceScore)}% analysis<br>
      age: ${face.age} years<br>
      gender: ${Math.round(100 * face.genderScore)}% ${face.gender}<br>
      emotion: ${emotion}<br>
    `;
    // round embedding values and copy a db-record json snippet so it can be pasted into faces.json
    embedding = face.embedding.map((a) => parseFloat(a.toFixed(4)));
    navigator.clipboard.writeText(`{"name":"unknown", "source":"${face.fileName}", "embedding":[${embedding}]},`);
  }
  // loop through all canvases that contain faces
  const canvases = document.getElementsByClassName('face');
  let time = 0;
  for (const canvas of canvases) {
    // calculate similarity from selected face to current one in the loop
    const current = all[canvas.tag.sample][canvas.tag.face];
    const similarity = human.match.similarity(face.embedding, current.embedding);
    canvas.tag.similarity = similarity;
    // draw the canvas with the similarity overlay (black text offset by white for contrast)
    await human.draw.tensor(current.tensor, canvas);
    const ctx = canvas.getContext('2d');
    ctx.font = 'small-caps 1rem "Lato"';
    ctx.fillStyle = 'rgba(0, 0, 0, 1)';
    ctx.fillText(`${(100 * similarity).toFixed(1)}%`, 3, 23);
    ctx.fillStyle = 'rgba(255, 255, 255, 1)';
    ctx.fillText(`${(100 * similarity).toFixed(1)}%`, 4, 24);
    ctx.font = 'small-caps 0.8rem "Lato"';
    ctx.fillText(`${current.age}y ${(100 * (current.genderScore || 0)).toFixed(1)}% ${current.gender}`, 4, canvas.height - 6);
    // identify person against the known-faces db and time the lookups
    ctx.font = 'small-caps 1rem "Lato"';
    const start = human.now();
    const arr = db.map((rec) => rec.embedding);
    const res = await human.match.find(current.embedding, arr);
    time += (human.now() - start);
    if (res.similarity > minScore) ctx.fillText(`DB: ${(100 * res.similarity).toFixed(1)}% ${db[res.index].name}`, 4, canvas.height - 30);
  }
  log('Analyzed:', 'Face:', canvases.length, 'DB:', db.length, 'Time:', time);
  // sort all faces by similarity
  const sorted = document.getElementById('faces');
  [...sorted.children]
    .sort((a, b) => parseFloat(b.tag.similarity) - parseFloat(a.tag.similarity))
    .forEach((canvas) => sorted.appendChild(canvas));
  document.getElementById('orig').style.filter = 'blur(0)';
  title('Selected Face');
}
/**
 * Render one canvas per detected face in a processed image, overlay age/gender and
 * the best db match, and register a click handler that selects that face.
 * @param {number} index sample image index inside the global `all` array
 * @param {object} res human detection result for that image
 * @param {string} fileName source image path (stored on each face record)
 */
async function addFaceCanvas(index, res, fileName) {
  all[index] = res.face;
  for (const i in res.face) {
    if (!res.face[i].tensor) continue; // did not get valid results
    if ((res.face[i].faceScore || 0) < human.config.face.detector.minConfidence) continue; // face analysis score too low
    all[index][i].fileName = fileName;
    const canvas = document.createElement('canvas');
    canvas.tag = { sample: index, face: i, source: fileName };
    canvas.width = 200;
    canvas.height = 200;
    canvas.className = 'face';
    const emotion = res.face[i].emotion[0] ? `${Math.round(100 * res.face[i].emotion[0].score)}% ${res.face[i].emotion[0].emotion}` : 'N/A';
    canvas.title = `
      source: ${res.face[i].fileName}
      score: ${Math.round(100 * res.face[i].boxScore)}% detection ${Math.round(100 * res.face[i].faceScore)}% analysis
      age: ${res.face[i].age} years
      gender: ${Math.round(100 * res.face[i].genderScore)}% ${res.face[i].gender}
      emotion: ${emotion}
    `.replace(/ /g, ' ');
    await human.draw.tensor(res.face[i].tensor, canvas);
    const ctx = canvas.getContext('2d');
    if (!ctx) return;
    ctx.font = 'small-caps 0.8rem "Lato"';
    ctx.fillStyle = 'rgba(255, 255, 255, 1)';
    ctx.fillText(`${res.face[i].age}y ${(100 * (res.face[i].genderScore || 0)).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6);
    const arr = db.map((rec) => rec.embedding);
    // fix: human.match.find is async (it is awaited everywhere else in this file) and the
    // previous condition read `res.similarity`, which does not exist on the detection result
    const result = await human.match.find(res.face[i].embedding, arr);
    ctx.font = 'small-caps 1rem "Lato"';
    if (result.similarity > minScore) ctx.fillText(`${(100 * result.similarity).toFixed(1)}% ${db[result.index].name}`, 4, canvas.height - 30);
    document.getElementById('faces').appendChild(canvas);
    canvas.addEventListener('click', (evt) => {
      log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, 'Source:', evt.target.tag.source, all[evt.target.tag.sample][evt.target.tag.face]);
      selectFaceCanvas(all[evt.target.tag.sample][evt.target.tag.face]);
    });
  }
}
/**
 * Load one sample image, run human detection on it, and add the resulting face canvases.
 * @param {number} index image index (used for progress display and as key into `all`)
 * @param {string} image image url
 * @param {number} length total number of images (for progress display)
 * @returns {Promise<boolean>} resolves true when the image was processed, false on load error
 */
async function addImageElement(index, image, length) {
  const faces = all.reduce((prev, curr) => prev + curr.length, 0);
  title(`Analyzing Input Images<br> ${Math.round(100 * index / length)}% [${index} / ${length}]<br>Found ${faces} Faces`);
  return new Promise((resolve) => {
    const img = new Image(128, 128);
    img.onload = () => { // must wait until image is loaded
      document.getElementById('images').appendChild(img); // and finally we can add it
      human.detect(img, userConfig)
        .then(async (res) => { // eslint-disable-line promise/always-return
          // fix: wait for all face canvases to render before resolving so progress
          // counts and the per-image sequencing in main() stay accurate
          await addFaceCanvas(index, res, image);
          resolve(true);
        })
        .catch(() => log('human detect error'));
    };
    img.onerror = () => {
      log('Add image error:', index + 1, image);
      resolve(false);
    };
    img.title = image;
    img.src = encodeURI(image);
  });
}
/** Populate the in-memory face db from every face detected so far (used when faces.json could not be downloaded). */
function createFaceMatchDB() {
  log('Creating Faces DB...');
  all.forEach((image) => image.forEach((face) => db.push({ name: 'unknown', source: face.fileName, embedding: face.embedding })));
  log(db);
}
/**
 * Demo entry point: loads models and the face db, enumerates sample images
 * (dynamically from /samples/in, falling back to a static list), runs detection
 * on each image sequentially, and prints extraction stats.
 */
async function main() {
  // pre-load human models
  await human.load();
  title('Loading Face Match Database');
  let images = [];
  let dir = [];
  // load face descriptor database
  await loadFaceMatchDB();
  // enumerate all sample images in /assets
  title('Enumerating Input Images');
  const res = await fetch('/samples/in');
  dir = (res && res.ok) ? await res.json() : [];
  images = images.concat(dir.filter((img) => (img.endsWith('.jpg') && img.includes('sample'))));
  // could not dynamically enumerate images so using static list
  if (images.length === 0) {
    images = [
      'ai-face.jpg', 'ai-upper.jpg', 'ai-body.jpg', 'solvay1927.jpg',
      'group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg',
      'person-celeste.jpg', 'person-christina.jpg', 'person-lauren.jpg', 'person-lexi.jpg', 'person-linda.jpg', 'person-nicole.jpg', 'person-tasia.jpg', 'person-tetiana.jpg', 'person-vlado.jpg', 'person-vlado1.jpg', 'person-vlado5.jpg',
      'stock-group-1.jpg', 'stock-group-2.jpg',
      'stock-models-1.jpg', 'stock-models-2.jpg', 'stock-models-3.jpg', 'stock-models-4.jpg', 'stock-models-5.jpg', 'stock-models-6.jpg', 'stock-models-7.jpg', 'stock-models-8.jpg', 'stock-models-9.jpg',
      'stock-teen-1.jpg', 'stock-teen-2.jpg', 'stock-teen-3.jpg', 'stock-teen-4.jpg', 'stock-teen-5.jpg', 'stock-teen-6.jpg', 'stock-teen-7.jpg', 'stock-teen-8.jpg',
      'stock-models-10.jpg', 'stock-models-11.jpg', 'stock-models-12.jpg', 'stock-models-13.jpg', 'stock-models-14.jpg', 'stock-models-15.jpg', 'stock-models-16.jpg',
      'cgi-model-1.jpg', 'cgi-model-2.jpg', 'cgi-model-3.jpg', 'cgi-model-4.jpg', 'cgi-model-5.jpg', 'cgi-model-6.jpg', 'cgi-model-7.jpg', 'cgi-model-8.jpg', 'cgi-model-9.jpg',
      'cgi-model-10.jpg', 'cgi-model-11.jpg', 'cgi-model-12.jpg', 'cgi-model-13.jpg', 'cgi-model-14.jpg', 'cgi-model-15.jpg', 'cgi-model-18.jpg', 'cgi-model-19.jpg',
      'cgi-model-20.jpg', 'cgi-model-21.jpg', 'cgi-model-22.jpg', 'cgi-model-23.jpg', 'cgi-model-24.jpg', 'cgi-model-25.jpg', 'cgi-model-26.jpg', 'cgi-model-27.jpg', 'cgi-model-28.jpg', 'cgi-model-29.jpg',
      'cgi-model-30.jpg', 'cgi-model-31.jpg', 'cgi-model-33.jpg', 'cgi-model-34.jpg',
      'cgi-multiangle-1.jpg', 'cgi-multiangle-2.jpg', 'cgi-multiangle-3.jpg', 'cgi-multiangle-4.jpg', 'cgi-multiangle-6.jpg', 'cgi-multiangle-7.jpg', 'cgi-multiangle-8.jpg', 'cgi-multiangle-9.jpg', 'cgi-multiangle-10.jpg', 'cgi-multiangle-11.jpg',
      'stock-emotions-a-1.jpg', 'stock-emotions-a-2.jpg', 'stock-emotions-a-3.jpg', 'stock-emotions-a-4.jpg', 'stock-emotions-a-5.jpg', 'stock-emotions-a-6.jpg', 'stock-emotions-a-7.jpg', 'stock-emotions-a-8.jpg',
      'stock-emotions-b-1.jpg', 'stock-emotions-b-2.jpg', 'stock-emotions-b-3.jpg', 'stock-emotions-b-4.jpg', 'stock-emotions-b-5.jpg', 'stock-emotions-b-6.jpg', 'stock-emotions-b-7.jpg', 'stock-emotions-b-8.jpg',
    ];
    // add prefix for gitpages
    images = images.map((a) => `../../samples/in/${a}`);
    log('Adding static image list:', images);
  } else {
    log('Discovered images:', images);
  }
  // images = ['/samples/in/person-lexi.jpg', '/samples/in/person-carolina.jpg', '/samples/in/solvay1927.jpg'];
  // process images sequentially (detection is resource-heavy)
  const t0 = human.now();
  for (let i = 0; i < images.length; i++) await addImageElement(i, images[i], images.length);
  const t1 = human.now();
  // print stats
  const num = all.reduce((prev, cur) => prev += cur.length, 0);
  log('Extracted faces:', num, 'from images:', all.length, 'time:', Math.round(t1 - t0));
  log(human.tf.engine().memory());
  // if we didn't download db, generate it from current faces
  if (!db || db.length === 0) createFaceMatchDB();
  title('');
  log('Ready');
  // exercise validation and the match path once so later calls are warm
  human.validate(userConfig);
  human.match.similarity([], []);
}
window.onload = main;

81
demo/facematch/faces.json Normal file

File diff suppressed because one or more lines are too long

50
demo/facematch/index.html Normal file
View File

@ -0,0 +1,50 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<!-- <meta http-equiv="content-type" content="text/html; charset=utf-8"> -->
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./facematch.js" type="module"></script>
<style>
img { object-fit: contain; }
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 24px; font-variant: small-caps; }
body { margin: 24px; background: black; color: white; overflow: hidden; text-align: -webkit-center; min-height: 100%; max-height: 100%; }
::-webkit-scrollbar { height: 8px; border: 0; border-radius: 0; }
::-webkit-scrollbar-thumb { background: grey }
::-webkit-scrollbar-track { margin: 3px; }
.orig { width: 200px; height: 200px; padding-bottom: 20px; filter: blur(16px); transition : all 0.5s ease; }
.text { margin: 24px; }
.face { width: 128px; height: 128px; margin: 2px; padding: 2px; cursor: grab; transform: scale(1.00); transition : all 0.3s ease; }
.face:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
</style>
</head>
<body>
<div style="display: block">
<div style="display: flex">
<div style="min-width: 400px">
<div class="text" id="title"></div>
<canvas id="orig" class="orig"></canvas>
<div id="desc" style="font-size: 0.8rem; text-align: left;"></div>
</div>
<div style="width: 20px"></div>
<div>
<div class="text">Input Images</div>
<div id="images" style="display: flex; width: 60vw; overflow-x: auto; overflow-y: hidden; scroll-behavior: smooth"></div>
</div>
</div>
<div id="list" style="height: 10px"></div>
<div class="text">Select person to sort by similarity and get a known face match</div>
<div id="faces" style="height: 50vh; overflow-y: auto"></div>
</div>
</body>
</html>

View File

@ -0,0 +1,76 @@
/**
* Runs in a worker thread started by `node-match` demo app
*
*/
const threads = require('worker_threads');
let debug = false; // verbose logging, toggled via a { debug } message from the parent
/** @type SharedArrayBuffer */
let buffer; // shared descriptor storage received once from the parent thread
/** @type Float32Array */
let view; // float view into `buffer`
let threshold = 0; // similarity short-circuit threshold, set via a { threshold } message
let records = 0; // number of valid records in the buffer, updated via { records } messages
const descLength = 1024; // descriptor length in Float32 elements (not bytes)
/**
 * Distance between a submitted descriptor and the record at `index` in the shared
 * `view` buffer, scaled by `multiplier`. For order=2 this returns the *squared*
 * euclidean distance — the square root is applied later by `match`, not here.
 * @param {ArrayBuffer} descBuffer raw buffer holding the query descriptor as Float32 values
 * @param {number} index record index inside the shared buffer
 */
function distance(descBuffer, index, options = { order: 2, multiplier: 20 }) {
  const descriptor = new Float32Array(descBuffer);
  const offset = index * descLength;
  let sum = 0;
  for (let i = 0; i < descriptor.length; i++) {
    const delta = descriptor[i] - view[offset + i];
    sum += (options.order === 2) ? (delta * delta) : (Math.abs(delta) ** options.order);
  }
  return (options.multiplier || 20) * sum;
}
/**
 * Linear scan over all records for the closest match to the submitted descriptor.
 * Stops early once a distance below `threshold` (or an exact match) is found.
 * @returns {{ index: number, distance: number, similarity: number }} index is -1 when no records exist
 */
function match(descBuffer, options = { order: 2, multiplier: 20 }) {
  let index = -1;
  let best = Number.MAX_SAFE_INTEGER;
  for (let i = 0; i < records; i++) {
    const dist = distance(descBuffer, i, { order: options.order, multiplier: options.multiplier });
    if (dist < best) {
      index = i;
      best = dist;
    }
    if (best < threshold || best === 0) break; // short circuit once good enough
  }
  // undo the squaring (or the order power) so `best` is an actual distance
  best = (options.order === 2) ? Math.sqrt(best) : best ** (1 / options.order);
  const similarity = Math.round(100 * Math.max(0, 100 - best) / 100.0) / 100;
  return { index, distance: best, similarity };
}
// message dispatcher: the parent sends either a work order (object with a descriptor),
// the shared buffer reference, or configuration updates (records/debug/threshold/shutdown)
threads.parentPort?.on('message', (msg) => {
  if (typeof msg.descriptor !== 'undefined') { // actual work order to find a match
    const t0 = performance.now();
    const result = match(msg.descriptor);
    const t1 = performance.now();
    threads.parentPort?.postMessage({ request: msg.request, time: Math.trunc(t1 - t0), ...result });
    return; // short circuit
  }
  if (msg instanceof SharedArrayBuffer) { // called only once to receive reference to shared array buffer
    buffer = msg;
    view = new Float32Array(buffer); // initialize float32 view into buffer
    if (debug) threads.parentPort?.postMessage(`buffer: ${buffer.byteLength}`);
  }
  if (typeof msg.records !== 'undefined') { // received every time when number of records changes
    records = msg.records;
    if (debug) threads.parentPort?.postMessage(`records: ${records}`);
  }
  if (typeof msg.debug !== 'undefined') { // set verbose logging
    debug = msg.debug;
    // if (debug) threads.parentPort?.postMessage(`debug: ${debug}`);
  }
  if (typeof msg.threshold !== 'undefined') { // set minimum similarity threshold
    threshold = msg.threshold;
    // if (debug) threads.parentPort?.postMessage(`threshold: ${threshold}`);
  }
  if (typeof msg.shutdown !== 'undefined') { // got message to close worker
    if (debug) threads.parentPort?.postMessage('shutting down');
    process.exit(0); // eslint-disable-line no-process-exit
  }
});
// note: debug defaults to false, so this startup message is a no-op unless the default changes
if (debug) threads.parentPort?.postMessage('started');

View File

@ -0,0 +1,184 @@
/**
* Human demo app for NodeJS that generates random facial descriptors
* and uses NodeJS multi-threading to start multiple threads for face matching
* uses `node-match-worker.js` to perform actual face matching analysis
*/
const fs = require('fs');
const path = require('path');
const threads = require('worker_threads');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// global options
const options = {
  dbFile: 'demo/facematch/faces.json', // sample face db
  dbMax: 10000, // maximum number of records to hold in memory
  threadPoolSize: 12, // number of worker threads to create in thread pool
  workerSrc: './node-match-worker.js', // code that executes in the worker thread
  debug: true, // verbose messages
  minThreshold: 0.5, // match returns first record that meets the similarity threshold, set to 0 to always scan all records
  descLength: 1024, // descriptor length in Float32 elements
};
// test options
const testOptions = {
  dbFact: 175, // load db n times to fake huge size
  maxJobs: 200, // exit after processing this many jobs
  fuzDescriptors: true, // randomize descriptor content before match for harder jobs
};
// global data structures
const data = {
  /** @type string[] */
  labels: [], // array of strings, length of array serves as overall number of records so has to be maintained carefully
  /** @type SharedArrayBuffer | null */
  buffer: null, // preallocated shared storage holding all descriptors back-to-back
  /** @type Float32Array | null */
  view: null, // float view into `buffer`
  /** @type threads.Worker[] */
  workers: [], // holds instance of workers. worker can be null if exited
  requestID: 0, // each request should increment this counter as its used for round robin assignment
};
let t0 = process.hrtime.bigint(); // used for perf counters
/**
 * Append descriptors and their labels to the shared buffer and notify all live
 * workers of the new record count.
 * @param {string[]} labels one label per descriptor
 * @param {number[][]} descriptors descriptor arrays, stored back-to-back in the shared view
 * @returns {number} total number of records after the append (0 when the buffer is not allocated)
 */
const appendRecords = (labels, descriptors) => {
  if (!data.view) return 0;
  if (descriptors.length !== labels.length) {
    log.error('append error:', { descriptors: descriptors.length, labels: labels.length });
  }
  descriptors.forEach((descriptor, i) => {
    const offset = data.labels.length * descriptor.length; // current record count * record size
    descriptor.forEach((value, j) => { data.view[offset + j] = value; });
    data.labels.push(labels[i]); // label array length doubles as the record count
  });
  // inform all workers how many records we have
  data.workers.filter((worker) => !!worker).forEach((worker) => worker.postMessage({ records: data.labels.length }));
  return data.labels.length;
};
// label for the record at the given index
const getLabel = (index) => data.labels[index];
/**
 * Extract a single descriptor from the shared buffer as a plain array.
 * Fix: loop bound now uses options.descLength instead of a hard-coded 1024 so it
 * stays consistent with the indexing below when descLength is changed.
 * @param {number} index record index
 * @returns {number[]} descriptor values (empty when the buffer is not allocated)
 */
const getDescriptor = (index) => {
  if (!data.view) return [];
  const descriptor = [];
  for (let i = 0; i < options.descLength; i++) descriptor.push(data.view[index * options.descLength + i]);
  return descriptor;
};
// randomly perturb each descriptor element by +/-0.5 to make test matches harder
const fuzDescriptor = (descriptor) => {
  for (let i = 0; i < descriptor.length; i++) descriptor[i] += Math.random() - 0.5;
  return descriptor;
};
// promise-based sleep
const delay = (ms) => new Promise((resolve) => { setTimeout(resolve, ms); });
/**
 * Ask all live workers to shut down, wait briefly for them to exit on their own,
 * then force-terminate any stragglers.
 */
async function workersClose() {
  const current = data.workers.filter((worker) => !!worker).length;
  log.info('closing workers:', { poolSize: data.workers.length, activeWorkers: current });
  for (const worker of data.workers) {
    if (worker) worker.postMessage({ shutdown: true }); // tell worker to exit
  }
  await delay(250); // wait a little for threads to exit on their own
  const remaining = data.workers.filter((worker) => !!worker).length;
  if (remaining > 0) {
    // fix: log the post-delay `remaining` count, not the pre-shutdown `current` count
    log.info('terminating remaining workers:', { remaining, pool: data.workers.length });
    for (const worker of data.workers) {
      if (worker) worker.terminate(); // if worker did not exit cleanly terminate it
    }
  }
}
/**
 * Handle a message from a worker: either a completed match result (logged; once
 * `maxJobs` results have arrived total/average times are reported and the pool
 * is closed) or a free-form status message.
 * @param {number} index worker index in the pool (bound at worker creation)
 * @param {object} msg message posted by the worker
 */
const workerMessage = (index, msg) => {
  if (msg.request) {
    if (options.debug) log.data('message:', { worker: index, request: msg.request, time: msg.time, label: getLabel(msg.index), similarity: msg.similarity });
    if (msg.request >= testOptions.maxJobs) {
      const t1 = process.hrtime.bigint();
      const elapsed = Math.round(Number(t1 - t0) / 1000 / 1000); // ns -> ms
      log.state({ matchJobsFinished: testOptions.maxJobs, totalTimeMs: elapsed, averageTimeMs: Math.round(100 * elapsed / testOptions.maxJobs) / 100 });
      workersClose(); // NOTE(review): fire-and-forget async call — presumably intentional for this demo
    }
  } else {
    log.data('message:', { worker: index, msg });
  }
};
/** Worker 'exit' handler: remove the worker from the pool and log pool occupancy. */
async function workerClose(id, code) {
  const countActive = () => data.workers.filter((worker) => !!worker).length;
  const previous = countActive();
  delete data.workers[id]; // leave a hole so indices of other workers stay stable
  if (options.debug) log.state('worker exit:', { id, code, previous, current: countActive() });
}
/**
 * Grow the worker pool to `numWorkers`: create missing workers, wire their event
 * handlers, hand each the shared buffer, then push current record count and
 * settings to every worker. Safe to call at any time to resize the pool.
 * @param {number} numWorkers desired pool size
 */
async function workersStart(numWorkers) {
  const previous = data.workers.filter((worker) => !!worker).length;
  log.info('starting worker thread pool:', { totalWorkers: numWorkers, alreadyActive: previous });
  for (let i = 0; i < numWorkers; i++) {
    if (!data.workers[i]) { // worker does not exist, so create it
      const worker = new threads.Worker(path.join(__dirname, options.workerSrc));
      worker.on('message', (msg) => workerMessage(i, msg));
      worker.on('error', (err) => log.error('worker error:', { err }));
      worker.on('exit', (code) => workerClose(i, code));
      worker.postMessage(data.buffer); // send buffer to worker
      data.workers[i] = worker;
    }
    data.workers[i]?.postMessage({ records: data.labels.length, threshold: options.minThreshold, debug: options.debug }); // inform worker how many records there are
  }
  await delay(100); // just wait a bit for everything to settle down
}
/**
 * Submit a match request for the given descriptor to one of the worker threads,
 * assigned round-robin by requestID. The descriptor is copied into a transferable
 * ArrayBuffer so the worker receives it without structured-clone overhead.
 * @param {number[]} descriptor query descriptor
 */
const match = (descriptor) => {
  const buffer = new ArrayBuffer(options.descLength * 4); // 4 bytes per float32 element
  new Float32Array(buffer).set(descriptor);
  const available = data.workers.filter((worker) => !!worker).length; // find number of available workers
  if (available === 0) {
    log.error('no available workers');
    return;
  }
  const worker = data.workers[data.requestID % available]; // round robin to first available worker
  worker.postMessage({ descriptor: buffer, request: data.requestID }, [buffer]);
};
/**
 * Load the face db json from disk `count` times (test helper used to fake a large
 * database) and append every record via appendRecords. Resets the perf counter
 * so subsequent match timing is measured from this point.
 * @param {number} count number of times to re-load the db file
 */
async function loadDB(count) {
  const previous = data.labels.length;
  if (!fs.existsSync(options.dbFile)) {
    log.error('db file does not exist:', options.dbFile);
    return;
  }
  t0 = process.hrtime.bigint(); // restart perf counter
  for (let i = 0; i < count; i++) { // test loop: load entire face db from array of objects n times into buffer
    const db = JSON.parse(fs.readFileSync(options.dbFile).toString());
    const names = db.map((record) => record.name);
    const descriptors = db.map((record) => record.embedding);
    appendRecords(names, descriptors);
  }
  log.data('db loaded:', { existingRecords: previous, newRecords: data.labels.length });
}
/** Preallocate the shared descriptor buffer (SharedArrayBuffers cannot grow) and reset the record count. */
async function createBuffer() {
  const totalBytes = 4 * options.dbMax * options.descLength; // 4 bytes per float32 element
  data.buffer = new SharedArrayBuffer(totalBytes);
  data.view = new Float32Array(data.buffer); // create view into buffer
  data.labels.length = 0; // reset record count
  log.data('created shared buffer:', { maxDescriptors: (data.view.length || 0) / options.descLength, totalBytes: data.buffer.byteLength, totalElements: data.view.length });
}
/**
 * Demo entry point: allocate the shared buffer, load the db (multiplied to fake
 * a large dataset), start the worker pool, then submit `maxJobs` match requests
 * for randomly chosen (optionally fuzzed) descriptors.
 */
async function main() {
  log.header();
  log.info('options:', options);
  await createBuffer(); // create shared buffer array
  await loadDB(testOptions.dbFact); // loadDB is a test method that calls actual addRecords
  await workersStart(options.threadPoolSize); // can be called at anytime to modify worker pool size
  for (let i = 0; i < testOptions.maxJobs; i++) {
    const idx = Math.trunc(data.labels.length * Math.random()); // grab a random descriptor index that we'll search for
    const descriptor = getDescriptor(idx); // grab a descriptor at index
    data.requestID++; // increase request id
    if (testOptions.fuzDescriptors) match(fuzDescriptor(descriptor)); // fuz descriptor for harder match
    else match(descriptor);
    if (options.debug) log.debug('submited job', data.requestID); // we already know what we're searching for so we can compare results
  }
  log.state('submitted:', { matchJobs: testOptions.maxJobs, poolSize: data.workers.length, activeWorkers: data.workers.filter((worker) => !!worker).length });
}
main();

BIN
demo/favicon.ico Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 256 KiB

3
demo/helpers/README.md Normal file
View File

@ -0,0 +1,3 @@
# Helper libraries
Used by main `Human` demo app

View File

@ -1,10 +1,8 @@
/* eslint-disable max-len */
// based on: https://github.com/munrocket/gl-bench // based on: https://github.com/munrocket/gl-bench
const UICSS = ` const UICSS = `
#gl-bench { position: absolute; right: 1rem; bottom: 1rem; z-index:1000; -webkit-user-select: none; -moz-user-select: none; user-select: none; } #gl-bench { position: absolute; right: 1rem; bottom: 1rem; z-index:1000; -webkit-user-select: none; -moz-user-select: none; user-select: none; }
#gl-bench div { position: relative; display: block; margin: 4px; padding: 0 7px 0 10px; background: darkslategray; border-radius: 0.2rem; cursor: pointer; opacity: 0.9; } #gl-bench div { position: relative; display: block; margin: 4px; padding: 0 2px 0 2px; background: #303030; border-radius: 0.1rem; cursor: pointer; opacity: 0.9; }
#gl-bench svg { height: 60px; margin: 0 0px 0px 4px; } #gl-bench svg { height: 60px; margin: 0 0px 0px 4px; }
#gl-bench text { font-size: 16px; font-family: 'Lato', 'Segoe UI'; dominant-baseline: middle; text-anchor: middle; } #gl-bench text { font-size: 16px; font-family: 'Lato', 'Segoe UI'; dominant-baseline: middle; text-anchor: middle; }
#gl-bench .gl-mem { font-size: 12px; fill: white; } #gl-bench .gl-mem { font-size: 12px; fill: white; }
@ -17,10 +15,10 @@ const UICSS = `
const UISVG = ` const UISVG = `
<div class="gl-box"> <div class="gl-box">
<svg viewBox="0 0 55 60"> <svg viewBox="0 0 60 60">
<text x="27" y="56" class="gl-fps">00 FPS</text> <text x="27" y="56" class="gl-fps">00 FPS</text>
<text x="30" y="8" class="gl-mem"></text> <text x="30" y="8" class="gl-mem"></text>
<rect x="0" y="14" rx="4" ry="4" width="65" height="32"></rect> <rect x="0" y="14" rx="4" ry="4" width="60" height="32"></rect>
<polyline class="gl-chart"></polyline> <polyline class="gl-chart"></polyline>
</svg> </svg>
<svg viewBox="0 0 14 60" class="gl-cpu-svg"> <svg viewBox="0 0 14 60" class="gl-cpu-svg">
@ -38,15 +36,13 @@ const UISVG = `
class GLBench { class GLBench {
/** GLBench constructor /** GLBench constructor
* @param { WebGLRenderingContext | WebGL2RenderingContext } gl context * @param { WebGLRenderingContext | WebGL2RenderingContext | null } gl context
* @param { Object | undefined } settings additional settings * @param { Object | undefined } settings additional settings
*/ */
constructor(gl, settings = {}) { constructor(gl, settings = {}) {
this.css = UICSS; this.css = UICSS;
this.svg = UISVG; this.svg = UISVG;
// eslint-disable-next-line @typescript-eslint/no-empty-function
this.paramLogger = () => {}; this.paramLogger = () => {};
// eslint-disable-next-line @typescript-eslint/no-empty-function
this.chartLogger = () => {}; this.chartLogger = () => {};
this.chartLen = 20; this.chartLen = 20;
this.chartHz = 20; this.chartHz = 20;
@ -93,7 +89,6 @@ class GLBench {
const addProfiler = (fn, self, target) => { const addProfiler = (fn, self, target) => {
const t = self.now(); const t = self.now();
// eslint-disable-next-line prefer-rest-params
fn.apply(target, arguments); fn.apply(target, arguments);
if (self.trackGPU) self.finished.push(glFinish(t, self.activeAccums.slice(0))); if (self.trackGPU) self.finished.push(glFinish(t, self.activeAccums.slice(0)));
}; };
@ -108,13 +103,11 @@ class GLBench {
if (gl[fn]) { if (gl[fn]) {
gl[fn] = addProfiler(gl[fn], this, gl); gl[fn] = addProfiler(gl[fn], this, gl);
} else { } else {
// eslint-disable-next-line no-console
console.log('bench: cannot attach to webgl function'); console.log('bench: cannot attach to webgl function');
} }
/* /*
gl.getExtension = ((fn, self) => { gl.getExtension = ((fn, self) => {
// eslint-disable-next-line prefer-rest-params
const ext = fn.apply(gl, arguments); const ext = fn.apply(gl, arguments);
if (ext) { if (ext) {
['drawElementsInstancedANGLE', 'drawBuffersWEBGL'].forEach((fn2) => { ['drawElementsInstancedANGLE', 'drawBuffersWEBGL'].forEach((fn2) => {
@ -149,7 +142,6 @@ class GLBench {
return (i, cpu, gpu, mem, fps, totalTime, frameId) => { return (i, cpu, gpu, mem, fps, totalTime, frameId) => {
nodes['gl-cpu'][i].style.strokeDasharray = (cpu * 0.27).toFixed(0) + ' 100'; nodes['gl-cpu'][i].style.strokeDasharray = (cpu * 0.27).toFixed(0) + ' 100';
nodes['gl-gpu'][i].style.strokeDasharray = (gpu * 0.27).toFixed(0) + ' 100'; nodes['gl-gpu'][i].style.strokeDasharray = (gpu * 0.27).toFixed(0) + ' 100';
// eslint-disable-next-line no-nested-ternary
nodes['gl-mem'][i].innerHTML = names[i] ? names[i] : (mem ? 'mem: ' + mem.toFixed(0) + 'mb' : ''); nodes['gl-mem'][i].innerHTML = names[i] ? names[i] : (mem ? 'mem: ' + mem.toFixed(0) + 'mb' : '');
nodes['gl-fps'][i].innerHTML = 'FPS: ' + fps.toFixed(1); nodes['gl-fps'][i].innerHTML = 'FPS: ' + fps.toFixed(1);
logger(names[i], cpu, gpu, mem, fps, totalTime, frameId); logger(names[i], cpu, gpu, mem, fps, totalTime, frameId);
@ -163,7 +155,7 @@ class GLBench {
const len = chart.length; const len = chart.length;
for (let j = 0; j < len; j++) { for (let j = 0; j < len; j++) {
const id = (circularId + j + 1) % len; const id = (circularId + j + 1) % len;
if (chart[id] !== undefined) points = points + ' ' + (55 * j / (len - 1)).toFixed(1) + ',' + (45 - chart[id] * 22 / 60 / this.detected).toFixed(1); if (chart[id] !== undefined) points = points + ' ' + (60 * j / (len - 1)).toFixed(1) + ',' + (45 - chart[id] * 0.5 / this.detected).toFixed(1);
} }
nodes['gl-chart'][i].setAttribute('points', points); nodes['gl-chart'][i].setAttribute('points', points);
logger(this.names[i], chart, circularId); logger(this.names[i], chart, circularId);

157
demo/helpers/jsonview.js Normal file
View File

@ -0,0 +1,157 @@
let callbackFunction = null; // optional external callback invoked when nodes expand/collapse

/**
 * Create a DOM element of the given tag and apply optional config:
 * className, textContent, inline style string, and child elements (falsy children skipped).
 */
function createElement(type, config) {
  const htmlElement = document.createElement(type);
  if (config === undefined) return htmlElement;
  const { className, content, style, children } = config;
  if (className) htmlElement.className = className;
  if (content) htmlElement.textContent = content;
  if (style) htmlElement.style = style;
  if (children) {
    for (const child of children) {
      if (child) htmlElement.appendChild(child);
    }
  }
  return htmlElement;
}
/**
 * Build a single clickable line for an expandable (object/array) node: caret icon,
 * key (or index, or type for the root), and a size summary. Clicking any part
 * toggles the node's expansion.
 * @param {object} node view-model node produced by createNode/traverseObject
 * @returns {HTMLElement} the line element
 */
function createExpandedElement(node) {
  const iElem = createElement('i');
  if (node.expanded) { iElem.className = 'fas fa-caret-down'; } else { iElem.className = 'fas fa-caret-right'; }
  const caretElem = createElement('div', { style: 'width: 18px; text-align: center; cursor: pointer', children: [iElem] });
  const handleClick = node.toggle.bind(node); // bind so `this` stays the node when used as an event handler
  caretElem.addEventListener('click', handleClick);
  const indexElem = createElement('div', { className: 'json json-index', content: node.key });
  indexElem.addEventListener('click', handleClick);
  const typeElem = createElement('div', { className: 'json json-type', content: node.type });
  const keyElem = createElement('div', { className: 'json json-key', content: node.key });
  keyElem.addEventListener('click', handleClick);
  const sizeElem = createElement('div', { className: 'json json-size' });
  sizeElem.addEventListener('click', handleClick);
  if (node.type === 'array') {
    sizeElem.innerText = `[${node.children.length} items]`;
  } else if (node.type === 'object') {
    // objects containing a `size` child display it as a byte count instead of a property count
    const size = node.children.find((item) => item.key === 'size');
    sizeElem.innerText = size ? `{${size.value.toLocaleString()} bytes}` : `{${node.children.length} properties}`;
  }
  // root shows its type, array children show their index, object children show their key
  let lineChildren;
  if (node.key === null) lineChildren = [caretElem, typeElem, sizeElem];
  else if (node.parent.type === 'array') lineChildren = [caretElem, indexElem, sizeElem];
  else lineChildren = [caretElem, keyElem, sizeElem];
  const lineElem = createElement('div', { className: 'json-line', children: lineChildren });
  if (node.depth > 0) lineElem.style = `margin-left: ${node.depth * 24}px;`; // indent by nesting depth
  return lineElem;
}
/**
 * Build a single line for a leaf node: caret spacer, key, ':' separator, and the
 * value rendered with a css class matching its primitive type.
 */
function createNotExpandedElement(node) {
  const spacer = createElement('div', { style: 'width: 18px' });
  const keyElem = createElement('div', { className: 'json json-key', content: node.key });
  const separator = createElement('div', { className: 'json-separator', content: ':' });
  const valueElem = createElement('div', { className: `json json-value json-${typeof node.value}`, content: node.value.toLocaleString() });
  const lineElem = createElement('div', { className: 'json-line', children: [spacer, keyElem, separator, valueElem] });
  if (node.depth > 0) lineElem.style = `margin-left: ${node.depth * 24}px;`; // indent by nesting depth
  return lineElem;
}
/**
 * Create an empty view-model node. Each node tracks its DOM element (`elem`),
 * its children, and expansion state, and exposes hide/show/toggle helpers that
 * operate on the node's subtree.
 */
function createNode() {
  return {
    key: '', // property name or array index ('' until assigned)
    parent: {}, // parent node (empty object for the root)
    value: null, // primitive value for leaf nodes
    expanded: false,
    type: '', // 'array', 'object', 'null' or a typeof result (see getType)
    children: [],
    elem: {}, // DOM element representing this node's line
    depth: 0, // nesting depth, drives indentation
    // recursively hide all descendant lines (expanded descendants keep recursing)
    hideChildren() {
      if (Array.isArray(this.children)) {
        this.children.forEach((item) => {
          item['elem']['classList'].add('hide');
          if (item['expanded']) item.hideChildren();
        });
      }
    },
    // recursively show descendant lines, honoring each descendant's own expansion state
    showChildren() {
      if (Array.isArray(this.children)) {
        this.children.forEach((item) => {
          item['elem']['classList'].remove('hide');
          if (item['expanded']) item.showChildren();
        });
      }
    },
    // flip expansion: swap the caret icon, show/hide children, and notify the
    // external callback (with the node path on expand, null on collapse)
    toggle() {
      if (this.expanded) {
        this.hideChildren();
        const icon = this.elem?.querySelector('.fas');
        icon.classList.replace('fa-caret-down', 'fa-caret-right');
        if (callbackFunction !== null) callbackFunction(null);
      } else {
        this.showChildren();
        const icon = this.elem?.querySelector('.fas');
        icon.classList.replace('fa-caret-right', 'fa-caret-down');
        if (this.type === 'object') {
          if (callbackFunction !== null) callbackFunction(`${this.parent?.key}/${this.key}`);
        }
      }
      this.expanded = !this.expanded;
    },
  };
}
// Classifies a value for display purposes: distinguishes arrays and null
// (which plain `typeof` reports as 'object') from everything else.
function getType(val) {
  if (Array.isArray(val)) return 'array';
  if (val === null) return 'null';
  return typeof val;
}
// Walks `obj` recursively, building one tree node per property under `parent`.
// Keys listed in `filter` are excluded from the tree.
function traverseObject(obj, parent, filter) {
  for (const key in obj) {
    // bug fix: skip filtered keys with `continue` — the original used `return`,
    // which aborted the whole loop and silently dropped every remaining sibling
    // after the first filtered key
    if (Array.isArray(filter) && filter.includes(key)) continue;
    const child = createNode();
    child.parent = parent;
    child.key = key;
    child.type = getType(obj[key]);
    child.depth = parent.depth + 1;
    child.expanded = false;
    // note: typeof null === 'object', so null values are intentionally rendered
    // as expandable nodes with zero children (a null leaf would crash on
    // value.toLocaleString() in createNotExpandedElement)
    if (typeof obj[key] === 'object') {
      child.children = [];
      parent.children.push(child);
      traverseObject(obj[key], child, filter);
      child.elem = createExpandedElement(child);
    } else {
      child.value = obj[key];
      child.elem = createNotExpandedElement(child);
      parent.children.push(child);
    }
  }
}
// Builds the root node for `obj`: the title doubles as both key and type, the
// root starts expanded, and all properties are traversed beneath it.
function createTree(obj, title, filter) {
  const root = createNode();
  root.key = title;
  root.type = title;
  root.children = [];
  root.expanded = true;
  traverseObject(obj, root, filter);
  root.elem = createExpandedElement(root);
  return root;
}
// Depth-first pre-order walk: invokes `callback` on `node`, then on each child.
function traverseTree(node, callback) {
  callback(node);
  if (node.children !== null) {
    for (const child of node.children) traverseTree(child, callback);
  }
}
// Renders `json` as an interactive tree appended to `element`; nodes that are
// not expanded start hidden. `filter` lists property keys to omit.
async function jsonView(json, element, title = '', filter = []) {
  const root = createTree(json, title, filter);
  traverseTree(root, (current) => {
    if (!current.expanded) current.hideChildren();
    element.appendChild(current.elem);
  });
}
export default jsonView;

View File

@ -2,8 +2,8 @@ let instance = 0;
let CSScreated = false; let CSScreated = false;
let theme = { let theme = {
background: 'darkslategray', background: '#303030',
hover: 'lightgray', hover: '#505050',
itemBackground: 'black', itemBackground: 'black',
itemColor: 'white', itemColor: 'white',
buttonBackground: 'lightblue', buttonBackground: 'lightblue',
@ -19,14 +19,14 @@ function createCSS() {
if (CSScreated) return; if (CSScreated) return;
const css = ` const css = `
:root { --rounded: 0.1rem; } :root { --rounded: 0.1rem; }
.menu { position: absolute; top: 0rem; right: 0; width: max-content; padding: 0 0.2rem 0 0.2rem; line-height: 1.8rem; z-index: 10; .menu { position: absolute; top: 0rem; right: 0; min-width: 180px; width: max-content; padding: 0.2rem 0.8rem 0 0.8rem; line-height: 1.8rem; z-index: 10; background: ${theme.background}; border: none }
box-shadow: 0 0 8px dimgrey; background: ${theme.background}; border-radius: var(--rounded); border-color: black; border-style: solid; border-width: thin; } .button { text-shadow: none; }
.menu:hover { box-shadow: 0 0 8px ${theme.hover}; }
.menu-container { display: block; max-height: 100vh; } .menu-container { display: block; max-height: 100vh; }
.menu-container-fadeout { max-height: 0; overflow: hidden; transition: max-height, 0.5s ease; } .menu-container-fadeout { max-height: 0; overflow: hidden; transition: max-height, 0.5s ease; }
.menu-container-fadein { max-height: 100vh; overflow: hidden; transition: max-height, 0.5s ease; } .menu-container-fadein { max-height: 100vh; overflow: hidden; transition: max-height, 0.5s ease; }
.menu-item { display: flex; white-space: nowrap; padding: 0.2rem; cursor: default; width: 100%; } .menu-item { display: flex; white-space: nowrap; padding: 0.2rem; cursor: default; width: 100%; }
.menu-item:hover { background: ${theme.hover} }
.menu-title { cursor: pointer; } .menu-title { cursor: pointer; }
.menu-hr { margin: 0.2rem; border: 1px solid rgba(0, 0, 0, 0.5) } .menu-hr { margin: 0.2rem; border: 1px solid rgba(0, 0, 0, 0.5) }
.menu-label { padding: 0; font-weight: 800; } .menu-label { padding: 0; font-weight: 800; }
@ -39,30 +39,30 @@ function createCSS() {
.menu-chart-title { padding: 0; font-size: 0.8rem; font-weight: 800; align-items: center} .menu-chart-title { padding: 0; font-size: 0.8rem; font-weight: 800; align-items: center}
.menu-chart-canvas { background: transparent; margin: 0.2rem 0 0.2rem 0.6rem; } .menu-chart-canvas { background: transparent; margin: 0.2rem 0 0.2rem 0.6rem; }
.menu-button { border: 0; background: ${theme.buttonBackground}; width: -webkit-fill-available; padding: 8px; margin: 8px; cursor: pointer; box-shadow: 4px 4px 4px 0 dimgrey; .menu-button { border: 0; background: ${theme.buttonBackground}; width: -webkit-fill-available; padding: 8px; margin: 8px; cursor: pointer;
border-radius: var(--rounded); justify-content: center; font-family: inherit; font-variant: inherit; font-size: 1rem; font-weight: 800; } border-radius: var(--rounded); justify-content: center; font-family: inherit; font-variant: inherit; font-size: 1rem; font-weight: 800; }
.menu-button:hover { background: ${theme.buttonHover}; box-shadow: 4px 4px 4px 0 black; } .menu-button:hover { background: ${theme.buttonHover}; box-shadow: 4px 4px 4px 0 black; }
.menu-button:focus { outline: none; } .menu-button:focus { outline: none; }
.menu-checkbox { width: 2.8rem; height: 1rem; background: ${theme.itemBackground}; margin: 0.5rem 0.5rem 0 0; position: relative; border-radius: var(--rounded); } .menu-checkbox { width: 2.6rem; height: 1rem; background: ${theme.itemBackground}; margin: 0.5rem 1.0rem 0 0; position: relative; border-radius: var(--rounded); }
.menu-checkbox:after { content: 'OFF'; color: ${theme.checkboxOff}; position: absolute; right: 0.2rem; top: -0.4rem; font-weight: 800; font-size: 0.5rem; } .menu-checkbox:after { content: 'OFF'; color: ${theme.checkboxOff}; position: absolute; right: 0.2rem; top: -0.4rem; font-weight: 800; font-size: 0.5rem; }
.menu-checkbox:before { content: 'ON'; color: ${theme.checkboxOn}; position: absolute; left: 0.3rem; top: -0.4rem; font-weight: 800; font-size: 0.5rem; } .menu-checkbox:before { content: 'ON'; color: ${theme.checkboxOn}; position: absolute; left: 0.3rem; top: -0.4rem; font-weight: 800; font-size: 0.5rem; }
.menu-checkbox-label { width: 1.3rem; height: 0.8rem; cursor: pointer; position: absolute; top: 0.1rem; left: 0.1rem; z-index: 1; background: ${theme.checkboxOff}; .menu-checkbox-label { width: 1.3rem; height: 1rem; cursor: pointer; position: absolute; top: 0; left: 0rem; z-index: 1; background: ${theme.checkboxOff};
border-radius: var(--rounded); transition: left 0.6s ease; } border-radius: var(--rounded); transition: left 0.6s ease; }
input[type=checkbox] { visibility: hidden; } input[type=checkbox] { visibility: hidden; }
input[type=checkbox]:checked + label { left: 1.4rem; background: ${theme.checkboxOn}; } input[type=checkbox]:checked + label { left: 1.4rem; background: ${theme.checkboxOn}; }
.menu-range { margin: 0.2rem 0.5rem 0 0; width: 3.5rem; background: transparent; color: ${theme.rangeBackground}; } .menu-range { margin: 0.2rem 1.0rem 0 0; width: 5rem; background: transparent; color: ${theme.rangeBackground}; }
.menu-range:before { color: ${theme.rangeLabel}; margin: 0 0.4rem 0 0; font-weight: 800; font-size: 0.6rem; position: relative; top: 0.3rem; content: attr(value); } .menu-range:before { color: ${theme.rangeLabel}; margin: 0 0.4rem 0 0; font-weight: 800; font-size: 0.6rem; position: relative; top: 0.3rem; content: attr(value); }
input[type=range] { -webkit-appearance: none; } input[type=range] { -webkit-appearance: none; }
input[type=range]::-webkit-slider-runnable-track { width: 100%; height: 1rem; cursor: pointer; background: ${theme.itemBackground}; border-radius: var(--rounded); border: 1px; } input[type=range]::-webkit-slider-runnable-track { width: 100%; height: 1rem; cursor: pointer; background: ${theme.itemBackground}; border-radius: var(--rounded); border: 1px; }
input[type=range]::-moz-range-track { width: 100%; height: 1rem; cursor: pointer; background: ${theme.itemBackground}; border-radius: var(--rounded); border: 1px; } input[type=range]::-moz-range-track { width: 100%; height: 1rem; cursor: pointer; background: ${theme.itemBackground}; border-radius: var(--rounded); border: 1px; }
input[type=range]::-webkit-slider-thumb { border: 1px solid #000000; margin-top: 0.05rem; height: 0.9rem; width: 1rem; border-radius: var(--rounded); background: ${theme.rangeBackground}; cursor: pointer; -webkit-appearance: none; } input[type=range]::-webkit-slider-thumb { border: 1px solid #000000; margin-top: 0; height: 1rem; width: 0.6rem; border-radius: var(--rounded); background: ${theme.rangeBackground}; cursor: pointer; -webkit-appearance: none; }
input[type=range]::-moz-range-thumb { border: 1px solid #000000; margin-top: 0.05rem; height: 0.9rem; width: 1rem; border-radius: var(--rounded); background: ${theme.rangeBackground}; cursor: pointer; -webkit-appearance: none; } input[type=range]::-moz-range-thumb { border: 1px solid #000000; margin-top: 0rem; height: 1rem; width: 0.6rem; border-radius: var(--rounded); background: ${theme.rangeBackground}; cursor: pointer; -webkit-appearance: none; }
.svg-background { fill:darkslategrey; cursor:pointer; opacity: 0.6; } .svg-background { fill:#303030; cursor:pointer; opacity: 0.6; }
.svg-foreground { fill:white; cursor:pointer; opacity: 0.8; } .svg-foreground { fill:white; cursor:pointer; opacity: 0.8; }
`; `;
const el = document.createElement('style'); const el = document.createElement('style');
@ -80,18 +80,19 @@ class Menu {
this.instance = instance; this.instance = instance;
instance++; instance++;
this._maxFPS = 0; this._maxFPS = 0;
this.hidden = 0; this.hidden = false;
} }
createMenu(parent, title = '', position = { top: null, left: null, bottom: null, right: null }) { createMenu(parent, title = '', position = { top: null, left: null, bottom: null, right: null }) {
/** @type {HTMLDivElement} */
this.menu = document.createElement('div'); this.menu = document.createElement('div');
this.menu.id = `menu-${instance}`; this.menu.id = `menu-${instance}`;
this.menu.className = 'menu'; this.menu.className = 'menu';
if (position) { if (position) {
if (position.top) this.menu.style.top = position.top; if (position.top) this.menu.style.top = `${position.top}`;
if (position.bottom) this.menu.style.bottom = position.bottom; if (position.bottom) this.menu.style.bottom = `${position.bottom}`;
if (position.left) this.menu.style.left = position.left; if (position.left) this.menu.style.left = `${position.left}`;
if (position.right) this.menu.style.right = position.right; if (position.right) this.menu.style.right = `${position.right}`;
} }
this.container = document.createElement('div'); this.container = document.createElement('div');
@ -109,9 +110,11 @@ class Menu {
if (title) elTitle.innerHTML = `${title}${svg}`; if (title) elTitle.innerHTML = `${title}${svg}`;
this.menu.appendChild(elTitle); this.menu.appendChild(elTitle);
elTitle.addEventListener('click', () => { elTitle.addEventListener('click', () => {
this.container.classList.toggle('menu-container-fadeout'); if (this.container && this.menu) {
this.container.classList.toggle('menu-container-fadein'); this.container.classList.toggle('menu-container-fadeout');
this.menu.style.borderStyle = this.container.classList.contains('menu-container-fadeout') ? 'none' : 'solid'; this.container.classList.toggle('menu-container-fadein');
// this.menu.style.borderStyle = this.container.classList.contains('menu-container-fadeout') ? 'none' : 'solid';
}
}); });
this.menu.appendChild(this.container); this.menu.appendChild(this.container);
@ -129,40 +132,44 @@ class Menu {
} }
get width() { get width() {
return this.menu.offsetWidth; return this.menu ? this.menu.offsetWidth : 0;
} }
get height() { get height() {
return this.menu.offsetHeight; return this.menu ? this.menu.offsetHeight : 0;
} }
hide() { hide() {
if (this.container.classList.contains('menu-container-fadein')) { if (this.container && this.container.classList.contains('menu-container-fadein')) {
this.container.classList.toggle('menu-container-fadeout'); this.container.classList.toggle('menu-container-fadeout');
this.container.classList.toggle('menu-container-fadein'); this.container.classList.toggle('menu-container-fadein');
} }
} }
visible() { visible() {
return (this.container.classList.contains('menu-container-fadein')); return (this.container ? this.container.classList.contains('menu-container-fadein') : false);
} }
toggle(evt) { toggle(evt) {
this.container.classList.toggle('menu-container-fadeout'); if (this.container && this.menu) {
this.container.classList.toggle('menu-container-fadein'); this.container.classList.toggle('menu-container-fadeout');
if (this.container.classList.contains('menu-container-fadein') && evt) { this.container.classList.toggle('menu-container-fadein');
const x = evt.x || (evt.touches && evt.touches[0] ? evt.touches[0].pageX : null); /*
// const y = evt.y || (evt.touches && evt.touches[0] ? evt.touches[0].pageY : null); if (this.container.classList.contains('menu-container-fadein') && evt) {
if (x) this.menu.style.left = `${x - (this.menu.offsetWidth / 2)}px`; const x = evt.x || (evt.touches && evt.touches[0] ? evt.touches[0].pageX : null);
// if (y) this.menu.style.top = '5.5rem'; // `${evt.y + 55}px`; // const y = evt.y || (evt.touches && evt.touches[0] ? evt.touches[0].pageY : null);
if (this.menu.offsetLeft < 0) this.menu.style.left = 0; if (x) this.menu.style.left = `${x - (this.menu.offsetWidth / 2)}px`;
if ((this.menu.offsetLeft + this.menu.offsetWidth) > window.innerWidth) { // if (y) this.menu.style.top = '5.5rem'; // `${evt.y + 55}px`;
this.menu.style.left = null; if (this.menu.offsetLeft < 0) this.menu.style.left = '0';
this.menu.style.right = 0; if ((this.menu.offsetLeft + this.menu.offsetWidth) > window.innerWidth) {
this.menu.style.left = '';
this.menu.style.right = '0';
}
// this.menu.style.borderStyle = 'solid';
} else {
// this.menu.style.borderStyle = 'none';
} }
this.menu.style.borderStyle = 'solid'; */
} else {
this.menu.style.borderStyle = 'none';
} }
} }
@ -171,7 +178,7 @@ class Menu {
el.className = 'menu-title'; el.className = 'menu-title';
el.id = this.newID; el.id = this.newID;
el.innerHTML = title; el.innerHTML = title;
this.menu.appendChild(el); if (this.menu) this.menu.appendChild(el);
el.addEventListener('click', () => { el.addEventListener('click', () => {
this.hidden = !this.hidden; this.hidden = !this.hidden;
const all = document.getElementsByClassName('menu'); const all = document.getElementsByClassName('menu');
@ -187,7 +194,7 @@ class Menu {
el.className = 'menu-item menu-label'; el.className = 'menu-item menu-label';
el.id = this.newID; el.id = this.newID;
el.innerHTML = title; el.innerHTML = title;
this.container.appendChild(el); if (this.container) this.container.appendChild(el);
return el; return el;
} }
@ -195,10 +202,12 @@ class Menu {
const el = document.createElement('div'); const el = document.createElement('div');
el.className = 'menu-item'; el.className = 'menu-item';
el.innerHTML = `<div class="menu-checkbox"><input class="menu-checkbox" type="checkbox" id="${this.newID}" ${object[variable] ? 'checked' : ''}/><label class="menu-checkbox-label" for="${this.ID}"></label></div>${title}`; el.innerHTML = `<div class="menu-checkbox"><input class="menu-checkbox" type="checkbox" id="${this.newID}" ${object[variable] ? 'checked' : ''}/><label class="menu-checkbox-label" for="${this.ID}"></label></div>${title}`;
this.container.appendChild(el); if (this.container) this.container.appendChild(el);
el.addEventListener('change', (evt) => { el.addEventListener('change', (evt) => {
object[variable] = evt.target.checked; if (evt.target) {
if (callback) callback(evt.target.checked); object[variable] = evt.target['checked'];
if (callback) callback(evt.target['checked']);
}
}); });
return el; return el;
} }
@ -211,13 +220,13 @@ class Menu {
const def = item === selected ? 'selected' : ''; const def = item === selected ? 'selected' : '';
options += `<option value="${item}" ${def}>${item}</option>`; options += `<option value="${item}" ${def}>${item}</option>`;
} }
el.innerHTML = `<div class="menu-list"><select name="${this.ID}" class="menu-list-item">${options}</select><label for="${this.ID}"></label></div>${title}`; el.innerHTML = `<div class="menu-list"><select name="${this.ID}" title="${title}" class="menu-list-item">${options}</select><label for="${this.ID}"></label></div>${title}`;
el.style.fontFamily = document.body.style.fontFamily; el.style.fontFamily = document.body.style.fontFamily;
el.style.fontSize = document.body.style.fontSize; el.style.fontSize = document.body.style.fontSize;
el.style.fontVariant = document.body.style.fontVariant; el.style.fontVariant = document.body.style.fontVariant;
this.container.appendChild(el); if (this.container) this.container.appendChild(el);
el.addEventListener('change', (evt) => { el.addEventListener('change', (evt) => {
if (callback) callback(items[evt.target.selectedIndex]); if (callback && evt.target) callback(items[evt.target['selectedIndex']]);
}); });
return el; return el;
} }
@ -225,14 +234,16 @@ class Menu {
addRange(title, object, variable, min, max, step, callback) { addRange(title, object, variable, min, max, step, callback) {
const el = document.createElement('div'); const el = document.createElement('div');
el.className = 'menu-item'; el.className = 'menu-item';
el.innerHTML = `<input class="menu-range" type="range" id="${this.newID}" min="${min}" max="${max}" step="${step}" value="${object[variable]}">${title}`; el.innerHTML = `<input class="menu-range" type="range" title="${title}" id="${this.newID}" min="${min}" max="${max}" step="${step}" value="${object[variable]}">${title}`;
this.container.appendChild(el); if (this.container) this.container.appendChild(el);
el.addEventListener('change', (evt) => { el.addEventListener('change', (evt) => {
object[variable] = parseInt(evt.target.value) === parseFloat(evt.target.value) ? parseInt(evt.target.value) : parseFloat(evt.target.value); if (evt.target) {
evt.target.setAttribute('value', evt.target.value); object[variable] = parseInt(evt.target['value']) === parseFloat(evt.target['value']) ? parseInt(evt.target['value']) : parseFloat(evt.target['value']);
if (callback) callback(evt.target.value); evt.target.setAttribute('value', evt.target['value']);
if (callback) callback(evt.target['value']);
}
}); });
el.input = el.children[0]; el['input'] = el.children[0];
return el; return el;
} }
@ -241,7 +252,7 @@ class Menu {
el.className = 'menu-item'; el.className = 'menu-item';
el.id = this.newID; el.id = this.newID;
if (html) el.innerHTML = html; if (html) el.innerHTML = html;
this.container.appendChild(el); if (this.container) this.container.appendChild(el);
return el; return el;
} }
@ -254,7 +265,7 @@ class Menu {
el.type = 'button'; el.type = 'button';
el.id = this.newID; el.id = this.newID;
el.innerText = titleOn; el.innerText = titleOn;
this.container.appendChild(el); if (this.container) this.container.appendChild(el);
el.addEventListener('click', () => { el.addEventListener('click', () => {
if (el.innerText === titleOn) el.innerText = titleOff; if (el.innerText === titleOn) el.innerText = titleOff;
else el.innerText = titleOn; else el.innerText = titleOn;
@ -268,11 +279,10 @@ class Menu {
el.className = 'menu-item'; el.className = 'menu-item';
el.id = `menu-val-${title}`; el.id = `menu-val-${title}`;
el.innerText = `${title}: ${val}${suffix}`; el.innerText = `${title}: ${val}${suffix}`;
this.container.appendChild(el); if (this.container) this.container.appendChild(el);
return el; return el;
} }
// eslint-disable-next-line class-methods-use-this
updateValue(title, val, suffix = '') { updateValue(title, val, suffix = '') {
const el = document.getElementById(`menu-val-${title}`); const el = document.getElementById(`menu-val-${title}`);
if (el) el.innerText = `${title}: ${val}${suffix}`; if (el) el.innerText = `${title}: ${val}${suffix}`;
@ -285,16 +295,17 @@ class Menu {
el.className = 'menu-item menu-chart-title'; el.className = 'menu-item menu-chart-title';
el.id = this.newID; el.id = this.newID;
el.innerHTML = `<font color=${theme.chartColor}>${title}</font><canvas id="menu-canvas-${id}" class="menu-chart-canvas" width="${width}px" height="${height}px"></canvas>`; el.innerHTML = `<font color=${theme.chartColor}>${title}</font><canvas id="menu-canvas-${id}" class="menu-chart-canvas" width="${width}px" height="${height}px"></canvas>`;
this.container.appendChild(el); if (this.container) this.container.appendChild(el);
return el; return el;
} }
// eslint-disable-next-line class-methods-use-this
async updateChart(id, values) { async updateChart(id, values) {
if (!values || (values.length === 0)) return; if (!values || (values.length === 0)) return;
/** @type {HTMLCanvasElement} */
const canvas = document.getElementById(`menu-canvas-${id}`); const canvas = document.getElementById(`menu-canvas-${id}`);
if (!canvas) return; if (!canvas) return;
const ctx = canvas.getContext('2d'); const ctx = canvas.getContext('2d');
if (!ctx) return;
ctx.fillStyle = theme.background; ctx.fillStyle = theme.background;
ctx.fillRect(0, 0, canvas.width, canvas.height); ctx.fillRect(0, 0, canvas.width, canvas.height);
const width = canvas.width / values.length; const width = canvas.width / values.length;
@ -308,7 +319,7 @@ class Menu {
ctx.fillRect(i * width, 0, width - 4, canvas.height); ctx.fillRect(i * width, 0, width - 4, canvas.height);
ctx.fillStyle = theme.background; ctx.fillStyle = theme.background;
ctx.font = `${width / 1.5}px "Segoe UI"`; ctx.font = `${width / 1.5}px "Segoe UI"`;
ctx.fillText(Math.round(values[i]), i * width + 1, canvas.height - 1, width - 1); ctx.fillText(Math.round(values[i]).toString(), i * width + 1, canvas.height - 1, width - 1);
} }
} }
} }

85
demo/helpers/webrtc.js Normal file
View File

@ -0,0 +1,85 @@
const debug = true;
// Timestamped console logger for the webrtc helper; active only while the
// debug flag is set.
async function log(...msg) {
  if (!debug) return;
  const now = new Date();
  const pad = (num, len = 2) => num.toString().padStart(len, '0');
  const ts = `${pad(now.getHours())}:${pad(now.getMinutes())}:${pad(now.getSeconds())}.${pad(now.getMilliseconds(), 3)}`;
  console.log(ts, 'webrtc', ...msg); // eslint-disable-line no-console
}
/**
 * helper implementation of webrtc
 * performs:
 * - discovery
 * - handshake
 * - connect to webrtc stream
 * - assign webrtc stream to video element
 *
 * for development purposes this uses a test webrtc server that reads an rtsp stream from a security camera:
 * <https://github.com/vladmandic/stream-rtsp>
 *
 * @param {string} server
 * @param {string} streamName
 * @param {HTMLVideoElement | string} elementName video element or its id
 * @returns {Promise<void>}
 */
// Discovers the stream codecs on `server`, performs the SDP offer/answer
// handshake and attaches the resulting MediaStream to the given video element.
async function webRTC(server, streamName, elementName) {
  const suuid = streamName;
  log('client starting');
  log(`server: ${server} stream: ${suuid}`);
  const stream = new MediaStream();
  const connection = new RTCPeerConnection();
  connection.oniceconnectionstatechange = () => log('connection', connection.iceConnectionState);
  connection.onnegotiationneeded = async () => {
    let offer;
    // bug fix: `res` was declared with `const` inside the `if` block below, so
    // the `res && res.ok` check at the bottom silently closed over the OUTER
    // codec-fetch `res` instead of the receiver POST response — hoist it here
    let res;
    // NOTE(review): localDescription is null until setLocalDescription runs, so
    // this guard appears inverted (the offer is never created on first
    // negotiation) — confirm intent against the upstream implementation
    if (connection.localDescription) {
      offer = await connection.createOffer();
      await connection.setLocalDescription(offer);
      res = await fetch(`${server}/stream/receiver/${suuid}`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' },
        body: new URLSearchParams({
          suuid: `${suuid}`,
          data: `${btoa(connection.localDescription.sdp || '')}`,
        }),
      });
    }
    const data = res && res.ok ? await res.text() : '';
    if (data.length === 0 || !offer) {
      log('cannot connect:', server);
    } else {
      // server replies with a base64-encoded SDP answer
      connection.setRemoteDescription(new RTCSessionDescription({
        type: 'answer',
        sdp: atob(data),
      }));
      log('negotiation start:', offer);
    }
  };
  connection.ontrack = (event) => {
    stream.addTrack(event.track);
    const video = (typeof elementName === 'string') ? document.getElementById(elementName) : elementName;
    if (video instanceof HTMLVideoElement) video.srcObject = stream;
    else log('element is not a video element:', elementName);
    // video.onloadeddata = async () => log('resolution:', video.videoWidth, video.videoHeight);
    log('received track:', event.track);
  };
  // discover available streams/codecs, then add one transceiver per stream type
  const res = await fetch(`${server}/stream/codec/${suuid}`);
  const streams = res && res.ok ? await res.json() : [];
  if (streams.length === 0) log('received no streams');
  else log('received streams:', streams);
  for (const s of streams) connection.addTransceiver(s.Type, { direction: 'sendrecv' });
  const channel = connection.createDataChannel(suuid, { maxRetransmits: 10 });
  channel.onmessage = (e) => log('channel message:', channel.label, 'payload', e.data);
  channel.onerror = (e) => log('channel error:', channel.label, 'payload', e);
  // channel.onbufferedamountlow = (e) => log('channel buffering:', channel.label, 'payload', e);
  channel.onclose = () => log('channel close', channel.label);
  channel.onopen = () => {
    log('channel open', channel.label);
    setInterval(() => channel.send('ping'), 1000); // send ping because PION doesn't handle RTCSessionDescription.close()
  };
}
export default webRTC;

17
demo/icons.css Normal file

File diff suppressed because one or more lines are too long

133
demo/index-pwa.js Normal file
View File

@ -0,0 +1,133 @@
/**
 * PWA Service Worker for Human main demo
 */
/* eslint-disable no-restricted-globals */
/// <reference lib="webworker" />
const skipCaching = false; // when true the worker is a pass-through to the network
const cacheName = 'Human';
const cacheFiles = ['/favicon.ico', 'manifest.webmanifest']; // assets and models are cached on first access
let cacheModels = true; // *.bin; *.json
let cacheWASM = true; // *.wasm
let cacheOther = false; // *
let listening = false; // guards against registering the event listeners twice
const stats = { hit: 0, miss: 0 }; // cache hit/miss counters for this worker's lifetime
// timestamped console logger
const log = (...msg) => {
  const dt = new Date();
  const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
  console.log(ts, 'pwa', ...msg); // eslint-disable-line no-console
};
// Background refresh of a cached entry: fetches `req` and, when the response is
// ok, overwrites the stored copy in the named cache. The fetch chain is
// deliberately NOT awaited — callers fire-and-forget this so serving from cache
// is never blocked on the network; errors are only logged.
async function updateCached(req) {
  fetch(req)
    .then((update) => {
      // update cache if request is ok
      if (update.ok) {
        caches
          .open(cacheName)
          .then((cache) => cache.put(req, update))
          .catch((err) => log('cache update error', err)); // eslint-disable-line promise/no-nesting
      }
      return true;
    })
    .catch((err) => {
      log('fetch error', err);
      return false;
    });
}
// Cache-first response strategy: serve from cache when available, fall back to
// the network, then to the offline page. Same-origin ('basic') hits also kick
// off a background refresh depending on the cacheModels/cacheWASM/cacheOther flags.
async function getCached(evt) {
  // bypass mode: hit the network directly
  if (skipCaching) return fetch(evt.request);
  let response = await caches.match(evt.request);
  if (response && response.ok) {
    stats.hit += 1;
  } else {
    stats.miss += 1;
    response = await fetch(evt.request);
  }
  // last resort: offline page
  if (!response || !response.ok) {
    response = await caches.match('offline.html');
  }
  // refresh the cached copy in the background for eligible responses
  if (response && response.type === 'basic' && response.ok) {
    const uri = new URL(evt.request.url);
    const isModel = uri.pathname.endsWith('.bin') || uri.pathname.endsWith('.json');
    if (isModel) {
      if (cacheModels) updateCached(evt.request);
    } else if (uri.pathname.endsWith('.wasm')) {
      if (cacheWASM) updateCached(evt.request);
    } else if (cacheOther) {
      updateCached(evt.request);
    }
  }
  return response;
}
// Pre-populates the named cache with the static file list.
// bug fix: now RETURNS the caching promise so callers (e.g. the 'install'
// handler) can hand it to ExtendableEvent.waitUntil — the original returned
// undefined, making waitUntil unable to keep the worker alive during caching.
function cacheInit() {
  return caches.open(cacheName)
    .then((cache) => cache.addAll(cacheFiles)
      .then( // eslint-disable-line promise/no-nesting
        () => log('cache refresh:', cacheFiles.length, 'files'),
        (err) => log('cache error', err),
      ))
    .catch(() => log('cache error'));
}
// Registers all service-worker event handlers exactly once.
if (!listening) {
  // configuration messages from the main app to toggle caching behavior
  self.addEventListener('message', (evt) => {
    log('event message:', evt.data);
    switch (evt.data.key) {
      case 'cacheModels': cacheModels = evt.data.val; break;
      case 'cacheWASM': cacheWASM = evt.data.val; break;
      case 'cacheOther': cacheOther = evt.data.val; break;
      default:
    }
  });
  self.addEventListener('install', (evt) => {
    log('install');
    self.skipWaiting();
    // bug fix: cacheInit was passed as a bare function reference and therefore
    // never invoked; call it and wrap in Promise.resolve so waitUntil works
    // regardless of cacheInit's return value
    evt.waitUntil(Promise.resolve(cacheInit()));
  });
  self.addEventListener('activate', (evt) => {
    log('activate');
    evt.waitUntil(self.clients.claim());
  });
  self.addEventListener('fetch', (evt) => {
    const uri = new URL(evt.request.url);
    // if (uri.pathname === '/') { log('cache skip /', evt.request); return; } // skip root access requests
    if (evt.request.cache === 'only-if-cached' && evt.request.mode !== 'same-origin') return; // required due to chrome bug
    if (uri.origin !== self.location.origin) return; // skip non-local requests
    if (evt.request.method !== 'GET') return; // only cache get requests
    if (evt.request.url.includes('/api/')) return; // don't cache api requests, failures are handled at the time of call
    // bug fix: getCached is async, so its return value is always a truthy
    // promise — the original `if (response)` check could never be false and the
    // else branch was dead; respond with the promise directly
    evt.respondWith(getCached(evt));
  });
  // only trigger controllerchange once
  let refreshed = false;
  // NOTE(review): 'controllerchange' normally fires on navigator.serviceWorker
  // (page side), not on the worker global scope — confirm this listener fires
  self.addEventListener('controllerchange', (evt) => {
    log(`PWA: ${evt.type}`);
    if (refreshed) return;
    refreshed = true;
    self.location.reload();
  });
  listening = true;
}

37
demo/index-worker.js Normal file
View File

@ -0,0 +1,37 @@
/**
 * Web worker used by main demo app
 * Loaded from index.js
 */
/// <reference lib="webworker"/>
// load Human using IIFE script as Chrome Mobile does not support Modules as Workers
self.importScripts('../dist/human.js'); // eslint-disable-line no-restricted-globals
let busy = false; // true while a detect() call is in flight; incoming frames are dropped meanwhile
// eslint-disable-next-line new-cap, no-undef
const human = new Human.default(); // single Human instance shared by all messages handled by this worker
// Receives a frame from the main thread, runs Human detection on it and posts
// the result back (transferring the processed canvas as raw ImageData bytes).
onmessage = async (msg) => { // receive message from main thread
  if (busy) return; // drop frames while a previous detection is still running
  busy = true;
  try {
    // received from index.js using:
    // worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
    const image = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
    const result = await human.detect(image, msg.data.userConfig);
    result.tensors = human.tf.engine().state.numTensors; // append to result object so main thread get info
    result.backend = human.tf.getBackend(); // append to result object so main thread get info
    if (result.canvas) { // convert canvas to imageData and send it by reference
      const canvas = new OffscreenCanvas(result.canvas.width, result.canvas.height);
      const ctx = canvas.getContext('2d');
      if (ctx) ctx.drawImage(result.canvas, 0, 0);
      const img = ctx ? ctx.getImageData(0, 0, result.canvas.width, result.canvas.height) : null;
      result.canvas = null; // must strip original canvas from return value as it cannot be transfered from worker thread
      if (img) postMessage({ result, image: img.data.buffer, width: msg.data.width, height: msg.data.height }, [img.data.buffer]);
      else postMessage({ result }); // send message back to main thread with canvas
    } else {
      postMessage({ result }); // send message back to main thread without canvas
    }
  } finally {
    // bug fix: without the finally, any throw (bad payload, detect() failure)
    // left busy=true forever and the worker silently dropped all later frames
    busy = false;
  }
};

View File

@ -1,45 +1,42 @@
<!DOCTYPE html> <!DOCTYPE html>
<html lang="en"> <html lang="en">
<head> <head>
<title>Human</title> <meta charset="utf-8">
<meta charset="utf-8"> <title>Human</title>
<meta http-equiv="content-type"> <meta name="viewport" content="width=device-width" id="viewport">
<meta content="text/html"> <meta name="keywords" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>"> <meta name="application-name" content="Human">
<meta name="viewport" content="width=device-width, initial-scale=0.6, minimum-scale=0.3, maximum-scale=3.0, shrink-to-fit=yes, user-scalable=yes"> <meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000"/> <meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="application-name" content="Human"> <meta name="theme-color" content="#000000">
<meta name="msapplication-tooltip" content="Human: AI-powered 3D Human Detection"> <link rel="manifest" href="./manifest.webmanifest">
<link rel="manifest" href="./manifest.webmanifest"> <link rel="shortcut icon" href="../favicon.ico" type="image/x-icon">
<link rel="shortcut icon" href="../favicon.ico" type="image/x-icon"> <link rel="apple-touch-icon" href="../assets/icon.png">
<link rel="apple-touch-icon" href="../assets/icon.png"> <link rel="stylesheet" type="text/css" href="./icons.css">
<!-- load compiled demo js --> <script src="./index.js" type="module"></script>
<script src="../dist/demo-browser-index.js"></script>
<!-- alternatively load demo sources directly -->
<!-- <script src="./browser.js" type="module"></script> -->
<style> <style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../assets/lato-light.woff2') } @font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../assets/lato-light.woff2') }
@font-face { font-family: 'FA'; font-display: swap; font-style: normal; font-weight: 900; src: local('FA'), url('../assets/fa-solid-900.woff2'); }
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; } html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; background: black; color: white; overflow-x: hidden; scrollbar-width: none; } body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
body::-webkit-scrollbar { display: none; } body::-webkit-scrollbar { display: none; }
hr { width: 100%; } hr { width: 100%; }
.play { position: absolute; width: 250px; height: 250px; z-index: 9; top: 55%; left: 50%; margin-left: -125px; display: none; } .play { position: absolute; width: 256px; height: 256px; z-index: 9; bottom: 15%; left: 50%; margin-left: -125px; display: none; filter: grayscale(1); }
.play:hover { filter: grayscale(0); }
.btn-background { fill:grey; cursor: pointer; opacity: 0.6; } .btn-background { fill:grey; cursor: pointer; opacity: 0.6; }
.btn-background:hover { opacity: 1; } .btn-background:hover { opacity: 1; }
.btn-foreground { fill:white; cursor: pointer; opacity: 0.8; } .btn-foreground { fill:white; cursor: pointer; opacity: 0.8; }
.btn-foreground:hover { opacity: 1; } .btn-foreground:hover { opacity: 1; }
.status { position: absolute; width: 100vw; bottom: 15%; text-align: center; font-size: 4rem; font-weight: 100; text-shadow: 2px 2px darkslategrey; } .status { position: absolute; width: 100vw; bottom: 10%; text-align: center; font-size: 3rem; font-weight: 100; text-shadow: 2px 2px #303030; }
.thumbnail { margin: 8px; box-shadow: 0 0 4px 4px dimgrey; } .thumbnail { margin: 8px; box-shadow: 0 0 4px 4px dimgrey; }
.thumbnail:hover { box-shadow: 0 0 8px 8px dimgrey; filter: grayscale(1); } .thumbnail:hover { box-shadow: 0 0 8px 8px dimgrey; filter: grayscale(1); }
.log { position: absolute; bottom: 0; margin: 0.4rem; font-size: 0.9rem; } .log { position: absolute; bottom: 0; margin: 0.4rem 0.4rem 0 0.4rem; font-size: 0.9rem; }
.menubar { width: 100vw; background: darkslategray; display: flex; justify-content: space-evenly; text-align: center; padding: 8px; cursor: pointer; } .menubar { width: 100vw; background: #303030; display: flex; justify-content: space-evenly; text-align: center; padding: 8px; cursor: pointer; }
.samples-container { display: flex; flex-wrap: wrap; } .samples-container { display: flex; flex-wrap: wrap; }
.video { display: none; } .video { display: none; }
.canvas { margin: 0 auto; } .canvas { margin: 0 auto; }
.bench { position: absolute; right: 0; bottom: 0; } .bench { position: absolute; right: 0; bottom: 0; }
.compare-image { width: 200px; position: absolute; top: 150px; left: 30px; box-shadow: 0 0 2px 2px black; background: black; } .compare-image { width: 200px; position: absolute; top: 150px; left: 30px; box-shadow: 0 0 2px 2px black; background: black; display: none; }
.loader { width: 300px; height: 300px; border: 3px solid transparent; border-radius: 50%; border-top: 4px solid #f15e41; animation: spin 4s linear infinite; position: absolute; bottom: 25%; left: 50%; margin-left: -150px; z-index: 15; } .loader { width: 300px; height: 300px; border: 3px solid transparent; border-radius: 50%; border-top: 4px solid #f15e41; animation: spin 4s linear infinite; position: absolute; bottom: 15%; left: 50%; margin-left: -150px; z-index: 15; }
.loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: 4px solid transparent; } .loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: 4px solid transparent; }
.loader::before { border-top-color: #bad375; animation: 3s spin linear infinite; } .loader::before { border-top-color: #bad375; animation: 3s spin linear infinite; }
.loader::after { border-top-color: #26a9e0; animation: spin 1.5s linear infinite; } .loader::after { border-top-color: #26a9e0; animation: spin 1.5s linear infinite; }
@ -64,38 +61,58 @@
.button-model::before { content: "\f2c2"; } .button-model::before { content: "\f2c2"; }
.button-start::before { content: "\f144"; } .button-start::before { content: "\f144"; }
.button-stop::before { content: "\f28b"; } .button-stop::before { content: "\f28b"; }
.icon { width: 180px; text-align: -webkit-center; text-align: -moz-center; filter: grayscale(1); }
.icon:hover { background: #505050; filter: grayscale(0); }
.hint { opacity: 0; transition-duration: 0.5s; transition-property: opacity; font-style: italic; position: fixed; top: 5rem; padding: 8px; margin: 8px; box-shadow: 0 0 2px 2px #303030; }
.input-file { align-self: center; width: 5rem; }
.results { position: absolute; left: 0; top: 5rem; background: #303030; width: 20rem; height: 90%; font-size: 0.8rem; overflow-y: auto; display: none }
.results::-webkit-scrollbar { background-color: #303030; }
.results::-webkit-scrollbar-thumb { background: black; border-radius: 10px; }
.json-line { margin: 4px 0; display: flex; justify-content: flex-start; }
.json { margin-right: 8px; margin-left: 8px; }
.json-type { color: lightyellow; }
.json-key { color: white; }
.json-index { color: lightcoral; }
.json-value { margin-left: 20px; }
.json-number { color: lightgreen; }
.json-boolean { color: lightyellow; }
.json-string { color: lightblue; }
.json-size { color: gray; }
.hide { display: none; }
.fas { display: inline-block; width: 0; height: 0; border-style: solid; }
.fa-caret-down { border-width: 10px 8px 0 8px; border-color: white transparent }
.fa-caret-right { border-width: 10px 0 8px 10px; border-color: transparent transparent transparent white; }
</style> </style>
</head> </head>
<body> <body>
<div id="play" class="play"> <div id="play" class="play icon-play"></div>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512">
<path d="M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zm115.7 272l-176 101c-15.8 8.8-35.7-2.5-35.7-21V152c0-18.4 19.8-29.8 35.7-21l176 107c16.4 9.2 16.4 32.9 0 42z" class="btn-background"/>
<path d="M371.7 280l-176 101c-15.8 8.8-35.7-2.5-35.7-21V152c0-18.4 19.8-29.8 35.7-21l176 107c16.4 9.2 16.4 32.9 0 42z" class="btn-foreground"/>
</svg>
</div>
<div id="background"> <div id="background">
<div class='wave one'></div> <div class="wave one"></div>
<div class='wave two'></div> <div class="wave two"></div>
<div class='wave three'></div> <div class="wave three"></div>
</div> </div>
<div id="loader" class="loader"></div> <div id="loader" class="loader"></div>
<div id="status" class="status"></div> <div id="status" class="status"></div>
<div id="menubar" class="menubar"> <div id="menubar" class="menubar">
<span class="button button-display" id="btnDisplay">Display<br>Options</span> <div id="btnDisplay" class="icon"><div class="icon-binoculars"> </div>display</div>
<span class="button button-image" id="btnImage">Image<br>Processing</span> <div id="btnImage" class="icon"><div class="icon-brush"></div>input</div>
<span class="button button-process" id="btnProcess">Model<br>Processing</span> <div id="btnProcess" class="icon"><div class="icon-stats"></div>options</div>
<span class="button button-model" id="btnModel">Model<br>Selection</span> <div id="btnModel" class="icon"><div class="icon-games"></div>models</div>
<span class="button button-start" id="btnStart">Start<br>Video</span> <div id="btnStart" class="icon"><div class="icon-webcam"></div><span id="btnStartText">start video</span></div>
</div> </div>
<div id="media"> <div id="media">
<canvas id="canvas" class="canvas"></canvas> <canvas id="canvas" class="canvas"></canvas>
<video id="video" playsinline class="video"></video> <video id="video" playsinline class="video"></video>
</div> </div>
<div id="compare-container" style="display: none" class="compare-image"> <div id="compare-container" class="compare-image">
<canvas id="compare-canvas" width="200px" height="200px"></canvas> <canvas id="compare-canvas" width="200" height="200"></canvas>
<div id="simmilarity"></div> <div id="similarity"></div>
</div> </div>
<div id="samples-container" class="samples-container"></div> <div id="samples-container" class="samples-container"></div>
<div id="hint" class="hint"></div>
<div id="log" class="log"></div> <div id="log" class="log"></div>
<div id="results" class="results"></div>
</body> </body>
</html> </html>

1025
demo/index.js Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,12 +0,0 @@
{
"name": "Human Library",
"short_name": "Human",
"icons": [
{ "src": "../assets/icon.png", "sizes": "853x853", "type": "image/png", "purpose": "any maskable" }
],
"start_url": "./index.html",
"scope": "/",
"display": "standalone",
"background_color": "#000000",
"theme_color": "#000000"
}

View File

@ -1,12 +1,10 @@
{ {
"name": "Human Library", "name": "Human Library",
"short_name": "Human", "short_name": "Human",
"icons": [ "icons": [{ "src": "../assets/icon.png", "sizes": "512x512", "type": "image/png", "purpose": "any maskable" }],
{ "src": "../assets/icon.png", "sizes": "853x853", "type": "image/png", "purpose": "any maskable" }
],
"start_url": "./index.html", "start_url": "./index.html",
"scope": "/", "scope": "/",
"display": "standalone", "display": "standalone",
"background_color": "#000000", "background_color": "#000000",
"theme_color": "#000000" "theme_color": "#000000"
} }

View File

@ -0,0 +1,71 @@
# Human Multithreading Demos
- **Browser** demo `multithread` & `worker`
Runs each `human` module in a separate web worker for highest possible performance
- **NodeJS** demo `node-multiprocess` & `node-multiprocess-worker`
Runs multiple parallel `human` instances by dispatching them to a pool of pre-created worker processes
<br><hr><br>
## NodeJS Multi-process Demo
`nodejs/node-multiprocess.js` and `nodejs/node-multiprocess-worker.js`: Demo using NodeJS with CommonJS module
Demo that starts n child worker processes for parallel execution
```shell
node demo/nodejs/node-multiprocess.js
```
<!-- eslint-skip -->
```json
2021-06-01 08:54:19 INFO: @vladmandic/human version 2.0.0
2021-06-01 08:54:19 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0
2021-06-01 08:54:19 INFO: Human multi-process test
2021-06-01 08:54:19 STATE: Enumerated images: ./assets 15
2021-06-01 08:54:19 STATE: Main: started worker: 130362
2021-06-01 08:54:19 STATE: Main: started worker: 130363
2021-06-01 08:54:19 STATE: Main: started worker: 130369
2021-06-01 08:54:19 STATE: Main: started worker: 130370
2021-06-01 08:54:20 STATE: Worker: PID: 130370 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
2021-06-01 08:54:20 STATE: Worker: PID: 130362 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
2021-06-01 08:54:20 STATE: Worker: PID: 130369 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
2021-06-01 08:54:20 STATE: Worker: PID: 130363 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130370
2021-06-01 08:54:21 INFO: Latency: worker initializtion: 1348 message round trip: 0
2021-06-01 08:54:21 DATA: Worker received message: 130370 { test: true }
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130362
2021-06-01 08:54:21 DATA: Worker received message: 130362 { image: 'samples/ai-face.jpg' }
2021-06-01 08:54:21 DATA: Worker received message: 130370 { image: 'samples/ai-body.jpg' }
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130369
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130363
2021-06-01 08:54:21 DATA: Worker received message: 130369 { image: 'assets/human-sample-upper.jpg' }
2021-06-01 08:54:21 DATA: Worker received message: 130363 { image: 'assets/sample-me.jpg' }
2021-06-01 08:54:24 DATA: Main: worker finished: 130362 detected faces: 1 bodies: 1 hands: 0 objects: 1
2021-06-01 08:54:24 STATE: Main: dispatching to worker: 130362
2021-06-01 08:54:24 DATA: Worker received message: 130362 { image: 'assets/sample1.jpg' }
2021-06-01 08:54:25 DATA: Main: worker finished: 130369 detected faces: 1 bodies: 1 hands: 0 objects: 1
2021-06-01 08:54:25 STATE: Main: dispatching to worker: 130369
2021-06-01 08:54:25 DATA: Main: worker finished: 130370 detected faces: 1 bodies: 1 hands: 0 objects: 1
2021-06-01 08:54:25 STATE: Main: dispatching to worker: 130370
2021-06-01 08:54:25 DATA: Worker received message: 130369 { image: 'assets/sample2.jpg' }
2021-06-01 08:54:25 DATA: Main: worker finished: 130363 detected faces: 1 bodies: 1 hands: 0 objects: 2
2021-06-01 08:54:25 STATE: Main: dispatching to worker: 130363
2021-06-01 08:54:25 DATA: Worker received message: 130370 { image: 'assets/sample3.jpg' }
2021-06-01 08:54:25 DATA: Worker received message: 130363 { image: 'assets/sample4.jpg' }
2021-06-01 08:54:30 DATA: Main: worker finished: 130362 detected faces: 3 bodies: 1 hands: 0 objects: 7
2021-06-01 08:54:30 STATE: Main: dispatching to worker: 130362
2021-06-01 08:54:30 DATA: Worker received message: 130362 { image: 'assets/sample5.jpg' }
2021-06-01 08:54:31 DATA: Main: worker finished: 130369 detected faces: 3 bodies: 1 hands: 0 objects: 5
2021-06-01 08:54:31 STATE: Main: dispatching to worker: 130369
2021-06-01 08:54:31 DATA: Worker received message: 130369 { image: 'assets/sample6.jpg' }
2021-06-01 08:54:31 DATA: Main: worker finished: 130363 detected faces: 4 bodies: 1 hands: 2 objects: 2
2021-06-01 08:54:31 STATE: Main: dispatching to worker: 130363
2021-06-01 08:54:39 STATE: Main: worker exit: 130370 0
2021-06-01 08:54:39 DATA: Main: worker finished: 130362 detected faces: 1 bodies: 1 hands: 0 objects: 1
2021-06-01 08:54:39 DATA: Main: worker finished: 130369 detected faces: 1 bodies: 1 hands: 1 objects: 3
2021-06-01 08:54:39 STATE: Main: worker exit: 130362 0
2021-06-01 08:54:39 STATE: Main: worker exit: 130369 0
2021-06-01 08:54:41 DATA: Main: worker finished: 130363 detected faces: 9 bodies: 1 hands: 0 objects: 10
2021-06-01 08:54:41 STATE: Main: worker exit: 130363 0
2021-06-01 08:54:41 INFO: Processed: 15 images in total: 22006 ms working: 20658 ms average: 1377 ms
```

View File

@ -0,0 +1,33 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<meta name="viewport" content="width=device-width" id="viewport">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<!-- demo entry point: creates one web worker per human module -->
<script src="../multithread/index.js" type="module"></script>
<style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
body::-webkit-scrollbar { display: none; }
.status { position: absolute; width: 100vw; bottom: 10%; text-align: center; font-size: 3rem; font-weight: 100; text-shadow: 2px 2px #303030; }
.log { position: absolute; bottom: 0; margin: 0.4rem 0.4rem 0 0.4rem; font-size: 0.9rem; }
.video { display: none; }
.canvas { margin: 0 auto; }
</style>
</head>
<body>
<!-- video is hidden; frames are drawn onto the canvas by index.js -->
<div id="status" class="status"></div>
<canvas id="canvas" class="canvas"></canvas>
<video id="video" playsinline class="video"></video>
<div id="log" class="log"></div>
</body>
</html>

264
demo/multithread/index.js Normal file
View File

@ -0,0 +1,264 @@
/**
* Human demo for browsers
*
* @description Demo app that enables all Human modules and runs them in separate worker threads
*
*/
import { Human } from '../../dist/human.esm.js'; // equivalent of @vladmandic/human
import GLBench from '../helpers/gl-bench.js';
const workerJS = '../multithread/worker.js';
// Per-thread Human configurations: the main thread runs only input filtering and
// gesture analysis, while each worker enables exactly one detection module
// (face, body, hand or object) so the heavy models execute in parallel.
const config = {
  main: { // processes input and runs gesture analysis
    warmup: 'none',
    backend: 'webgl',
    modelBasePath: '../../models/',
    async: false,
    filter: { enabled: true },
    face: { enabled: false },
    object: { enabled: false },
    gesture: { enabled: true },
    hand: { enabled: false },
    body: { enabled: false },
    segmentation: { enabled: false },
  },
  face: { // runs all face models
    warmup: 'none',
    backend: 'webgl',
    modelBasePath: '../../models/',
    async: false,
    filter: { enabled: false },
    face: { enabled: true },
    object: { enabled: false },
    gesture: { enabled: false },
    hand: { enabled: false },
    body: { enabled: false },
    segmentation: { enabled: false },
  },
  body: { // runs body model
    warmup: 'none',
    backend: 'webgl',
    modelBasePath: '../../models/',
    async: false,
    filter: { enabled: false },
    face: { enabled: false },
    object: { enabled: false },
    gesture: { enabled: false },
    hand: { enabled: false },
    body: { enabled: true },
    segmentation: { enabled: false },
  },
  hand: { // runs hands model
    warmup: 'none',
    backend: 'webgl',
    modelBasePath: '../../models/',
    async: false,
    filter: { enabled: false },
    face: { enabled: false },
    object: { enabled: false },
    gesture: { enabled: false },
    hand: { enabled: true },
    body: { enabled: false },
    segmentation: { enabled: false },
  },
  object: { // runs object model
    warmup: 'none',
    backend: 'webgl',
    modelBasePath: '../../models/',
    async: false,
    filter: { enabled: false },
    face: { enabled: false },
    object: { enabled: true },
    gesture: { enabled: false },
    hand: { enabled: false },
    body: { enabled: false },
    segmentation: { enabled: false },
  },
};
let human; // main-thread Human instance (gestures, interpolation, drawing)
let canvas; // output canvas element
let video; // hidden video element fed by getUserMedia
let bench; // GLBench frame-rate monitor, created lazily on first frame
// per-module flag: true while that worker is still processing a frame,
// so runDetection does not dispatch a new frame to a busy worker
const busy = {
  face: false,
  hand: false,
  body: false,
  object: false,
};
// one dedicated web worker per detection module, created in startWorkers()
const workers = {
  /** @type {Worker | null} */
  face: null,
  /** @type {Worker | null} */
  body: null,
  /** @type {Worker | null} */
  hand: null,
  /** @type {Worker | null} */
  object: null,
};
// last measured per-module round-trip time in ms (string placeholder until first result)
const time = {
  main: 0,
  draw: 0,
  face: '[warmup]',
  body: '[warmup]',
  hand: '[warmup]',
  object: '[warmup]',
};
// human.now() timestamps of when each module's current request was dispatched
const start = {
  main: 0,
  draw: 0,
  face: 0,
  body: 0,
  hand: 0,
  object: 0,
};
const result = { // initialize empty result object which will be partially filled with results from each thread
  performance: {},
  hand: [],
  body: [],
  face: [],
  object: [],
};
// Console logger that prefixes every message with a zero-padded HH:MM:SS.mmm timestamp.
function log(...msg) {
  const now = new Date();
  const pad = (value, width) => value.toString().padStart(width, '0');
  const stamp = `${pad(now.getHours(), 2)}:${pad(now.getMinutes(), 2)}:${pad(now.getSeconds(), 2)}.${pad(now.getMilliseconds(), 3)}`;
  console.log(stamp, ...msg); // eslint-disable-line no-console
}
// Render loop: interpolates the latest combined worker results, draws them over
// the canvas, and refreshes the on-screen performance summary.
// Re-schedules itself via requestAnimationFrame, independent of detection rate.
async function drawResults() {
  start.draw = human.now();
  const interpolated = human.next(result); // temporal interpolation for smooth drawing
  await human.draw.all(canvas, interpolated);
  time.draw = Math.round(1 + human.now() - start.draw); // +1 avoids divide-by-zero in fps below
  const fps = Math.round(10 * 1000 / time.main) / 10;
  const draw = Math.round(10 * 1000 / time.draw) / 10;
  const div = document.getElementById('log');
  if (div) div.innerText = `Human: version ${human.version} | Performance: Main ${time.main}ms Face: ${time.face}ms Body: ${time.body}ms Hand: ${time.hand}ms Object ${time.object}ms | FPS: ${fps} / ${draw}`;
  requestAnimationFrame(drawResults);
}
// Handler for results posted back by any detection worker: stores the partial
// result under its module name, marks that worker idle, and records round-trip time.
async function receiveMessage(msg) {
  const { type, result: partial } = msg.data;
  result[type] = partial;
  busy[type] = false;
  time[type] = Math.round(human.now() - start[type]);
}
// Detection loop: captures the current video frame as ImageData and posts it to
// every worker that is not still busy with a previous frame.
// Re-schedules itself via requestAnimationFrame.
async function runDetection() {
  start.main = human.now();
  if (!bench) {
    // create the benchmark monitor lazily on the first frame
    bench = new GLBench(null, { trackGPU: false, chartHz: 20, chartLen: 20 });
    bench.begin('human');
  }
  const ctx = canvas.getContext('2d');
  ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
  const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
  // NOTE(review): the transfer list holds buffer.slice(0) (a fresh copy), not the
  // buffer itself, so the original pixel buffer is structured-cloned to each worker
  // and remains usable here for the next postMessage — presumably intentional; confirm.
  if (!busy.face) {
    busy.face = true;
    start.face = human.now();
    if (workers.face) workers.face.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.face, type: 'face' }, [imageData.data.buffer.slice(0)]);
  }
  if (!busy.body) {
    busy.body = true;
    start.body = human.now();
    if (workers.body) workers.body.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.body, type: 'body' }, [imageData.data.buffer.slice(0)]);
  }
  if (!busy.hand) {
    busy.hand = true;
    start.hand = human.now();
    if (workers.hand) workers.hand.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.hand, type: 'hand' }, [imageData.data.buffer.slice(0)]);
  }
  if (!busy.object) {
    busy.object = true;
    start.object = human.now();
    if (workers.object) workers.object.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.object, type: 'object' }, [imageData.data.buffer.slice(0)]);
  }
  time.main = Math.round(human.now() - start.main);
  bench.nextFrame();
  requestAnimationFrame(runDetection);
}
// Acquire the user-facing camera via getUserMedia, attach the stream to the hidden
// video element, and size the canvas to the video once the first frame is loaded.
// Returns a promise that resolves when video data is ready (or `true` immediately
// if no stream could be acquired, via the `!stream ||` short-circuit).
async function setupCamera() {
  video = document.getElementById('video');
  canvas = document.getElementById('canvas');
  const output = document.getElementById('log');
  let stream;
  const constraints = {
    audio: false,
    video: {
      facingMode: 'user',
      resizeMode: 'crop-and-scale',
      width: { ideal: document.body.clientWidth },
      aspectRatio: document.body.clientWidth / document.body.clientHeight,
    },
  };
  // enumerate devices for diag purposes
  navigator.mediaDevices.enumerateDevices()
    .then((devices) => log('enumerated devices:', devices))
    .catch(() => log('mediaDevices error'));
  log('camera constraints', constraints);
  try {
    stream = await navigator.mediaDevices.getUserMedia(constraints);
  } catch (err) {
    // surface camera errors both on screen and in the log
    if (output) output.innerText += `\n${err.name}: ${err.message}`;
    log('camera error:', err);
  }
  if (stream) {
    const tracks = stream.getVideoTracks();
    log('enumerated viable tracks:', tracks);
    const track = stream.getVideoTracks()[0];
    const settings = track.getSettings();
    log('selected video source:', track, settings);
  } else {
    log('missing video stream');
  }
  const promise = !stream || new Promise((resolve) => {
    video.onloadeddata = () => {
      // scale canvas display to viewport height while keeping native video resolution
      canvas.style.height = '100vh';
      canvas.width = video.videoWidth;
      canvas.height = video.videoHeight;
      video.play();
      resolve(true);
    };
  });
  // attach input to video element
  if (stream && video) video.srcObject = stream;
  return promise;
}
// Lazily create the four detection workers (all running the same worker script)
// and route every worker's results through the shared receiveMessage handler.
async function startWorkers() {
  for (const module of ['face', 'body', 'hand', 'object']) {
    if (!workers[module]) workers[module] = new Worker(workerJS);
    workers[module].onmessage = receiveMessage;
  }
}
async function main() {
if (typeof Worker === 'undefined' || typeof OffscreenCanvas === 'undefined') {
return;
}
human = new Human(config.main);
const div = document.getElementById('log');
if (div) div.innerText = `Human: version ${human.version}`;
await startWorkers();
await setupCamera();
runDetection();
drawResults();
}
window.onload = main;

View File

@ -0,0 +1,85 @@
/**
* Human demo for NodeJS
*
* Used by node-multiprocess.js as an on-demand started worker process
* Receives messages from parent process and sends results
*/
const fs = require('fs');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// workers actual import tfjs and human modules
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
let human = null; // Human instance, created in main()
// worker configuration: all modules enabled, models loaded from local file:// paths
const myConfig = {
  // backend: 'tensorflow',
  modelBasePath: 'file://models/',
  debug: false,
  async: true,
  face: {
    enabled: true,
    detector: { enabled: true, rotation: false },
    mesh: { enabled: true },
    iris: { enabled: true },
    description: { enabled: true },
    emotion: { enabled: true },
  },
  hand: {
    enabled: true,
  },
  // body: { modelPath: 'blazepose.json', enabled: true },
  body: { enabled: true },
  object: { enabled: true },
};
// read image from a file and create tensor to be used by human
// this way we don't need any monkey patches
// you can add any pre-processing here such as resizing, etc.
async function image(img) {
  const buffer = fs.readFileSync(img);
  // decode -> float -> add batch dimension; tidy disposes the intermediates
  const tensor = human.tf.tidy(() => human.tf.node.decodeImage(buffer).toFloat().expandDims());
  return tensor;
}
// actual human detection: load the image as a tensor, run detection, send the
// result back to the parent process over IPC, then free the tensor
async function detect(img) {
  const tensor = await image(img);
  const result = await human.detect(tensor);
  if (process.send) { // check if ipc exists
    process.send({ image: img, detected: result }); // send results back to main
    process.send({ ready: true }); // send signal back to main that this worker is now idle and ready for next image
  }
  tf.dispose(tensor);
}
// Worker entry point: install IPC message handlers first so no messages are lost,
// then initialize Human, pre-load models, and signal readiness to the parent.
async function main() {
  process.on('unhandledRejection', (err) => {
    // @ts-ignore // no idea if exception message is complete
    log.error(err?.message || err || 'no error message');
  });
  // on worker start first initialize message handler so we don't miss any messages
  process.on('message', (msg) => {
    // if main told worker to exit
    if (msg.exit && process.exit) process.exit(); // eslint-disable-line no-process-exit
    if (msg.test && process.send) process.send({ test: true });
    if (msg.image) detect(msg.image); // if main told worker to process image
    log.data('Worker received message:', process.pid, msg); // generic log
  });
  // create instance of human
  human = new Human(myConfig);
  // wait until tf is ready
  await human.tf.ready();
  // pre-load models
  log.state('Worker: PID:', process.pid, `TensorFlow/JS ${human.tf.version['tfjs-core']} Human ${human.version} Backend: ${human.tf.getBackend()}`);
  await human.load();
  // now we're ready, so send message back to main that it knows it can use this worker
  if (process.send) process.send({ ready: true });
}
main();

View File

@ -0,0 +1,97 @@
/**
* Human demo for NodeJS
*
* Uses NodeJS fork functionality with inter-processing-messaging
* Starts a pool of worker processes and dispatch work items to each worker when they are available
* Uses node-multiprocess-worker.js for actual processing
*/
const fs = require('fs');
const path = require('path');
const childProcess = require('child_process'); // eslint-disable-line camelcase
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// note that main process does not import human or tfjs at all, it's all done from worker process
// worker script forked for each child process
const workerFile = 'demo/multithread/node-multiprocess-worker.js';
const imgPathRoot = './samples/in'; // modify to include your sample images
const numWorkers = 4; // how many workers will be started
const workers = []; // this holds worker processes
const images = []; // this holds queue of enumerated images
const t = []; // timers
let numImages; // total image count, fixed after enumeration
// triggered by main when a worker sends a ready message
// if image pool is empty, signal worker to exit otherwise dispatch image to worker and remove image from queue
// Dispatch the next queued image to an idle worker; on the very first dispatch
// also send a test ping (for latency measurement), and tell the worker to exit
// once the queue is drained.
async function submitDetect(worker) {
  if (!t[2]) t[2] = process.hrtime.bigint(); // timestamp of first dispatch, used for latency measurement
  const isFirstDispatch = images.length === numImages;
  if (isFirstDispatch) worker.send({ test: true }); // ping to measure message round-trip
  if (images.length === 0) {
    worker.send({ exit: true }); // nothing left in queue
    return;
  }
  log.state('Main: dispatching to worker:', worker.pid);
  worker.send({ image: images.shift() });
}
// Poll until every worker process has disconnected, then record the end
// timestamp and log total / working / average per-image processing times.
function waitCompletion() {
  const stillConnected = workers.filter((worker) => worker.connected).length;
  if (stillConnected > 0) {
    setImmediate(waitCompletion); // re-check on next event-loop turn
  } else {
    t[1] = process.hrtime.bigint();
    log.info('Processed:', numImages, 'images in', 'total:', Math.trunc(Number(t[1] - t[0]) / 1000000), 'ms', 'working:', Math.trunc(Number(t[1] - t[2]) / 1000000), 'ms', 'average:', Math.trunc(Number(t[1] - t[2]) / numImages / 1000000), 'ms');
  }
}
// Log two latency figures: worker startup (t[2]-t[0]) and the test-message
// round trip (now - t[2]), both converted from nanoseconds to milliseconds.
function measureLatency() {
  t[3] = process.hrtime.bigint();
  const toMillis = (ns) => Math.trunc(Number(ns) / 1000 / 1000);
  log.info('Latency: worker initializtion: ', toMillis(t[2] - t[0]), 'message round trip:', toMillis(t[3] - t[2]));
}
// Entry point: enumerate sample images, fork the worker pool, wire up IPC
// message routing, and wait until all workers have drained the queue and exited.
async function main() {
  process.on('unhandledRejection', (err) => {
    // @ts-ignore // no idea if exception message is complete
    log.error(err?.message || err || 'no error message');
  });
  log.header();
  log.info('Human multi-process test');
  // enumerate all images into queue
  const dir = fs.readdirSync(imgPathRoot);
  for (const imgFile of dir) {
    if (imgFile.toLocaleLowerCase().endsWith('.jpg')) images.push(path.join(imgPathRoot, imgFile));
  }
  numImages = images.length;
  log.state('Enumerated images:', imgPathRoot, numImages);
  // initialize all timers to "now"; t[2] is overwritten on first dispatch
  t[0] = process.hrtime.bigint();
  t[1] = process.hrtime.bigint();
  t[2] = process.hrtime.bigint();
  // manage worker processes
  for (let i = 0; i < numWorkers; i++) {
    // create worker process
    workers[i] = await childProcess.fork(workerFile, ['special']);
    // parse message that worker process sends back to main
    // if message is ready, dispatch next image in queue
    // if message is processing result, just print how many faces were detected
    // otherwise it's an unknown message
    workers[i].on('message', (msg) => {
      if (msg.ready) submitDetect(workers[i]);
      else if (msg.image) log.data('Main: worker finished:', workers[i].pid, 'detected faces:', msg.detected.face?.length, 'bodies:', msg.detected.body?.length, 'hands:', msg.detected.hand?.length, 'objects:', msg.detected.object?.length);
      else if (msg.test) measureLatency();
      else log.data('Main: worker message:', workers[i].pid, msg);
    });
    // just log when worker exits
    workers[i].on('exit', (msg) => log.state('Main: worker exit:', workers[i].pid, msg));
    // just log which worker was started
    log.state('Main: started worker:', workers[i].pid);
  }
  // wait for all workers to complete
  waitCompletion();
}
main();

View File

@ -0,0 +1,18 @@
/// <reference lib="webworker" />
// load Human using IIFE script as Chrome Mobile does not support Modules as Workers
self.importScripts('../../dist/human.js'); // eslint-disable-line no-restricted-globals
let human; // created lazily on first message, using the config supplied by the caller
// Handle each frame posted by the main thread: rebuild the ImageData from the
// received ArrayBuffer, run detection, and reply with only this worker's module
// result (tagged by type so the main thread knows which slot to fill).
onmessage = async (msg) => {
  // received from index.js using:
  // worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
  // Human is registered as global namespace using IIFE script
  if (!human) human = new Human.default(msg.data.config); // eslint-disable-line no-undef, new-cap
  const image = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
  let result = {};
  result = await human.detect(image, msg.data.config);
  postMessage({ result: result[msg.data.type], type: msg.data.type });
};

View File

@ -1,101 +0,0 @@
const log = require('@vladmandic/pilogger');
const fs = require('fs');
const process = require('process');
// for Node, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
// load specific version of Human library that matches TensorFlow mode
const Human = require('../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
let human = null; // Human instance, created in init()
// legacy NodeJS configuration with explicit file:// model paths per module
const myConfig = {
  backend: 'tensorflow',
  console: true,
  videoOptimized: false,
  async: false,
  face: {
    enabled: true,
    detector: { modelPath: 'file://models/faceboxes.json', enabled: true, minConfidence: 0.5 },
    // detector: { modelPath: 'file://models/blazeface-back.json', enabled: false }, // cannot use blazeface in nodejs due to missing required kernel function in tfjs-node
    mesh: { modelPath: 'file://models/facemesh.json', enabled: false }, // depends on blazeface detector
    iris: { modelPath: 'file://models/iris.json', enabled: true },
    age: { modelPath: 'file://models/age-ssrnet-imdb.json', enabled: true },
    gender: { modelPath: 'file://models/gender.json', enabled: true },
    emotion: { modelPath: 'file://models/emotion.json', enabled: true },
  },
  // body: { modelPath: 'file://models/blazepose.json', modelType: 'blazepose', inputSize: 256, enabled: true },
  body: { modelPath: 'file://models/posenet.json', modelType: 'posenet', inputSize: 257, enabled: true },
  hand: {
    enabled: true,
    detector: { modelPath: 'file://models/handdetect.json' },
    skeleton: { modelPath: 'file://models/handskeleton.json' },
  },
};
/** Initialize TensorFlow, create the shared Human instance and pre-load all enabled models. */
async function init() {
  await tf.ready(); // tf backend must be ready before any model work
  human = new Human(myConfig);
  log.info('Human:', human.version);
  log.info('Active Configuration', human.config);
  await human.load(); // pre-load models
  // list only the models that actually loaded
  const loaded = Object.entries(human.models)
    .filter(([, model]) => model)
    .map(([name]) => name);
  log.info('Loaded:', loaded);
  log.info('Memory state:', human.tf.engine().memory());
}
/**
 * Run detection on a single image file and print the results to console
 * @param input path to an image file on the local filesystem
 */
async function detect(input) {
  // decode the file into a batched float tensor; tidy releases the intermediate tensors
  const image = human.tf.tidy(() => {
    const raw = fs.readFileSync(input);
    return human.tf.node.decodeImage(raw).toFloat().expandDims(0);
  });
  // image shape contains image dimensions and depth
  log.state('Processing:', image.shape);
  const result = await human.detect(image, myConfig); // run actual detection
  image.dispose(); // tensor is no longer needed
  // print each result category
  log.data('Face: ', result.face);
  log.data('Body:', result.body);
  log.data('Hand:', result.hand);
  log.data('Gesture:', result.gesture);
}
/** Exercise detection using the embedded warmup images when no input file is given. */
async function test() {
  // first pass: face-only warmup image
  log.state('Processing embedded warmup image: face');
  myConfig.warmup = 'face';
  const faceResult = await human.warmup(myConfig);
  log.data('Face: ', faceResult.face);
  // second pass: full-body warmup image
  log.state('Processing embedded warmup image: full');
  myConfig.warmup = 'full';
  const fullResult = await human.warmup(myConfig);
  log.data('Body:', fullResult.body);
  log.data('Hand:', fullResult.hand);
  log.data('Gesture:', fullResult.gesture);
}
/** Entry point: initialize, then process the cmdline image or fall back to the embedded test. */
async function main() {
  log.header();
  log.info('Current folder:', process.env.PWD);
  await init();
  const input = process.argv[2];
  if (process.argv.length !== 3) {
    // no input given: warn and run detection on the embedded warmup images instead
    log.warn('Parameters: <input image> missing');
    await test();
  } else if (!fs.existsSync(input)) {
    log.error(`File not found: ${input}`);
  } else {
    await detect(input);
  }
}
main();

121
demo/nodejs/README.md Normal file
View File

@ -0,0 +1,121 @@
# Human Demos for NodeJS
- `node`: Process images from files, folders or URLs
uses native methods for image loading and decoding without external dependencies
- `node-canvas`: Process image from file or URL and draw results to a new image file using `node-canvas`
uses `node-canvas` library to load and decode images from files, draw detection results and write output to a new image file
- `node-video`: Processing of video input using `ffmpeg`
uses `ffmpeg` to decode video input (can be a file, stream or device such as webcam) and
output results in a pipe that are captured by demo app as frames and processed by `Human` library
- `node-webcam`: Processing of webcam screenshots using `fswebcam`
uses `fswebcam` to connect to web cam and take screenshots at regular interval which are then processed by `Human` library
- `node-event`: Showcases usage of `Human` eventing to get notifications on processing
- `node-similarity`: Compares two input images for similarity of detected faces
- `process-folder`: Processing all images in input folder and creates output images
internally used to generate samples gallery
<br>
## Main Demo
`nodejs/node.js`: Demo using NodeJS with CommonJS module
Simple demo that can process any input image
Note that you can run demo as-is and it will perform detection on provided sample images,
or you can pass a path to image to analyze, either on local filesystem or using URL
```shell
node demo/nodejs/node.js
```
<!-- eslint-skip -->
```js
2021-06-01 08:52:15 INFO: @vladmandic/human version 2.0.0
2021-06-01 08:52:15 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0
2021-06-01 08:52:15 INFO: Current folder: /home/vlado/dev/human
2021-06-01 08:52:15 INFO: Human: 2.0.0
2021-06-01 08:52:15 INFO: Active Configuration {
backend: 'tensorflow',
modelBasePath: 'file://models/',
wasmPath: '../node_modules/@tensorflow/tfjs-backend-wasm/dist/',
debug: true,
async: false,
warmup: 'full',
cacheSensitivity: 0.75,
filter: {
enabled: true,
width: 0,
height: 0,
flip: true,
return: true,
brightness: 0,
contrast: 0,
sharpness: 0,
blur: 0,
saturation: 0,
hue: 0,
negative: false,
sepia: false,
vintage: false,
kodachrome: false,
technicolor: false,
polaroid: false,
pixelate: 0
},
gesture: { enabled: true },
face: {
enabled: true,
detector: { modelPath: 'blazeface.json', rotation: false, maxDetected: 10, skipFrames: 15, minConfidence: 0.2, iouThreshold: 0.1, return: false, enabled: true },
mesh: { enabled: true, modelPath: 'facemesh.json' },
iris: { enabled: true, modelPath: 'iris.json' },
description: { enabled: true, modelPath: 'faceres.json', skipFrames: 16, minConfidence: 0.1 },
emotion: { enabled: true, minConfidence: 0.1, skipFrames: 17, modelPath: 'emotion.json' }
},
body: { enabled: true, modelPath: 'movenet-lightning.json', maxDetected: 1, minConfidence: 0.2 },
hand: {
enabled: true,
rotation: true,
skipFrames: 18,
minConfidence: 0.1,
iouThreshold: 0.1,
maxDetected: 2,
landmarks: true,
detector: { modelPath: 'handdetect.json' },
skeleton: { modelPath: 'handskeleton.json' }
},
object: { enabled: true, modelPath: 'centernet.json', minConfidence: 0.2, iouThreshold: 0.4, maxDetected: 10, skipFrames: 19 }
}
08:52:15.673 Human: version: 2.0.0
08:52:15.674 Human: tfjs version: 3.6.0
08:52:15.674 Human: platform: linux x64
08:52:15.674 Human: agent: NodeJS v16.0.0
08:52:15.674 Human: setting backend: tensorflow
08:52:15.710 Human: load model: file://models/blazeface.json
08:52:15.743 Human: load model: file://models/facemesh.json
08:52:15.744 Human: load model: file://models/iris.json
08:52:15.760 Human: load model: file://models/emotion.json
08:52:15.847 Human: load model: file://models/handdetect.json
08:52:15.847 Human: load model: file://models/handskeleton.json
08:52:15.914 Human: load model: file://models/movenet-lightning.json
08:52:15.957 Human: load model: file://models/centernet.json
08:52:16.015 Human: load model: file://models/faceres.json
08:52:16.015 Human: tf engine state: 50796152 bytes 1318 tensors
2021-06-01 08:52:16 INFO: Loaded: [ 'face', 'movenet', 'handpose', 'emotion', 'centernet', 'faceres', [length]: 6 ]
2021-06-01 08:52:16 INFO: Memory state: { unreliable: true, numTensors: 1318, numDataBuffers: 1318, numBytes: 50796152 }
2021-06-01 08:52:16 INFO: Loading image: private/daz3d/daz3d-kiaria-02.jpg
2021-06-01 08:52:16 STATE: Processing: [ 1, 1300, 1000, 3, [length]: 4 ]
2021-06-01 08:52:17 DATA: Results:
2021-06-01 08:52:17 DATA: Face: #0 boxScore:0.88 faceScore:1 age:16.3 genderScore:0.97 gender:female emotionScore:0.85 emotion:happy iris:61.05
2021-06-01 08:52:17 DATA: Body: #0 score:0.82 keypoints:17
2021-06-01 08:52:17 DATA: Hand: #0 score:0.89
2021-06-01 08:52:17 DATA: Hand: #1 score:0.97
2021-06-01 08:52:17 DATA: Gesture: face#0 gesture:facing left
2021-06-01 08:52:17 DATA: Gesture: body#0 gesture:leaning right
2021-06-01 08:52:17 DATA: Gesture: hand#0 gesture:pinky forward middlefinger up
2021-06-01 08:52:17 DATA: Gesture: hand#1 gesture:pinky forward middlefinger up
2021-06-01 08:52:17 DATA: Gesture: iris#0 gesture:looking left
2021-06-01 08:52:17 DATA: Object: #0 score:0.55 label:person
2021-06-01 08:52:17 DATA: Object: #1 score:0.23 label:bottle
2021-06-01 08:52:17 DATA: Persons:
2021-06-01 08:52:17 DATA: #0: Face:score:1 age:16.3 gender:female iris:61.05 Body:score:0.82 keypoints:17 LeftHand:no RightHand:yes Gestures:4
```

66
demo/nodejs/node-bench.js Normal file
View File

@ -0,0 +1,66 @@
/**
 * Human benchmark demo for NodeJS
 * Forks a child process per tfjs backend variant and measures load/warmup/detect times
 */
const childProcess = require('child_process'); // eslint-disable-line camelcase
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require

// shared human configuration used by every benchmarked backend
const config = {
  cacheSensitivity: 0.01, // effectively disable frame caching so every detect() does full work
  wasmPlatformFetch: true, // allow the wasm backend to fetch its binaries
  modelBasePath: 'https://vladmandic.github.io/human-models/models/',
};
const count = 10; // number of detect() iterations to time
/**
 * Load an image file and convert it to raw ImageData via an offscreen canvas
 * Reports the input resolution back to the parent process
 * @param input path to the image file
 */
async function loadImage(input) {
  const img = await canvas.loadImage(input);
  const { width, height } = img;
  const work = new canvas.Canvas(width, height);
  const ctx = work.getContext('2d');
  ctx.drawImage(img, 0, 0);
  const imageData = ctx.getImageData(0, 0, work.width, work.height);
  process.send({ input, resolution: [width, height] });
  return imageData;
}
/**
 * Benchmark a single human bundle on a given tfjs backend
 * @param module dist bundle filename, e.g. 'human.node.js'
 * @param backend tfjs backend name, e.g. 'tensorflow' or 'wasm'
 */
async function runHuman(module, backend) {
  if (backend === 'wasm') require('@tensorflow/tfjs-backend-wasm'); // eslint-disable-line node/no-unpublished-require, global-require
  const Human = require('../../dist/' + module); // eslint-disable-line global-require, import/no-dynamic-require
  config.backend = backend;
  const human = new Human.Human(config);
  // patch human environment to use the node canvas implementations
  human.env.Canvas = canvas.Canvas;
  human.env.Image = canvas.Image;
  human.env.ImageData = canvas.ImageData;
  process.send({ human: human.version, module });
  await human.init();
  process.send({ desired: human.config.backend, wasm: human.env.wasm, tfjs: human.tf.version.tfjs, tensorflow: human.env.tensorflow });
  const imageData = await loadImage('samples/in/ai-body.jpg');
  // time model load, warmup and repeated detection as separate phases
  const tStart = human.now();
  await human.load();
  const tLoaded = human.now();
  await human.warmup();
  const tWarm = human.now();
  for (let i = 0; i < count; i++) await human.detect(imageData);
  const tDone = human.now();
  process.send({ backend: human.tf.getBackend(), load: Math.round(tLoaded - tStart), warmup: Math.round(tWarm - tLoaded), detect: Math.round(tDone - tWarm), count, memory: human.tf.memory().numBytes });
}
/** Fork this script as a child process with the given args and resolve once it exits. */
async function executeWorker(args) {
  return new Promise((resolve) => {
    const child = childProcess.fork(process.argv[1], args);
    child.on('message', (msg) => log.data(msg)); // relay worker messages to the log
    child.on('exit', () => resolve(true));
  });
}
/**
 * Entry point: with cmdline args run a single benchmark directly (child mode),
 * otherwise act as the parent and fork one worker per bundle/backend combination
 */
async function main() {
  const [module, backend] = process.argv.slice(2);
  if (module) {
    await runHuman(module, backend); // child mode
    return;
  }
  // parent mode: run each combination sequentially in its own process
  const combinations = [
    ['human.node.js', 'tensorflow'],
    ['human.node-gpu.js', 'tensorflow'],
    ['human.node-wasm.js', 'wasm'],
  ];
  for (const combo of combinations) await executeWorker(combo);
}
main();

View File

@ -0,0 +1,82 @@
/**
 * Human demo for NodeJS using Canvas library
 *
 * Requires [canvas](https://www.npmjs.com/package/canvas) to provide Canvas functionality in NodeJS environment
 */
const fs = require('fs');
const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode

const config = { // just enable all and leave default settings
  debug: false,
  face: { enabled: true, detector: { maxDetected: 10 } }, // includes mesh, iris, emotion, descriptor
  hand: { enabled: true, maxDetected: 20, minConfidence: 0.5, detector: { modelPath: 'handtrack.json' } }, // use alternative hand model
  body: { enabled: true },
  object: { enabled: true },
  gestures: { enabled: true },
};
/**
 * Entry point: load an image from file or url, run full detection,
 * print a per-person summary and write an annotated output image
 * Usage: node node-canvas.js <input-image> <output-image>
 */
async function main() {
  log.header();
  globalThis.Canvas = canvas.Canvas; // patch global namespace with canvas library
  globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library
  // human.env.Canvas = canvas.Canvas; // alternatively monkey-patch human to use external canvas library
  // human.env.ImageData = canvas.ImageData; // alternatively monkey-patch human to use external canvas library
  // init
  const human = new Human.Human(config); // create instance of human
  log.info('Human:', human.version, 'TF:', tf.version_core);
  await human.load(); // pre-load models
  log.info('Loaded models:', human.models.loaded());
  log.info('Memory state:', human.tf.engine().memory());
  // parse cmdline
  const input = process.argv[2];
  let output = process.argv[3];
  // validate arguments before using them: previously output.toLowerCase() ran first,
  // so a missing <output-image> crashed with a TypeError instead of printing the error message
  if (process.argv.length !== 4) log.error('Parameters: <input-image> <output-image> missing');
  else if (!fs.existsSync(input) && !input.startsWith('http')) log.error(`File not found: ${process.argv[2]}`);
  else {
    if (!output.toLowerCase().endsWith('.jpg')) output += '.jpg'; // normalize output extension
    // everything seems ok
    const inputImage = await canvas.loadImage(input); // load image using canvas library
    log.info('Loaded image', input, inputImage.width, inputImage.height);
    const inputCanvas = new canvas.Canvas(inputImage.width, inputImage.height); // create canvas
    const inputCtx = inputCanvas.getContext('2d');
    inputCtx.drawImage(inputImage, 0, 0); // draw input image onto canvas
    const imageData = inputCtx.getImageData(0, 0, inputCanvas.width, inputCanvas.height);
    // run detection
    const result = await human.detect(imageData);
    // print results summary
    const persons = result.persons; // invoke persons getter, only used to print summary on console
    for (let i = 0; i < persons.length; i++) {
      const face = persons[i].face;
      const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.iris}` : null;
      const body = persons[i].body;
      const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints.length}` : null;
      log.data(`Detected: #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`);
    }
    // draw detected results onto canvas and save it to a file
    const outputCanvas = new canvas.Canvas(inputImage.width, inputImage.height); // create canvas
    const outputCtx = outputCanvas.getContext('2d');
    outputCtx.drawImage(result.canvas || inputImage, 0, 0); // draw input image onto canvas
    human.draw.all(outputCanvas, result); // use human build-in method to draw results as overlays on canvas
    const outFile = fs.createWriteStream(output); // write canvas to new image file
    outFile.on('finish', () => log.state('Output image:', output, outputCanvas.width, outputCanvas.height));
    outFile.on('error', (err) => log.error('Output error:', output, err));
    const stream = outputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
    stream.pipe(outFile);
  }
}
main();

95
demo/nodejs/node-event.js Normal file
View File

@ -0,0 +1,95 @@
/**
 * Human demo for NodeJS
 * Showcases usage of Human eventing to get notifications on processing
 */
const fs = require('fs');
const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode

let human = null; // Human instance, created in main()

// configuration with all major detectors enabled and async processing
const myConfig = {
  modelBasePath: 'file://models/',
  debug: false,
  async: true,
  filter: { enabled: false },
  face: {
    enabled: true,
    detector: { enabled: true },
    mesh: { enabled: true },
    iris: { enabled: true },
    description: { enabled: true },
    emotion: { enabled: true },
  },
  hand: { enabled: true },
  body: { enabled: true },
  object: { enabled: true },
};
/**
 * Load an image from a file path or http(s) url and run human detection on it
 * Results are delivered via the 'detect' event listener registered in main()
 * @param input file path or http(s) url of the image
 */
async function detect(input) {
  log.info('Loading image:', input);
  const isUrl = input.startsWith('http:') || input.startsWith('https:');
  let buffer;
  if (!isUrl) {
    buffer = fs.readFileSync(input);
  } else {
    const res = await fetch(input);
    if (res && res.ok) buffer = Buffer.from(await res.arrayBuffer());
    else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type'));
  }
  log.data('Image bytes:', buffer?.length, 'buffer:', buffer?.slice(0, 32));
  if (!buffer) return; // download failed, error already logged
  // decode using tfjs-node so no external image dependencies are needed
  const tensor = human.tf.node.decodeImage(buffer, 3);
  await human.detect(tensor, myConfig); // results are reported via events
  human.tf.dispose(tensor); // tensor is no longer needed
}
/** Entry point: create human, subscribe to its lifecycle events and process the cmdline input. */
async function main() {
  log.header();
  human = new Human.Human(myConfig);
  log.info('Human:', human.version, 'TF:', tf.version_core);
  if (human.events) {
    // log each stage of processing as it happens
    human.events.addEventListener('warmup', () => log.info('Event Warmup'));
    human.events.addEventListener('load', () => log.info('Event Loaded:', human.models.loaded(), human.tf.engine().memory()));
    human.events.addEventListener('image', () => log.info('Event Image:', human.process.tensor.shape));
    human.events.addEventListener('detect', () => {
      log.data('Event Detected:');
      const persons = human.result.persons;
      for (let i = 0; i < persons.length; i++) {
        const person = persons[i];
        const face = person.face;
        const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.distance}` : null;
        const body = person.body;
        const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints?.length}` : null;
        log.data(` #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${person.hands.left ? 'yes' : 'no'} RightHand:${person.hands.right ? 'yes' : 'no'} Gestures:${person.gestures.length}`);
      }
    });
  }
  await human.tf.ready(); // wait until tf is ready
  const input = process.argv[2]; // process input
  if (input) await detect(input);
  else log.error('Missing <input>');
}
main();

30
demo/nodejs/node-fetch.js Normal file
View File

@ -0,0 +1,30 @@
/**
 * Human demo for NodeJS using http fetch to get image file
 *
 * Requires [node-fetch](https://www.npmjs.com/package/node-fetch) to provide `fetch` functionality in NodeJS environment
 */
const fs = require('fs');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode

// minimal configuration: models are fetched from the hosted location
const humanConfig = {
  modelBasePath: 'https://vladmandic.github.io/human/models/',
};
/**
 * Entry point: load human, decode the input jpeg and run detection
 * @param inputFile path to a jpeg image on the local filesystem
 */
async function main(inputFile) {
  global.fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-unpublished-import, import/no-unresolved, node/no-missing-import, node/no-extraneous-import
  const human = new Human.Human(humanConfig); // create instance of human using default configuration
  log.info('Human:', human.version, 'TF:', tf.version_core);
  await human.load(); // optional as models would be loaded on-demand first time they are required
  await human.warmup(); // optional as model warmup is performed on-demand first time its executed
  const buffer = fs.readFileSync(inputFile); // read file data into buffer
  const tensor = human.tf.node.decodeImage(buffer); // decode jpg data
  const result = await human.detect(tensor); // run detection; will initialize backend and on-demand load models
  human.tf.dispose(tensor); // dispose the input tensor once detection is complete to avoid a memory leak
  log.data(result.gesture);
}
main('samples/in/ai-body.jpg');

View File

@ -0,0 +1,64 @@
/**
 * Human Person Similarity test for NodeJS
 */
const fs = require('fs');
const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode

let human = null; // Human instance, created in init()

// face-descriptor-focused configuration: detectors not needed for similarity are disabled
const myConfig = {
  modelBasePath: 'file://models/',
  debug: true,
  face: { emotion: { enabled: false } },
  body: { enabled: false },
  hand: { enabled: false },
  gesture: { enabled: false },
};
/** Create the shared Human instance, wait for tfjs and pre-load all configured models. */
async function init() {
  human = new Human.Human(myConfig);
  await human.tf.ready(); // backend must be ready before loading models
  log.info('Human:', human.version, 'TF:', tf.version_core);
  await human.load(); // pre-load models so both detect calls run at full speed
  log.info('Loaded:', human.models.loaded());
  log.info('Memory state:', human.tf.engine().memory());
}
/**
 * Load an image from disk and run face detection on it
 * @param input path to the image file
 * @returns human detection result
 * @throws {Error} if the input file does not exist
 */
async function detect(input) {
  // Error takes a single message string; a second argument is an options object
  // and the previous `new Error('Cannot load image:', input)` silently dropped the path
  if (!fs.existsSync(input)) {
    throw new Error(`Cannot load image: ${input}`);
  }
  const buffer = fs.readFileSync(input);
  const tensor = human.tf.node.decodeImage(buffer, 3);
  log.state('Loaded image:', input, tensor.shape);
  const result = await human.detect(tensor, myConfig);
  human.tf.dispose(tensor); // input tensor is no longer needed
  log.state('Detected faces:', result.face.length);
  return result;
}
/** Entry point: detect faces in the two images given on the cmdline and print their similarity score. */
async function main() {
  log.configure({ inspect: { breakLength: 265 } });
  log.header();
  if (process.argv.length !== 4) {
    log.error('Parameters: <first image> <second image> missing');
    return;
  }
  await init();
  const first = await detect(process.argv[2]);
  const second = await detect(process.argv[3]);
  const hasFace = (res) => Boolean(res && res.face && res.face.length > 0);
  if (!hasFace(first) || !hasFace(second)) {
    throw new Error('Could not detect face descriptors');
  }
  // compare the first detected face of each image using 2nd-order (euclidean) distance
  const similarity = human.match.similarity(first.face[0].embedding, second.face[0].embedding, { order: 2 });
  log.data('Similarity: ', similarity);
}
main();

View File

@ -0,0 +1,32 @@
/**
 * Human simple demo for NodeJS
 */
const fs = require('fs');
const process = require('process');
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode

// minimal configuration overrides on top of library defaults
const humanConfig = {
  // add any custom config here
  debug: true,
  body: { enabled: false },
};
/**
 * Run human detection on a single image file and print the full result
 * @param inputFile path to a jpeg image on the local filesystem
 */
async function detect(inputFile) {
  const human = new Human.Human(humanConfig); // create instance of human using default configuration
  console.log('Human:', human.version, 'TF:', tf.version_core); // eslint-disable-line no-console
  await human.load(); // optional as models would be loaded on-demand first time they are required
  await human.warmup(); // optional as model warmup is performed on-demand first time its executed
  const buffer = fs.readFileSync(inputFile); // read file data into buffer
  const tensor = human.tf.node.decodeImage(buffer); // decode jpg data
  console.log('loaded input file:', inputFile, 'resolution:', tensor.shape); // eslint-disable-line no-console
  const result = await human.detect(tensor); // run detection; will initialize backend and on-demand load models
  human.tf.dispose(tensor); // dispose the input tensor once detection is complete to avoid a memory leak
  console.log(result); // eslint-disable-line no-console
}
if (process.argv.length === 3) detect(process.argv[2]); // if input file is provided as cmdline parameter use it
else detect('samples/in/ai-body.jpg'); // else use built-in test inputfile

91
demo/nodejs/node-video.js Normal file
View File

@ -0,0 +1,91 @@
/**
 * Human demo for NodeJS
 * Unsupported sample of using external utility ffmpeg to capture to decode video input and process it using Human
 *
 * Uses ffmpeg to process video input and output stream of motion jpeg images which are then parsed for frame start/end markers by pipe2jpeg
 * Each frame triggers an event with jpeg buffer that then can be decoded and passed to human for processing
 * If you want process at specific intervals, set output fps to some value
 * If you want to process an input stream, set real-time flag and set input as required
 *
 * Note that [pipe2jpeg](https://www.npmjs.com/package/pipe2jpeg) is not part of Human dependencies and should be installed manually
 * Working version of `ffmpeg` must be present on the system
 */
const process = require('process');
const spawn = require('child_process').spawn;
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
// const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Pipe2Jpeg = require('pipe2jpeg'); // eslint-disable-line node/no-missing-require, import/no-unresolved
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode

let count = 0; // processed frame counter
let busy = false; // true while a frame is being processed; frames arriving meanwhile are dropped
let inputFile = './test.mp4'; // default input, can be overridden by first cmdline argument
if (process.argv.length === 3) inputFile = process.argv[2];

// face-only configuration: body/hand/object detection disabled for per-frame speed
const humanConfig = {
  modelBasePath: 'file://models/',
  debug: false,
  async: true,
  filter: { enabled: false },
  face: {
    enabled: true,
    detector: { enabled: true, rotation: false },
    mesh: { enabled: true },
    iris: { enabled: true },
    description: { enabled: true },
    emotion: { enabled: true },
  },
  hand: { enabled: false },
  body: { enabled: false },
  object: { enabled: false },
};
const human = new Human.Human(humanConfig);
const pipe2jpeg = new Pipe2Jpeg();

// ffmpeg arguments: decode input and emit a motion-jpeg image stream on stdout
const ffmpegParams = [
  '-loglevel', 'quiet',
  // input
  // '-re', // optional process video in real-time not as fast as possible
  '-i', `${inputFile}`, // input file
  // output
  '-an', // drop audio
  '-c:v', 'mjpeg', // use motion jpeg as output encoder
  '-pix_fmt', 'yuvj422p', // typical for mp4, may need different settings for some videos
  '-f', 'image2pipe', // pipe images as output
  // '-vf', 'fps=5,scale=800:600', // optional video filter, do anything here such as process at fixed 5fps or resize to specific resolution
  'pipe:1', // output to unix pipe that is then captured by pipe2jpeg
];
/**
 * Decode a single jpeg frame and run human detection on it
 * Skips frames that arrive while a previous frame is still being processed
 * @param jpegBuffer raw jpeg image bytes emitted by pipe2jpeg
 */
async function detect(jpegBuffer) {
  if (busy) return; // skip processing if busy
  busy = true;
  try {
    const tensor = human.tf.node.decodeJpeg(jpegBuffer, 3); // decode jpeg buffer to raw tensor
    const shape = tensor.shape; // capture before dispose; previously shape was read after the tensor was disposed
    const res = await human.detect(tensor);
    human.tf.dispose(tensor); // must dispose tensor
    // start custom processing here
    log.data('frame', { frame: ++count, size: jpegBuffer.length, shape, face: res?.face?.length, body: res?.body?.length, hand: res?.hand?.length, gesture: res?.gesture?.length });
    if (res?.face?.[0]) log.data('person', { score: [res.face[0].boxScore, res.face[0].faceScore], age: res.face[0].age || 0, gender: [res.face[0].genderScore || 0, res.face[0].gender], emotion: res.face[0].emotion?.[0] });
  } finally {
    // always clear the busy flag so a decode/detect failure cannot stall the loop forever
    busy = false;
  }
}
/** Entry point: spawn ffmpeg, pipe its mjpeg output through pipe2jpeg and run detection on every frame. */
async function main() {
  log.header();
  await human.tf.ready();
  log.info({ human: human.version, tf: human.tf.version_core });
  log.info({ input: inputFile });
  // every complete jpeg frame extracted from the stream triggers a detection
  pipe2jpeg.on('data', (jpegBuffer) => detect(jpegBuffer));
  const ffmpeg = spawn('ffmpeg', ffmpegParams, { stdio: ['ignore', 'pipe', 'ignore'] });
  ffmpeg.on('error', (error) => log.error('ffmpeg error:', error));
  ffmpeg.on('exit', (code, signal) => log.info('ffmpeg exit', code, signal));
  ffmpeg.stdout.pipe(pipe2jpeg); // feed the raw mjpeg stream into the frame parser
}
main();

View File

@ -0,0 +1,94 @@
/**
 * Human demo for NodeJS
 * Unsupported sample of using external utility fswebcam to capture screenshot from attached webcam in regular intervals and process it using Human
 *
 * Note that [node-webcam](https://www.npmjs.com/package/node-webcam) is not part of Human dependencies and should be installed manually
 * Working version of `fswebcam` must be present on the system
 */
let initial = true; // remember if this is the first run to print additional details
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const nodeWebCam = require('node-webcam'); // eslint-disable-line import/no-unresolved, node/no-missing-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode

// options for node-webcam
const tempFile = 'webcam-snap'; // node-webcam requires writing snapshot to a file, recommended to use tmpfs to avoid excessive disk writes
const optionsCamera = {
  callbackReturn: 'buffer', // this means whatever `fswebcam` writes to disk, no additional processing so it's fastest
  saveShots: false, // don't save processed frame to disk, note that temp file is still created by fswebcam thus recommendation for tmpfs
};
const camera = nodeWebCam.create(optionsCamera); // shared camera handle used by detect()

// options for human
const optionsHuman = {
  modelBasePath: 'file://models/', // load models from the local filesystem
};
const human = new Human.Human(optionsHuman); // shared Human instance
/**
 * Convert a raw image buffer into a batched float32 tensor suitable for human.detect()
 * Returns null when the buffer is empty; intermediate tensors are released via tf.tidy
 */
function buffer2tensor(buffer) {
  return human.tf.tidy(() => {
    if (!buffer) return null;
    const decoded = human.tf.node.decodeImage(buffer, 3);
    let batched;
    if (decoded.shape[2] === 4) { // rgba input: drop the alpha channel
      const channels = human.tf.split(decoded, 4, 2); // split rgba into individual channels
      const rgb = human.tf.stack([channels[0], channels[1], channels[2]], 2); // restack rgb, ignoring alpha
      // fold the extra dim into a batch dimension of 1
      batched = human.tf.reshape(rgb, [1, decoded.shape[0], decoded.shape[1], 3]);
    } else {
      batched = human.tf.expandDims(decoded, 0); // input is rgb so use as-is, just add the batch dim
    }
    return human.tf.cast(batched, 'float32');
  });
}
/**
 * Capture one webcam frame, run face detection on it and log the results
 * Re-schedules itself every 5 seconds
 */
async function detect() {
  // next frame is scheduled up-front (before capture + detection) on the assumption
  // the whole cycle completes in under 5sec, keeping the interval close to a real 5sec;
  // if detection could take longer, move this trigger to the end of the function instead
  setTimeout(() => detect(), 5000);
  camera.capture(tempFile, (err, data) => { // gets the (default) jpeg data from from webcam
    if (err) {
      log.error('error capturing webcam:', err);
    } else {
      const tensor = buffer2tensor(data); // create tensor from image buffer
      if (initial) log.data('input tensor:', tensor.shape);
      human.detect(tensor) // eslint-disable-line promise/no-promise-in-callback
        .then((result) => {
          const faces = result && result.face ? result.face : [];
          if (faces.length === 0) {
            log.data(' Face: N/A');
          } else {
            for (let i = 0; i < faces.length; i++) {
              const face = faces[i];
              // pick the highest-scoring emotion for the summary line
              const emotion = face.emotion?.reduce((prev, curr) => (prev.score > curr.score ? prev : curr));
              log.data(`detected face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion?.score} emotion:${emotion?.emotion} iris:${face.iris}`);
            }
          }
          return result;
        })
        .catch(() => log.error('human detect error'));
    }
    initial = false;
  });
  // alternatively to triggering every 5sec sec, simply trigger next frame as fast as possible
  // setImmediate(() => process());
}
/** Entry point: report versions, enumerate cameras, pre-load models and start the capture loop. */
async function main() {
  log.info('human:', human.version, 'tf:', tf.version_core);
  camera.list((list) => log.data('detected camera:', list));
  await human.load();
  detect(); // kick off the self-rescheduling capture/detect loop
}
log.header();
main();

213
demo/nodejs/node.js Normal file
View File

@ -0,0 +1,213 @@
/**
* Human demo for NodeJS
*/
const fs = require('fs');
const path = require('path');
const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
let human = null; // assigned in init() — Human is instantiated only after tf backend setup
const myConfig = {
  // backend: 'tensorflow',
  modelBasePath: 'file://models/', // load models from the local filesystem
  debug: true, // library prints detection details to console
  async: false, // NOTE(review): presumably runs detection modules sequentially rather than in parallel — confirm against human docs
  filter: {
    enabled: true,
    flip: true, // horizontally flip the input image before detection
  },
  face: {
    enabled: true,
    detector: { enabled: true, rotation: false },
    mesh: { enabled: true },
    iris: { enabled: true },
    description: { enabled: true },
    emotion: { enabled: true },
  },
  hand: {
    enabled: true,
  },
  // body: { modelPath: 'blazepose.json', enabled: true },
  body: { enabled: true },
  object: { enabled: true },
};
/** Create the Human instance, wait for the tf backend to become ready and preload all configured models. */
async function init() {
  human = new Human.Human(myConfig); // create instance of human
  await human.tf.ready(); // block until the tf backend is initialized
  log.info('human:', human.version, 'tf:', tf.version_core);
  log.info('Human:', human.version);
  // log.info('Active Configuration', human.config);
  await human.load(); // pre-load models
  log.info('Loaded:', human.models.loaded());
  // log.info('Memory state:', human.tf.engine().memory());
  const binding = tf.backend().binding; // native binding is only present for tfjs-node backends
  log.data(binding ? binding.TF_Version : null);
}
/**
 * Load a single image (local file path or http/https url), run full human detection and print all results.
 * @param input path or url of the image to process
 * @returns human detection result object, or {} when the image could not be loaded
 */
async function detect(input) {
  // read input image file and create tensor to be used for processing
  let buffer;
  log.info('Loading image:', input);
  if (input.startsWith('http:') || input.startsWith('https:')) {
    const res = await fetch(input);
    if (res && res.ok) buffer = Buffer.from(await res.arrayBuffer());
    else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type'));
  } else {
    buffer = fs.readFileSync(input);
  }
  log.data('Image bytes:', buffer?.length, 'buffer:', buffer?.slice(0, 32));
  // decode image using tfjs-node so we don't need external depenencies
  // can also be done using canvas.js or some other 3rd party image library
  if (!buffer) return {};
  const tensor = human.tf.tidy(() => {
    const decode = human.tf.node.decodeImage(buffer, 3);
    let expand;
    if (decode.shape[2] === 4) { // input is in rgba format, need to convert to rgb
      const channels = human.tf.split(decode, 4, 2); // tf.split(tensor, 4, 2); // split rgba to channels
      const rgb = human.tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb and ignore alpha
      expand = human.tf.reshape(rgb, [1, decode.shape[0], decode.shape[1], 3]); // move extra dim from the end of tensor and use it as batch number instead
    } else {
      expand = human.tf.expandDims(decode, 0); // input is already rgb so use as-is
    }
    const cast = human.tf.cast(expand, 'float32');
    return cast;
  });
  // image shape contains image dimensions and depth
  log.state('Processing:', tensor.shape);
  // run actual detection
  let result;
  try {
    result = await human.detect(tensor, myConfig);
  } catch (err) {
    log.error('caught', err);
  }
  // dispose image tensor as we no longer need it
  human.tf.dispose(tensor);
  // print data to console
  log.data('Results:');
  if (result && result.face && result.face.length > 0) {
    for (let i = 0; i < result.face.length; i++) {
      const face = result.face[i];
      // guard against a missing/empty emotion array: Array.reduce without an initial value throws on empty input,
      // and face.emotion is undefined when the emotion module is disabled (same guard style as the webcam demo)
      const emotion = face.emotion?.length > 0 ? face.emotion.reduce((prev, curr) => (prev.score > curr.score ? prev : curr)) : undefined;
      log.data(`  Face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion?.score} emotion:${emotion?.emotion} distance:${face.distance}`);
    }
  } else {
    log.data('  Face: N/A');
  }
  if (result && result.body && result.body.length > 0) {
    for (let i = 0; i < result.body.length; i++) {
      const body = result.body[i];
      log.data(`  Body: #${i} score:${body.score} keypoints:${body.keypoints?.length}`);
    }
  } else {
    log.data('  Body: N/A');
  }
  if (result && result.hand && result.hand.length > 0) {
    for (let i = 0; i < result.hand.length; i++) {
      const hand = result.hand[i];
      log.data(`  Hand: #${i} score:${hand.score} keypoints:${hand.keypoints?.length}`);
    }
  } else {
    log.data('  Hand: N/A');
  }
  if (result && result.gesture && result.gesture.length > 0) {
    for (let i = 0; i < result.gesture.length; i++) {
      // NOTE(review): destructuring Object.entries takes the first two [key, value] pairs of the gesture record;
      // presumably key = [part, index] and val = ['gesture', description] — confirm against human result schema
      const [key, val] = Object.entries(result.gesture[i]);
      log.data(`  Gesture: ${key[0]}#${key[1]} gesture:${val[1]}`);
    }
  } else {
    log.data('  Gesture: N/A');
  }
  if (result && result.object && result.object.length > 0) {
    for (let i = 0; i < result.object.length; i++) {
      const object = result.object[i];
      log.data(`  Object: #${i} score:${object.score} label:${object.label}`);
    }
  } else {
    log.data('  Object: N/A');
  }
  // print data to console
  if (result) {
    // invoke persons getter
    const persons = result.persons;
    // write result objects to file
    // fs.writeFileSync('result.json', JSON.stringify(result, null, 2));
    log.data('Persons:');
    for (let i = 0; i < persons.length; i++) {
      const face = persons[i].face;
      const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.iris}` : null;
      const body = persons[i].body;
      const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints?.length}` : null;
      log.data(`  #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`);
    }
  }
  return result;
}
/** Fallback when no input file is given: run warmup passes using the library's embedded sample images. */
async function test() {
  process.on('unhandledRejection', (err) => {
    // @ts-ignore // no idea if exception message is complete
    log.error(err?.message || err || 'no error message');
  });
  // warmup with embedded face image, then with embedded full-body image
  log.state('Processing embedded warmup image: face');
  myConfig.warmup = 'face';
  await human.warmup(myConfig);
  log.state('Processing embedded warmup image: full');
  myConfig.warmup = 'full';
  const result = await human.warmup(myConfig);
  // no need to print results here: human.config.debug makes the library log them during detection
  return result;
}
/**
 * Entry point: initialize human, then process the argument —
 * run warmup tests when missing, a whole folder when it is a directory,
 * a single file, or a remote url when it starts with http.
 */
async function main() {
  log.configure({ inspect: { breakLength: 265 } });
  log.header();
  log.info('Current folder:', process.env.PWD);
  await init();
  const f = process.argv[2];
  if (process.argv.length !== 3) {
    log.warn('Parameters: <input image | folder> missing');
    await test();
    return;
  }
  if (!fs.existsSync(f) && !f.startsWith('http')) {
    log.error(`File not found: ${process.argv[2]}`);
    return;
  }
  if (!fs.existsSync(f)) {
    await detect(f); // not on disk but starts with http → treat as remote url
    return;
  }
  if (fs.statSync(f).isDirectory()) {
    for (const file of fs.readdirSync(f)) await detect(path.join(f, file)); // process folder entries sequentially
  } else {
    await detect(f);
  }
}
main();

View File

@ -0,0 +1,119 @@
/**
* Human demo for NodeJS
*
* Takes input and output folder names parameters and processes all images
* found in input folder and creates annotated images in output folder
*
* Requires [canvas](https://www.npmjs.com/package/canvas) to provide Canvas functionality in NodeJS environment
*/
const fs = require('fs');
const path = require('path');
const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
// for nodejs, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
const tf = require('@tensorflow/tfjs-node-gpu'); // eslint-disable-line node/no-unpublished-require
const Human = require('../../dist/human.node-gpu.js'); // this is 'const Human = require('../dist/human.node-gpu.js').default;'
const config = { // just enable all and leave default settings
  modelBasePath: 'file://models', // load models from the local filesystem
  debug: true,
  softwareKernels: true, // slower but enhanced precision since face rotation can work in software mode in nodejs environments
  cacheSensitivity: 0.01, // NOTE(review): presumably lowers frame-similarity caching so near-identical images are still fully processed — confirm against human docs
  face: { enabled: true, detector: { maxDetected: 100, minConfidence: 0.1 } }, // high maxDetected / low confidence: favor recall for batch annotation
  object: { enabled: true, maxDetected: 100, minConfidence: 0.1 },
  gesture: { enabled: true },
  hand: { enabled: true, maxDetected: 100, minConfidence: 0.2 },
  body: { enabled: true, maxDetected: 100, minConfidence: 0.1, modelPath: 'https://vladmandic.github.io/human-models/models/movenet-multipose.json' },
};
const poolSize = 4; // number of images processed concurrently (batch size used in main's Promise.all loop)
const human = new Human.Human(config); // create instance of human
/**
 * Draw detection results over the original image and save it as a progressive jpeg.
 * @param shape tensor shape of the decoded input image: [batch, height, width, channels]
 * @param buffer raw image file contents used as the canvas background
 * @param result human detection results drawn as overlays
 * @param outFile output file name
 * @returns promise that resolves once the output file is fully written, rejects with the stream error on failure
 */
async function saveFile(shape, buffer, result, outFile) {
  // do the async canvas work first so we don't need an async promise executor (previous anti-pattern)
  const outputCanvas = new canvas.Canvas(shape[2], shape[1]); // create canvas sized width x height
  const outputCtx = outputCanvas.getContext('2d');
  const inputImage = await canvas.loadImage(buffer); // load image using canvas library
  outputCtx.drawImage(inputImage, 0, 0); // draw input image onto canvas
  human.draw.all(outputCanvas, result); // use human build-in method to draw results as overlays on canvas
  return new Promise((resolve, reject) => {
    const outStream = fs.createWriteStream(outFile); // write canvas to new image file
    outStream.on('finish', () => {
      log.data('Output image:', outFile, outputCanvas.width, outputCanvas.height);
      resolve();
    });
    outStream.on('error', (err) => {
      log.error('Output error:', outFile, err);
      reject(err); // propagate the actual error instead of rejecting with undefined
    });
    const stream = outputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
    stream.pipe(outStream);
  });
}
/**
 * Load one image file, run detection and optionally save an annotated copy.
 * @param image image file name (used for logging)
 * @param inFile full input path
 * @param outFile full output path, or null to skip saving
 */
async function processFile(image, inFile, outFile) {
  const buffer = fs.readFileSync(inFile);
  const tensor = tf.tidy(() => { // tidy disposes the intermediate decode/expand tensors
    const decode = tf.node.decodeImage(buffer, 3);
    const expand = tf.expandDims(decode, 0); // add batch dimension
    const cast = tf.cast(expand, 'float32');
    return cast;
  });
  log.state('Loaded image:', inFile, tensor.shape);
  const result = await human.detect(tensor);
  const shape = tensor.shape; // capture shape before disposal instead of reading it from a disposed tensor
  human.tf.dispose(tensor);
  log.data(`Detected: ${image}:`, 'Face:', result.face.length, 'Body:', result.body.length, 'Hand:', result.hand.length, 'Objects:', result.object.length, 'Gestures:', result.gesture.length);
  if (outFile) await saveFile(shape, buffer, result, outFile);
}
/**
 * Entry point: validate arguments, then process every jpeg in the input folder
 * in batches of poolSize, optionally writing annotated copies to the output folder.
 */
async function main() {
  log.header();
  globalThis.Canvas = canvas.Canvas; // patch global namespace with canvas library
  globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library
  log.info('Human:', human.version, 'TF:', tf.version_core);
  const configErrors = await human.validate();
  if (configErrors.length > 0) log.error('Configuration errors:', configErrors);
  await human.load(); // pre-load models
  log.info('Loaded models:', human.models.loaded());
  const inDir = process.argv[2];
  const outDir = process.argv[3];
  if (!inDir) {
    log.error('Parameters: <input-directory> missing');
    return;
  }
  if (!fs.existsSync(inDir) || !fs.statSync(inDir).isDirectory()) {
    // `&&` (was `??`, which never evaluated the right side since existsSync returns a boolean);
    // short-circuit also keeps statSync from being called on a missing path
    log.error('Invalid input directory:', fs.existsSync(inDir) && fs.statSync(inDir).isDirectory());
    return;
  }
  if (!outDir) {
    log.info('Parameters: <output-directory> missing, images will not be saved');
  }
  if (outDir && (!fs.existsSync(outDir) || !fs.statSync(outDir).isDirectory())) {
    log.error('Invalid output directory:', fs.existsSync(outDir) && fs.statSync(outDir).isDirectory());
    return;
  }
  const dir = fs.readdirSync(inDir);
  const images = dir.filter((f) => fs.statSync(path.join(inDir, f)).isFile() && (f.toLocaleLowerCase().endsWith('.jpg') || f.toLocaleLowerCase().endsWith('.jpeg')));
  log.info(`Processing folder: ${inDir} entries:`, dir.length, 'images', images.length);
  const t0 = performance.now();
  const promises = [];
  for (let i = 0; i < images.length; i++) {
    const inFile = path.join(inDir, images[i]);
    const outFile = outDir ? path.join(outDir, images[i]) : null;
    promises.push(processFile(images[i], inFile, outFile));
    if (promises.length >= poolSize) { // cap concurrency at poolSize in-flight files
      await Promise.all(promises);
      promises.length = 0; // reset the batch so settled promises are not re-awaited
    }
  }
  await Promise.all(promises); // drain the final partial batch
  const t1 = performance.now();
  log.info(`Processed ${images.length} images in ${Math.round(t1 - t0)} ms`);
}
main();

36
demo/offline.html Normal file
View File

@ -0,0 +1,36 @@
<!DOCTYPE html>
<html lang="en">
<!-- NOTE(review): static "Human: Offline" placeholder page; presumably served as the offline fallback — confirm against the app's service-worker/manifest setup -->
  <head>
    <meta http-equiv="content-type" content="text/html; charset=utf-8">
    <title>Human: Offline</title>
    <meta name="viewport" content="width=device-width, shrink-to-fit=yes">
    <meta name="mobile-web-app-capable" content="yes">
    <meta name="application-name" content="Human">
    <meta name="keywords" content="Human">
    <meta name="description" content="Human; Author: Vladimir Mandic <mandic00@live.com>">
    <meta name="msapplication-tooltip" content="Human; Author: Vladimir Mandic <mandic00@live.com>">
    <meta name="theme-color" content="#000000">
    <link rel="manifest" href="manifest.webmanifest">
    <link rel="shortcut icon" href="/favicon.ico" type="image/x-icon">
    <link rel="icon" sizes="256x256" href="../assets/icon.png">
    <link rel="apple-touch-icon" href="../assets/icon.png">
    <link rel="apple-touch-startup-image" href="../assets/icon.png">
    <style>
      @font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../assets/lato-light.woff2') }
      body { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; background: black; color: #ebebeb; }
      h1 { font-size: 2rem; margin-top: 1.2rem; font-weight: bold; }
      a { color: white; }
      a:link { color: lightblue; text-decoration: none; }
      a:hover { color: lightskyblue; text-decoration: none; }
      .row { width: 90vw; margin: auto; margin-top: 100px; text-align: center; }
    </style>
  </head>
  <body>
    <div class="row text-center">
      <h1>
        <a href="/">Human: Offline</a><br>
        <img alt="icon" src="../assets/icon.png">
      </h1>
    </div>
  </body>
</html>

View File

@ -0,0 +1,61 @@
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8">
    <meta http-equiv="content-type" content="text/html; charset=utf-8">
    <title>Human Demo</title>
    <meta name="viewport" content="width=device-width, shrink-to-fit=yes">
    <meta name="mobile-web-app-capable" content="yes">
    <meta name="application-name" content="Human Demo">
    <meta name="keywords" content="Human Demo">
    <meta name="description" content="Human Demo; Author: Vladimir Mandic <mandic00@live.com>">
    <link rel="manifest" href="../manifest.webmanifest">
    <link rel="shortcut icon" href="../favicon.ico" type="image/x-icon">
    <link rel="icon" sizes="256x256" href="../assets/icons/dash-256.png">
    <link rel="apple-touch-icon" href="../assets/icons/dash-256.png">
    <link rel="apple-touch-startup-image" href="../assets/icons/dash-256.png">
    <style>
      @font-face { font-family: 'CenturyGothic'; font-display: swap; font-style: normal; font-weight: 400; src: local('CenturyGothic'), url('../assets/century-gothic.ttf') format('truetype'); }
      html { font-size: 18px; }
      body { font-size: 1rem; font-family: "CenturyGothic", "Segoe UI", sans-serif; font-variant: small-caps; width: -webkit-fill-available; height: 100%; background: black; color: white; overflow: hidden; margin: 0; }
      select { font-size: 1rem; font-family: "CenturyGothic", "Segoe UI", sans-serif; font-variant: small-caps; background: gray; color: white; border: none; }
    </style>
    <script src="../segmentation/index.js" type="module"></script>
  </head>
  <body>
    <noscript><h1>javascript is required</h1></noscript>
    <nav>
      <div id="nav" class="nav"></div>
    </nav>
    <header>
      <div id="header" class="header" style="position: fixed; top: 0; right: 0; padding: 4px; margin: 16px; background: rgba(0, 0, 0, 0.5); z-index: 10; line-height: 2rem;">
        <label for="mode">mode</label>
        <select id="mode" name="mode">
          <option value="default">remove background</option>
          <option value="alpha">draw alpha channel</option>
          <option value="foreground">full foreground</option>
          <option value="state">recurrent state</option>
        </select><br>
        <label for="composite">composite</label>
        <select id="composite" name="composite"></select><br>
        <label for="ratio">downsample ratio</label>
        <input type="range" name="ratio" id="ratio" min="0.1" max="1" value="0.5" step="0.05">
        <div id="fps" style="margin-top: 8px"></div>
      </div>
    </header>
    <main>
      <div id="main" class="main">
        <video id="webcam" style="position: fixed; top: 0; left: 0; width: 50vw; height: 50vh"></video>
        <!-- img is a void element: removed the invalid `controls` attribute and the illegal `</img>` closing tag -->
        <img id="background" alt="background" style="position: fixed; top: 0; right: 0; width: 50vw; height: 50vh">
        <canvas id="output" style="position: fixed; bottom: 0; left: 0; height: 50vh"></canvas>
        <canvas id="merge" style="position: fixed; bottom: 0; right: 0; height: 50vh"></canvas>
      </div>
    </main>
    <footer>
      <div id="footer" class="footer"></div>
    </footer>
    <aside>
      <div id="aside" class="aside"></div>
    </aside>
  </body>
</html>

View File

@ -0,0 +1,99 @@
/**
* Human demo for browsers
* @default Human Library
* @summary <https://github.com/vladmandic/human>
* @author <https://github.com/vladmandic>
* @copyright <https://github.com/vladmandic>
* @license MIT
*/
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
const humanConfig = { // user configuration for human, used to fine-tune behavior
  modelBasePath: 'https://vladmandic.github.io/human-models/models/', // load models from the hosted human-models repo
  filter: { enabled: true, equalization: false, flip: false },
  face: { enabled: false }, // all detection modules disabled: this demo exercises segmentation only
  body: { enabled: false },
  hand: { enabled: false },
  object: { enabled: false },
  gesture: { enabled: false },
  segmentation: {
    enabled: true,
    modelPath: 'rvm.json', // can use rvm, selfie or meet
    ratio: 0.5, // initial downsample ratio; overridden each frame from the ui slider
    mode: 'default', // initial mode; overridden each frame from the ui selector
  },
};
const backgroundImage = '../../samples/in/background.jpg'; // image composited with the segmented output
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
const log = (...msg) => console.log(...msg); // eslint-disable-line no-console
/**
 * Demo entry point: wire up dom controls, load and warm up human,
 * start the webcam and run the segmentation loop per animation frame.
 */
async function main() {
  // gather dom elements
  const dom = {
    background: document.getElementById('background'),
    webcam: document.getElementById('webcam'),
    output: document.getElementById('output'),
    merge: document.getElementById('merge'),
    mode: document.getElementById('mode'),
    composite: document.getElementById('composite'),
    ratio: document.getElementById('ratio'),
    fps: document.getElementById('fps'),
  };
  // set defaults
  dom.fps.innerText = 'initializing';
  dom.ratio.valueAsNumber = human.config.segmentation.ratio; // sync slider with configured default
  dom.background.src = backgroundImage;
  // populate the composite selector with every canvas globalCompositeOperation value
  dom.composite.innerHTML = ['source-atop', 'color', 'color-burn', 'color-dodge', 'copy', 'darken', 'destination-atop', 'destination-in', 'destination-out', 'destination-over', 'difference', 'exclusion', 'hard-light', 'hue', 'lighten', 'lighter', 'luminosity', 'multiply', 'overlay', 'saturation', 'screen', 'soft-light', 'source-in', 'source-out', 'source-over', 'xor'].map((gco) => `<option value="${gco}">${gco}</option>`).join(''); // eslint-disable-line max-len
  const ctxMerge = dom.merge.getContext('2d');
  log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
  log('platform:', human.env.platform, '| agent:', human.env.agent);
  await human.load(); // preload all models
  log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
  log('models stats:', human.models.stats());
  log('models loaded:', human.models.loaded());
  await human.warmup(); // warmup function to initialize backend for future faster detection
  const numTensors = human.tf.engine().state.numTensors; // baseline tensor count for the per-frame leak check below
  // initialize webcam; note the onplay handler is registered BEFORE human.webcam.start so the first play event is not missed
  dom.webcam.onplay = () => { // start processing on video play
    log('start processing');
    dom.output.width = human.webcam.width;
    dom.output.height = human.webcam.height;
    dom.merge.width = human.webcam.width;
    dom.merge.height = human.webcam.height;
    loop(); // eslint-disable-line no-use-before-define
  };
  await human.webcam.start({ element: dom.webcam, crop: true, width: window.innerWidth / 2, height: window.innerHeight / 2 }); // use human webcam helper methods and associate webcam stream with a dom element
  if (!human.webcam.track) dom.fps.innerText = 'webcam error';
  // processing loop; exits without rescheduling on pause/error — presumably restarted by the onplay handler — confirm
  async function loop() {
    if (!human.webcam.element || human.webcam.paused) return; // check if webcam is valid and playing
    human.config.segmentation.mode = dom.mode.value; // get segmentation mode from ui
    human.config.segmentation.ratio = dom.ratio.valueAsNumber; // get segmentation downsample ratio from ui
    const t0 = Date.now();
    const rgba = await human.segmentation(human.webcam.element, human.config); // run model and process results
    const t1 = Date.now();
    if (!rgba) {
      dom.fps.innerText = 'error';
      return;
    }
    dom.fps.innerText = `fps: ${Math.round(10000 / (t1 - t0)) / 10}`; // mark performance
    human.draw.tensor(rgba, dom.output); // draw raw output
    human.tf.dispose(rgba); // dispose tensors
    ctxMerge.globalCompositeOperation = 'source-over';
    ctxMerge.drawImage(dom.background, 0, 0); // draw original video to first stacked canvas
    ctxMerge.globalCompositeOperation = dom.composite.value; // blend using the mode selected in the ui
    ctxMerge.drawImage(dom.output, 0, 0); // draw processed output to second stacked canvas
    if (numTensors !== human.tf.engine().state.numTensors) log({ leak: human.tf.engine().state.numTensors - numTensors }); // check for memory leaks
    requestAnimationFrame(loop);
  }
}
window.onload = main; // run once the page has loaded

28
demo/tracker/README.md Normal file
View File

@ -0,0 +1,28 @@
## Tracker
### Based on
<https://github.com/opendatacam/node-moving-things-tracker>
### Build
- remove reference to `lodash`:
> `isEqual` in <tracker.js>
- replace external lib:
> curl https://raw.githubusercontent.com/ubilabs/kd-tree-javascript/master/kdTree.js -o lib/kdTree-min.js
- build with `esbuild`:
> node_modules/.bin/esbuild --bundle tracker.js --format=esm --platform=browser --target=esnext --keep-names --tree-shaking=false --analyze --outfile=/home/vlado/dev/human/demo/tracker/tracker.js --banner:js="/* eslint-disable */"
### Usage
computeDistance(item1, item2)
disableKeepInMemory()
enableKeepInMemory()
getAllTrackedItems()
getJSONDebugOfTrackedItems(roundInt = true)
getJSONOfAllTrackedItems()
getJSONOfTrackedItems(roundInt = true)
getTrackedItemsInMOTFormat(frameNb)
reset()
setParams(newParams)
updateTrackedItemsWithNewFrame(detectionsOfThisFrame, frameNb)

65
demo/tracker/index.html Normal file
View File

@ -0,0 +1,65 @@
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8">
    <title>Human</title>
    <meta name="viewport" content="width=device-width" id="viewport">
    <meta name="keywords" content="Human">
    <meta name="application-name" content="Human">
    <meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
    <meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
    <meta name="theme-color" content="#000000">
    <link rel="manifest" href="../manifest.webmanifest">
    <link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
    <link rel="apple-touch-icon" href="../../assets/icon.png">
    <script src="./index.js" type="module"></script>
    <style>
      html { font-family: 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
      body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
      body::-webkit-scrollbar { display: none; }
      input[type="file"] { font-family: 'Segoe UI'; font-size: 14px; font-variant: small-caps; }
      ::-webkit-file-upload-button { background: #333333; color: white; border: 0; border-radius: 0; padding: 6px 16px; box-shadow: 4px 4px 4px #222222; font-family: 'Segoe UI'; font-size: 14px; font-variant: small-caps; }
    </style>
  </head>
  <body>
    <div style="display: flex">
      <video id="video" playsinline style="width: 25vw" controls controlslist="nofullscreen nodownload noremoteplayback" disablepictureinpicture loop></video>
      <canvas id="canvas" style="width: 75vw"></canvas>
    </div>
    <!-- input is a void element: removed invalid </input> closing tags; fixed mismatched label/for targets,
         duplicated input names and stray commas between attributes -->
    <div class="uploader" style="padding: 8px">
      <input type="file" name="inputvideo" id="inputvideo" accept="video/*">
      <input type="checkbox" id="interpolation" name="interpolation">
      <label for="interpolation">interpolation</label>
    </div>
    <form id="config" style="padding: 8px; line-height: 1.6rem;">
      tracker |
      <input type="checkbox" id="tracker" name="tracker" checked>
      <label for="tracker">enabled</label> |
      <input type="checkbox" id="keepInMemory" name="keepInMemory">
      <label for="keepInMemory">keepInMemory</label> |
      <br>
      tracker source |
      <input type="radio" id="box-face" name="box" value="face" checked>
      <label for="box-face">face</label> |
      <input type="radio" id="box-body" name="box" value="body">
      <label for="box-body">body</label> |
      <input type="radio" id="box-object" name="box" value="object">
      <label for="box-object">object</label> |
      <br>
      tracker config |
      <input type="range" id="unMatchedFramesTolerance" name="unMatchedFramesTolerance" min="0" max="300" step="1" value="60">
      <label for="unMatchedFramesTolerance">unMatchedFramesTolerance</label> |
      <input type="range" id="iouLimit" name="iouLimit" min="0" max="1" step="0.01" value="0.1">
      <label for="iouLimit">iouLimit</label> |
      <input type="range" id="distanceLimit" name="distanceLimit" min="0" max="1" step="0.01" value="0.1">
      <label for="distanceLimit">distanceLimit</label> |
      <input type="radio" id="matchingAlgorithm-kdTree" name="matchingAlgorithm" value="kdTree" checked>
      <label for="matchingAlgorithm-kdTree">kdTree</label> |
      <input type="radio" id="matchingAlgorithm-munkres" name="matchingAlgorithm" value="munkres">
      <label for="matchingAlgorithm-munkres">munkres</label> |
    </form>
    <pre id="status" style="position: absolute; top: 12px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
    <pre id="log" style="padding: 8px"></pre>
    <div id="performance" style="position: absolute; bottom: 0; width: 100%; padding: 8px; font-size: 0.8rem;"></div>
  </body>
</html>

10
demo/tracker/index.js Normal file

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

Some files were not shown because too many files have changed in this diff Show More