Compare commits
No commits in common. "2.5.6" and "main" have entirely different histories.
@@ -1,9 +1,8 @@
 {
   "$schema": "https://developer.microsoft.com/json-schemas/api-extractor/v7/api-extractor.schema.json",
   "mainEntryPointFilePath": "types/lib/src/human.d.ts",
-  "bundledPackages": ["@tensorflow/tfjs-core", "@tensorflow/tfjs-converter", "@types/offscreencanvas"],
   "compiler": {
-    "skipLibCheck": false
+    "skipLibCheck": true
   },
   "newlineKind": "lf",
   "dtsRollup": {
@@ -11,28 +10,18 @@
     "untrimmedFilePath": "types/human.d.ts"
   },
   "docModel": { "enabled": false },
-  "tsdocMetadata": {
-    "enabled": false
-  },
+  "tsdocMetadata": { "enabled": false },
   "apiReport": { "enabled": false },
   "messages": {
     "compilerMessageReporting": {
-      "default": {
-        "logLevel": "warning"
-      }
+      "default": { "logLevel": "warning" }
     },
     "extractorMessageReporting": {
-      "default": {
-        "logLevel": "warning"
-      },
-      "ae-missing-release-tag": {
-        "logLevel": "none"
-      }
+      "default": { "logLevel": "warning" },
+      "ae-missing-release-tag": { "logLevel": "none" }
     },
     "tsdocMessageReporting": {
-      "default": {
-        "logLevel": "warning"
-      }
+      "default": { "logLevel": "warning" }
     }
   }
 }
.build.json (44 changes)

@@ -8,13 +8,14 @@
   "profiles": {
     "production": ["clean", "compile", "typings", "typedoc", "lint", "changelog"],
     "development": ["serve", "watch", "compile"],
-    "serve": ["serve"]
+    "serve": ["serve"],
+    "clean": ["clean"]
   },
   "clean": {
-    "locations": ["dist/*", "types/lib/*", "typedoc/*"]
+    "locations": ["dist/*", "types/*", "typedoc/*"]
   },
   "lint": {
-    "locations": [ "*.json", "src/**/*.ts", "test/**/*.js", "demo/**/*.js" ],
+    "locations": [ "**/*.json", "src/**/*.ts", "test/**/*.js", "demo/**/*.js", "**/*.md" ],
     "rules": { }
   },
   "changelog": {
@@ -23,8 +24,8 @@
   "serve": {
     "sslKey": "node_modules/@vladmandic/build/cert/https.key",
     "sslCrt": "node_modules/@vladmandic/build/cert/https.crt",
-    "httpPort": 10030,
+    "httpPort": 8000,
-    "httpsPort": 10031,
+    "httpsPort": 8001,
     "documentRoot": ".",
     "defaultFolder": "demo",
     "defaultFile": "index.html"
@@ -38,6 +39,13 @@
     "banner": { "js": "/*\n  Human\n  homepage: <https://github.com/vladmandic/human>\n  author: <https://github.com/vladmandic>'\n*/\n" }
   },
   "targets": [
+    {
+      "name": "tfjs/browser/version",
+      "platform": "browser",
+      "format": "esm",
+      "input": "tfjs/tf-version.ts",
+      "output": "dist/tfjs.version.js"
+    },
     {
       "name": "tfjs/nodejs/cpu",
       "platform": "node",
@@ -76,6 +84,7 @@
       "format": "cjs",
       "input": "tfjs/tf-node-wasm.ts",
       "output": "dist/tfjs.esm.js",
+      "minify": false,
       "external": ["@tensorflow"]
     },
     {
@@ -86,13 +95,6 @@
       "output": "dist/human.node-wasm.js",
       "external": ["@tensorflow"]
     },
-    {
-      "name": "tfjs/browser/version",
-      "platform": "browser",
-      "format": "esm",
-      "input": "tfjs/tf-version.ts",
-      "output": "dist/tfjs.version.js"
-    },
     {
       "name": "tfjs/browser/esm/nobundle",
       "platform": "browser",
@@ -107,16 +109,17 @@
       "format": "esm",
       "input": "src/human.ts",
       "output": "dist/human.esm-nobundle.js",
-      "sourcemap": true,
+      "sourcemap": false,
       "external": ["@tensorflow"]
     },
     {
-      "name": "tfjs/browser/esm/custom",
+      "name": "tfjs/browser/esm/bundle",
       "platform": "browser",
       "format": "esm",
-      "input": "tfjs/tf-custom.ts",
+      "input": "tfjs/tf-browser.ts",
       "output": "dist/tfjs.esm.js",
-      "sourcemap": false
+      "sourcemap": false,
+      "minify": true
     },
     {
       "name": "human/browser/iife/bundle",
@@ -157,6 +160,15 @@
       "output": "demo/faceid/index.js",
       "sourcemap": true,
       "external": ["*/human.esm.js"]
+    },
+    {
+      "name": "demo/tracker",
+      "platform": "browser",
+      "format": "esm",
+      "input": "demo/tracker/index.ts",
+      "output": "demo/tracker/index.js",
+      "sourcemap": true,
+      "external": ["*/human.esm.js"]
     }
   ]
 },
.eslintrc.json (248 changes)

@@ -1,93 +1,221 @@
 {
-  "globals": {},
+  "globals": {
+  },
+  "rules": {
+    "@typescript-eslint/no-require-imports":"off"
+  },
+  "overrides": [
+    {
+      "files": ["**/*.ts"],
+      "parser": "@typescript-eslint/parser",
+      "parserOptions": { "ecmaVersion": "latest", "project": ["./tsconfig.json"] },
+      "plugins": ["@typescript-eslint"],
+      "env": {
+        "browser": true,
+        "commonjs": false,
+        "node": false,
+        "es2021": true
+      },
+      "extends": [
+        "airbnb-base",
+        "eslint:recommended",
+        "plugin:@typescript-eslint/eslint-recommended",
+        "plugin:@typescript-eslint/recommended",
+        "plugin:@typescript-eslint/recommended-requiring-type-checking",
+        "plugin:@typescript-eslint/strict",
+        "plugin:import/recommended",
+        "plugin:promise/recommended"
+      ],
+      "rules": {
+        "@typescript-eslint/ban-ts-comment":"off",
+        "@typescript-eslint/dot-notation":"off",
+        "@typescript-eslint/no-empty-interface":"off",
+        "@typescript-eslint/no-inferrable-types":"off",
+        "@typescript-eslint/no-misused-promises":"off",
+        "@typescript-eslint/no-unnecessary-condition":"off",
+        "@typescript-eslint/no-unsafe-argument":"off",
+        "@typescript-eslint/no-unsafe-assignment":"off",
+        "@typescript-eslint/no-unsafe-call":"off",
+        "@typescript-eslint/no-unsafe-member-access":"off",
+        "@typescript-eslint/no-unsafe-return":"off",
+        "@typescript-eslint/no-require-imports":"off",
+        "@typescript-eslint/no-empty-object-type":"off",
+        "@typescript-eslint/non-nullable-type-assertion-style":"off",
+        "@typescript-eslint/prefer-for-of":"off",
+        "@typescript-eslint/prefer-nullish-coalescing":"off",
+        "@typescript-eslint/prefer-ts-expect-error":"off",
+        "@typescript-eslint/restrict-plus-operands":"off",
+        "@typescript-eslint/restrict-template-expressions":"off",
+        "dot-notation":"off",
+        "guard-for-in":"off",
+        "import/extensions": ["off", "always"],
+        "import/no-unresolved":"off",
+        "import/prefer-default-export":"off",
+        "lines-between-class-members":"off",
+        "max-len": [1, 275, 3],
+        "no-async-promise-executor":"off",
+        "no-await-in-loop":"off",
+        "no-bitwise":"off",
+        "no-continue":"off",
+        "no-lonely-if":"off",
+        "no-mixed-operators":"off",
+        "no-param-reassign":"off",
+        "no-plusplus":"off",
+        "no-regex-spaces":"off",
+        "no-restricted-syntax":"off",
+        "no-return-assign":"off",
+        "no-void":"off",
+        "object-curly-newline":"off",
+        "prefer-destructuring":"off",
+        "prefer-template":"off",
+        "radix":"off"
+      }
+    },
+    {
+      "files": ["**/*.d.ts"],
+      "parser": "@typescript-eslint/parser",
+      "parserOptions": { "ecmaVersion": "latest", "project": ["./tsconfig.json"] },
+      "plugins": ["@typescript-eslint"],
+      "env": {
+        "browser": true,
+        "commonjs": false,
+        "node": false,
+        "es2021": true
+      },
+      "extends": [
+        "airbnb-base",
+        "eslint:recommended",
+        "plugin:@typescript-eslint/eslint-recommended",
+        "plugin:@typescript-eslint/recommended",
+        "plugin:@typescript-eslint/recommended-requiring-type-checking",
+        "plugin:@typescript-eslint/strict",
+        "plugin:import/recommended",
+        "plugin:promise/recommended"
+      ],
+      "rules": {
+        "@typescript-eslint/array-type":"off",
+        "@typescript-eslint/ban-types":"off",
+        "@typescript-eslint/consistent-indexed-object-style":"off",
+        "@typescript-eslint/consistent-type-definitions":"off",
+        "@typescript-eslint/no-empty-interface":"off",
+        "@typescript-eslint/no-explicit-any":"off",
+        "@typescript-eslint/no-invalid-void-type":"off",
+        "@typescript-eslint/no-unnecessary-type-arguments":"off",
+        "@typescript-eslint/no-unnecessary-type-constraint":"off",
+        "comma-dangle":"off",
+        "indent":"off",
+        "lines-between-class-members":"off",
+        "max-classes-per-file":"off",
+        "max-len":"off",
+        "no-multiple-empty-lines":"off",
+        "no-shadow":"off",
+        "no-use-before-define":"off",
+        "quotes":"off",
+        "semi":"off"
+      }
+    },
+    {
+      "files": ["**/*.js"],
+      "parserOptions": { "sourceType": "module", "ecmaVersion": "latest" },
+      "plugins": [],
       "env": {
         "browser": true,
         "commonjs": true,
         "node": true,
         "es2021": true
       },
-  "parser": "@typescript-eslint/parser",
-  "parserOptions": {
-    "ecmaVersion": 2021
-  },
-  "plugins": [
-    "@typescript-eslint",
-    "html"
-  ],
       "extends": [
         "airbnb-base",
         "eslint:recommended",
-    "plugin:@typescript-eslint/eslint-recommended",
-    "plugin:@typescript-eslint/recommended",
-    "plugin:import/errors",
-    "plugin:import/warnings",
-    "plugin:json/recommended-with-comments",
         "plugin:node/recommended",
         "plugin:promise/recommended"
       ],
-  "ignorePatterns": [
-    "assets",
-    "demo/helpers/*.js",
-    "demo/typescript/*.js",
-    "demo/faceid/*.js",
-    "dist",
-    "media",
-    "models",
-    "node_modules",
-    "types/human.d.ts"
-  ],
       "rules": {
-    "@typescript-eslint/ban-ts-comment": "off",
-    "@typescript-eslint/explicit-module-boundary-types": "off",
-    "@typescript-eslint/no-shadow": "error",
-    "@typescript-eslint/no-var-requires": "off",
-    "@typescript-eslint/prefer-as-const": "off",
-    "@typescript-eslint/triple-slash-reference": "off",
-    "@typescript-eslint/no-inferrable-types": "off",
-    "@typescript-eslint/no-empty-interface": ["error", { "allowSingleExtends": true }],
-    "camelcase": "off",
-    "class-methods-use-this": "off",
         "dot-notation":"off",
-    "func-names": "off",
+        "import/extensions": ["error", "always"],
-    "guard-for-in": "off",
-    "import/extensions": "off",
-    "import/named": "off",
         "import/no-extraneous-dependencies":"off",
-    "import/no-named-as-default": "off",
-    "import/no-unresolved": "off",
-    "import/prefer-default-export": "off",
-    "lines-between-class-members": "off",
         "max-len": [1, 275, 3],
-    "newline-per-chained-call": "off",
-    "no-async-promise-executor": "off",
         "no-await-in-loop":"off",
         "no-bitwise":"off",
-    "no-case-declarations":"off",
         "no-continue":"off",
-    "no-else-return": "off",
-    "no-lonely-if": "off",
-    "no-loop-func": "off",
         "no-mixed-operators":"off",
         "no-param-reassign":"off",
         "no-plusplus":"off",
-    "no-process-exit": "off",
         "no-regex-spaces":"off",
-    "no-restricted-globals": "off",
         "no-restricted-syntax":"off",
         "no-return-assign":"off",
-    "no-shadow": "off",
-    "no-underscore-dangle": "off",
-    "node/no-missing-import": ["error", { "tryExtensions": [".js", ".json", ".ts"] }],
-    "node/no-unpublished-import": "off",
-    "node/no-unpublished-require": "off",
         "node/no-unsupported-features/es-syntax":"off",
-    "node/shebang": "off",
         "object-curly-newline":"off",
         "prefer-destructuring":"off",
         "prefer-template":"off",
-    "promise/always-return": "off",
-    "promise/catch-or-return": "off",
-    "promise/no-nesting": "off",
         "radix":"off"
       }
+    },
+    {
+      "files": ["**/*.json"],
+      "parserOptions": { "ecmaVersion": "latest" },
+      "plugins": ["json"],
+      "env": {
+        "browser": false,
+        "commonjs": false,
+        "node": false,
+        "es2021": false
+      },
+      "extends": []
+    },
+    {
+      "files": ["**/*.html"],
+      "parserOptions": { "sourceType": "module", "ecmaVersion": "latest" },
+      "parser": "@html-eslint/parser",
+      "plugins": ["html", "@html-eslint"],
+      "env": {
+        "browser": true,
+        "commonjs": false,
+        "node": false,
+        "es2021": false
+      },
+      "extends": ["plugin:@html-eslint/recommended"],
+      "rules": {
+        "@html-eslint/element-newline":"off",
+        "@html-eslint/attrs-newline":"off",
+        "@html-eslint/indent": ["error", 2]
+      }
+    },
+    {
+      "files": ["**/*.md"],
+      "plugins": ["markdown"],
+      "processor": "markdown/markdown",
+      "rules": {
+        "no-undef":"off"
+      }
+    },
+    {
+      "files": ["**/*.md/*.js"],
+      "rules": {
+        "@typescript-eslint/no-unused-vars":"off",
+        "@typescript-eslint/triple-slash-reference":"off",
+        "import/newline-after-import":"off",
+        "import/no-unresolved":"off",
+        "no-console":"off",
+        "no-global-assign":"off",
+        "no-multi-spaces":"off",
+        "no-restricted-globals":"off",
+        "no-undef":"off",
+        "no-unused-vars":"off",
+        "node/no-missing-import":"off",
+        "node/no-missing-require":"off",
+        "promise/catch-or-return":"off"
+      }
+    }
+  ],
+  "ignorePatterns": [
+    "node_modules",
+    "assets",
+    "dist",
+    "demo/helpers/*.js",
+    "demo/typescript/*.js",
+    "demo/faceid/*.js",
+    "demo/tracker/*.js",
+    "typedoc"
+  ]
 }
@@ -0,0 +1,11 @@
+github: [vladmandic]
+patreon: # Replace with a single Patreon username
+open_collective: # Replace with a single Open Collective username
+ko_fi: # Replace with a single Ko-fi username
+tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+liberapay: # Replace with a single Liberapay username
+issuehunt: # Replace with a single IssueHunt username
+otechie: # Replace with a single Otechie username
+lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
+custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
@@ -1,6 +1,9 @@
-.vscode
+node_modules/
-node_modules
-pnpm-lock.yaml
-assets/tf*
-*.swp
 types/lib
+pnpm-lock.yaml
+package-lock.json
+*.swp
+samples/**/*.mp4
+samples/**/*.webm
+temp
+tmp
@@ -1,6 +1,7 @@
 {
   "MD012": false,
   "MD013": false,
+  "MD029": false,
   "MD033": false,
   "MD036": false,
   "MD041": false
.npmrc (4 changes)

@@ -1 +1,5 @@
 force=true
+omit=dev
+legacy-peer-deps=true
+strict-peer-dependencies=false
+node-options='--no-deprecation'
@@ -0,0 +1,10 @@
+{
+  "search.exclude": {
+    "dist/*": true,
+    "node_modules/*": true,
+    "types": true,
+    "typedoc": true,
+  },
+  "search.useGlobalIgnoreFiles": true,
+  "search.useParentIgnoreFiles": true
+}
CHANGELOG.md (328 changes)

@@ -1,20 +1,276 @@
-#
+# @vladmandic/human

-Version: **undefined**
+Version: **3.3.5**
-Description: **undefined**
+Description: **Human: AI-powered 3D Face Detection & Rotation Tracking, Face Description & Recognition, Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction, Gesture Recognition**

-Author: **undefined**
+Author: **Vladimir Mandic <mandic00@live.com>**
-License: **undefined**
+License: **MIT**
 Repository: **<https://github.com/vladmandic/human>**

 ## Changelog

+### **3.3.5** 2025/02/05 mandic00@live.com
+
+### **origin/main** 2024/10/24 mandic00@live.com
+
+- add human.draw.tensor method
+
+### **3.3.4** 2024/10/24 mandic00@live.com
+
+### **3.3.3** 2024/10/14 mandic00@live.com
+
+- add loaded property to model stats and mark models not loaded correctly.
+- release build
+
+### **3.3.2** 2024/09/11 mandic00@live.com
+
+- full rebuild
+
+### **3.3.1** 2024/09/11 mandic00@live.com
+
+- add config.face.detector.square option
+- human 3.3 alpha test run
+- human 3.3 alpha with new build environment
+- release rebuild
+- fix flazeface tensor scale and update build platform
+
+### **3.2.2** 2024/04/17 mandic00@live.com
+
+### **release: 3.2.1** 2024/02/15 mandic00@live.com
+
+### **3.2.1** 2024/02/15 mandic00@live.com
+
+### **3.2.0** 2023/12/06 mandic00@live.com
+
+- set browser false when navigator object is empty
+- https://github.com/vladmandic/human/issues/402
+
+### **release: 3.1.2** 2023/09/18 mandic00@live.com
+
+- full rebuild
+
+### **3.1.2** 2023/09/18 mandic00@live.com
+
+- major toolkit upgrade
+- full rebuild
+- major toolkit upgrade
+
+### **3.1.1** 2023/08/05 mandic00@live.com
+
+- fixes plus tfjs upgrade for new release
+
+### **3.0.7** 2023/06/12 mandic00@live.com
+
+- full rebuild
+- fix memory leak in histogramequalization
+- initial work on tracker
+
+### **3.0.6** 2023/03/21 mandic00@live.com
+
+- add optional crop to multiple models
+- fix movenet-multipose
+- add electron detection
+- fix gender-ssrnet-imdb
+- add movenet-multipose workaround
+- rebuild and publish
+- add face.detector.minsize configurable setting
+- add affectnet
+
+### **3.0.5** 2023/02/02 mandic00@live.com
+
+- add gear-e models
+- detect react-native
+- redo blazeface annotations
+
+### **3.0.4** 2023/01/29 mandic00@live.com
+
+- make naviator calls safe
+- fix facedetector-only configs
+
+### **3.0.3** 2023/01/07 mandic00@live.com
+
+- full rebuild
+
+### **3.0.2** 2023/01/06 mandic00@live.com
+
+- default face.rotation disabled
+
+### **release: 3.0.1** 2022/11/22 mandic00@live.com
+
+### **3.0.1** 2022/11/22 mandic00@live.com
+
+- support dynamic loads
+- polish demos
+- add facedetect demo and fix model async load
+- enforce markdown linting
+- cleanup git history
+- default empty result
+- refactor draw and models namespaces
+- refactor distance
+- add basic anthropometry
+- added webcam id specification
+- include external typedefs
+- prepare external typedefs
+- rebuild all
+- include project files for types
+- architectural improvements
+- refresh dependencies
+- add named exports
+- add draw label templates
+- reduce dev dependencies
+- tensor rank strong typechecks
+- rebuild dependencies
+
+### **2.11.1** 2022/10/09 mandic00@live.com
+
+- add rvm segmentation model
+- add human.webcam methods
+- create funding.yml
+- fix rotation interpolation
+
+### **2.10.3** 2022/09/21 mandic00@live.com
+
+- add human.video method
+
+### **2.10.2** 2022/09/11 mandic00@live.com
+
+- add node.js esm compatibility (#292)
+- release
+
+### **2.10.1** 2022/09/07 mandic00@live.com
+
+- release candidate
+- add config flags
+- test update
+- release preview
+- optimize startup sequence
+- reorder backend init code
+- test embedding
+- embedding test
+- add browser iife tests
+- minor bug fixes and increased test coverage
+- extend release tests
+- add model load exception handling
+- add softwarekernels config option
+- expand type safety
+- full eslint rule rewrite
+
+### **2.9.4** 2022/08/20 mandic00@live.com
+
+- add browser test
+- add tensorflow library detection
+- fix wasm detection
+- enumerate additional models
+- release refresh
+
+### **2.9.3** 2022/08/10 mandic00@live.com
+
+- rehault testing framework
+- release refresh
+- add insightface
+
+### **2.9.2** 2022/08/08 mandic00@live.com
+
+- release rebuild
+
+### **2.9.1** 2022/07/25 mandic00@live.com
+
+- full rebuild
+- release cleanup
+- tflite experiments
+- add load monitor test
+- beta for upcoming major release
+- swtich to release version of tfjs
+- placeholder for face contours
+- improve face compare in main demo
+- add webview support
+- fix(gear): ensure gear.modelpath is used for loadmodel()
+- npm default install should be prod only
+- fix npm v7 compatibility
+- add getmodelstats method
+- rebuild
+- release build
+
+### **2.8.1** 2022/06/08 mandic00@live.com
+
+- webgpu and wasm optimizations
+- add faceboxes prototype
+- full rebuild
+
+### **2.7.4** 2022/05/24 mandic00@live.com
+
+### **2.7.3** 2022/05/24 mandic00@live.com
+
+- add face.mesh.keepinvalid config flag
+- initial work for new facemesh model
+
+### **2.7.2** 2022/05/12 mandic00@live.com
+
+- fix demo when used with video files
+- major release
+
+### **2.7.1** 2022/05/09 mandic00@live.com
+
+- support 4k input
+- add attention draw methods
+- fix coloring function
+- enable precompile as part of warmup
+- prepare release beta
+- change default face crop
+- beta release 2.7
+- refactor draw methods
+- implement face attention model
+- add electronjs demo
+- rebuild
+
+### **2.6.5** 2022/04/01 mandic00@live.com
+
+- bundle offscreencanvas types
+- prototype precompile pass
+- fix changelog generation
+- fix indexdb config check
+
+### **2.6.4** 2022/02/27 mandic00@live.com
+
+- fix types typo
+- refresh
+- add config option wasmplatformfetch
+
+### **2.6.3** 2022/02/10 mandic00@live.com
+
+- rebuild
+
+### **2.6.2** 2022/02/07 mandic00@live.com
+
+- release rebuild
+
+### **2.6.1** 2022/01/20 mandic00@live.com
+
+- implement model caching using indexdb
+- prototype global fetch handler
+- fix face box and hand tracking when in front of face
+
+### **2.5.8** 2022/01/14 mandic00@live.com
+
+- fix samples
+- fix(src): typo
+- change on how face box is calculated
+
+### **2.5.7** 2021/12/27 mandic00@live.com
+
+- fix posenet
+- release refresh
+
 ### **2.5.6** 2021/12/15 mandic00@live.com

 - strong type for string enums

-### **origin/main** 2021/12/14 mandic00@live.com

 - rebuild
 - fix node detection in electron environment

@@ -59,9 +315,7 @@
 - rebuild
 - add type defs when working with relative path imports
 - disable humangl backend if webgl 1.0 is detected
+- add additional hand gestures
-### **release: 2.5.1** 2021/11/08 mandic00@live.com

 ### **2.5.1** 2021/11/08 mandic00@live.com

@@ -89,9 +343,7 @@
 - add ts demo
 - switch from es2018 to es2020 for main build
 - switch to custom tfjs for demos
+- release 2.4
-### **release: 2.4.1** 2021/10/25 mandic00@live.com

 ### **2.4.1** 2021/10/25 mandic00@live.com

@@ -118,7 +370,6 @@
 - minor blazepose optimizations
 - compress samples
-- remove handdetect from default package
 - remove posenet from default package
 - enhanced movenet postprocessing
 - use transferrable buffer for worker messages

@@ -126,17 +377,13 @@
 - add node-match advanced example using worker thread pool
 - package updates
 - optimize image preprocessing
+- set webgpu optimized flags
-### **release: 2.3.2** 2021/10/11 mandic00@live.com

 - major precision improvements to movenet and handtrack
 - image processing fixes
 - redesign body and hand caching and interpolation
 - demo default config cleanup
 - improve gaze and face angle visualizations in draw
+- release 2.3.1
-### **release 2.3.1** 2021/10/06 mandic00@live.com

 ### **2.3.1** 2021/10/06 mandic00@live.com

@@ -146,7 +393,6 @@
 - fix backend order initialization
 - added docker notes
 - breaking change: new similarity and match methods
-- release candidate
 - tweaked default values
 - enable handtrack as default model
 - redesign face processing

@@ -176,9 +422,7 @@
 ### **2.2.2** 2021/09/17 mandic00@live.com

 - experimental webgl status monitoring
+- major release
-### **release: 2.2.1** 2021/09/16 mandic00@live.com

 ### **2.2.1** 2021/09/16 mandic00@live.com

@@ -207,8 +451,6 @@
 - implement event emitters
 - fix iife loader
 - simplify dependencies
-- fix file permissions
-- remove old build server
 - change build process
 - add benchmark info
 - simplify canvas handling in nodejs

@@ -251,7 +493,6 @@
 ### **2.1.1** 2021/07/29 mandic00@live.com

-- proposal #141
 - add note on manually disping tensor
 - modularize model loading

@@ -265,9 +506,7 @@
 - reorganize demos
 - fix centernet box width & height
 - add body segmentation sample
+- add release notes
-### **release: 2.0.1** 2021/06/08 mandic00@live.com

 - release 2.0

 ### **2.0.1** 2021/06/08 mandic00@live.com

@@ -296,7 +535,6 @@
 - implemented human.next global interpolation method
 - finished draw buffering and smoothing and enabled by default
 - implemented service worker
-- quantized centernet
 - release candidate
 - added usage restrictions
 - quantize handdetect model

@@ -330,8 +568,6 @@
 ### **1.9.1** 2021/05/21 mandic00@live.com

 - caching improvements
-- sanitize server input
-- remove nanodet weights from default distribution
 - add experimental mb3-centernet object detection
 - individual model skipframes values still max high threshold for caching
 - config.videooptimized has been removed and config.cachesensitivity has been added instead

@@ -353,9 +589,7 @@
 ### **1.8.2** 2021/05/04 mandic00@live.com

+- release 1.8 with major changes and tfjs 3.6.0
-### **release 1.8 with major changes and tfjs 3.6.0** 2021/04/30 mandic00@live.com

 ### **1.8.1** 2021/04/30 mandic00@live.com

@@ -389,7 +623,6 @@
 - added filter.flip feature
 - added demo load image from http
 - mobile demo optimization and iris gestures
-- full test run
 - full rebuild
 - new look
 - added benchmarks

@@ -499,7 +732,6 @@
 - add experimental nanodet object detection
 - full models signature
-- cleanup

 ### **1.1.7** 2021/03/16 mandic00@live.com

@@ -545,7 +777,6 @@
 ### **1.0.3** 2021/03/10 mandic00@live.com

 - strong typing for public classes and hide private classes
-- re-added blazeface-front
 - enhanced age, gender, emotion detection
 - full rebuild

@@ -554,9 +785,6 @@
 - remove blazeface-front, blazepose-upper, faceboxes
 - remove blazeface-front and faceboxes

-### **release: 1.0.1** 2021/03/09 mandic00@live.com

 ### **1.0.1** 2021/03/09 mandic00@live.com

 - fix for face detector when mesh is disabled

@@ -571,7 +799,6 @@
 - 0.40.5
 - fix human.draw
 - 0.40.4
-- cleanup blazepose code
 - fix demo
 - 0.40.3
 - 0.40.2

@@ -594,19 +821,13 @@
 - 0.20.11
 - 0.20.10
 - 0.20.9
-- remove extra items
-- simmilarity fix
 - 0.20.8
-- embedding fix
 - 0.20.7
 - build fix
 - 0.20.6
 - embedding fix
 - 0.20.5
 - fix imagefx and add dev builds

-### **0.20.4** 2021/02/19 mandic00@live.com

 - 0.20.4
 - 0.20.3
 - rebuild

@@ -631,7 +852,6 @@
 ### **0.9.26** 2021/01/18 mandic00@live.com

 - fix face detection when mesh is disabled
-- added minification notes
 - version bump

 ### **0.9.25** 2021/01/13 mandic00@live.com

@@ -693,7 +913,6 @@
 - conditional hand rotation
 - staggered skipframes
-- fix permissions

 ### **0.9.13** 2020/12/08 mandic00@live.com

@@ -802,7 +1021,6 @@
 - optimized model loader
 - merge branch 'main' of https://github.com/vladmandic/human into main
 - created wiki
-- delete bug_report.md
 - optimize font resizing
 - fix nms sync call

@@ -826,7 +1044,6 @@
 - optimized camera and mobile layout
 - fixed worker and filter compatibility
-- removed test code

 ### **0.7.2** 2020/11/04 mandic00@live.com

@@ -903,7 +1120,6 @@
 ### **0.4.8** 2020/10/28 mandic00@live.com

 - revert "updated menu handler"
-- fix webpack compatibility issue

 ### **0.4.7** 2020/10/27 mandic00@live.com

@@ -991,7 +1207,6 @@
 ### **0.2.8** 2020/10/13 mandic00@live.com

-- added example image

 ### **0.2.7** 2020/10/13 mandic00@live.com

@@ -1007,7 +1222,6 @@
 ### **0.2.4** 2020/10/12 mandic00@live.com

-- removed extra files

 ### **0.2.3** 2020/10/12 mandic00@live.com
README.md (284 changes)

@@ -1,9 +1,9 @@
+[](https://github.com/sponsors/vladmandic)
 
 
 
 
 
-

 # Human Library

@@ -13,26 +13,37 @@

 <br>

-JavaScript module using TensorFlow/JS Machine Learning library
+## Highlights

-- **Browser**:
-  Compatible with both desktop and mobile platforms
-  Compatible with *CPU*, *WebGL*, *WASM* backends
-  Compatible with *WebWorker* execution
-- **NodeJS**:
-  Compatible with both software *tfjs-node* and
-  GPU accelerated backends *tfjs-node-gpu* using CUDA libraries
+- Compatible with most server-side and client-side environments and frameworks
+- Combines multiple machine learning models which can be switched on-demand depending on the use-case
+- Related models are executed in an attention pipeline to provide details when needed
+- Optimized input pre-processing that can enhance image quality of any type of inputs
+- Detection of frame changes to trigger only required models for improved performance
+- Intelligent temporal interpolation to provide smooth results regardless of processing performance
+- Simple unified API
+- Built-in Image, Video and WebCam handling
+
+[*Jump to Quick Start*](#quick-start)

 <br>

-*Check out [**Simple Live Demo**](https://vladmandic.github.io/human/demo/typescript/index.html) fully annotated app as a good start starting point ([html](https://github.com/vladmandic/human/blob/main/demo/typescript/index.html))([code](https://github.com/vladmandic/human/blob/main/demo/typescript/index.ts))*
+## Compatibility

-*Check out [**Main Live Demo**](https://vladmandic.github.io/human/demo/index.html) app for advanced processing of of webcam, video stream or images static images with all possible tunable options*
+**Browser**:
+- Compatible with both desktop and mobile platforms
+- Compatible with *WebGPU*, *WebGL*, *WASM*, *CPU* backends
+- Compatible with *WebWorker* execution
+- Compatible with *WebView*
+- Primary platform: *Chromium*-based browsers
+- Secondary platform: *Firefox*, *Safari*

-- To start video detection, simply press *Play*
-- To process images, simply drag & drop in your Browser window
-- Note: For optimal performance, select only models you'd like to use
-- Note: If you have modern GPU, WebGL (default) backend is preferred, otherwise select WASM backend
+**NodeJS**:
+- Compatibile with *WASM* backend for executions on architectures where *tensorflow* binaries are not available
+- Compatible with *tfjs-node* using software execution via *tensorflow* shared libraries
+- Compatible with *tfjs-node* using GPU-accelerated execution via *tensorflow* shared libraries and nVidia CUDA
+- Supported versions are from **14.x** to **22.x**
+- NodeJS version **23.x** is not supported due to breaking changes and issues with `@tensorflow/tfjs`

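For orientation, NodeJS usage along the lines the compatibility notes above describe can be sketched as follows. This is a minimal illustrative sketch, not a snippet from the diffed README: it assumes `@vladmandic/human` and `@tensorflow/tfjs-node` are installed, the input filename is hypothetical, and the maintained demos live under `demo/nodejs` in the repository.

```js
// minimal NodeJS sketch; assumes @vladmandic/human and @tensorflow/tfjs-node are installed
const fs = require('fs');
require('@tensorflow/tfjs-node'); // registers the native tensorflow backend
const Human = require('@vladmandic/human').default;

const human = new Human({ backend: 'tensorflow' }); // use native execution in nodejs

async function main() {
  const buffer = fs.readFileSync('input.jpg'); // hypothetical input image
  const tensor = human.tf.node.decodeImage(buffer, 3); // decode jpg/png into a tensor
  const result = await human.detect(tensor); // runs all enabled models
  console.log(result.face.length, result.body.length, result.hand.length);
  human.tf.dispose(tensor); // tensors created manually must be disposed manually
}

main();
```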
||||||
<br>
|
<br>
|
||||||
|
|
||||||
|
@ -41,37 +52,60 @@ JavaScript module using TensorFlow/JS Machine Learning library
|
||||||
- [NPM Link](https://www.npmjs.com/package/@vladmandic/human)
|
- [NPM Link](https://www.npmjs.com/package/@vladmandic/human)
|
||||||
## Demos
|
## Demos
|
||||||
|
|
||||||
|
*Check out [**Simple Live Demo**](https://vladmandic.github.io/human/demo/typescript/index.html) fully annotated app as a good start starting point ([html](https://github.com/vladmandic/human/blob/main/demo/typescript/index.html))([code](https://github.com/vladmandic/human/blob/main/demo/typescript/index.ts))*
|
||||||
|
|
||||||
|
*Check out [**Main Live Demo**](https://vladmandic.github.io/human/demo/index.html) app for advanced processing of of webcam, video stream or images static images with all possible tunable options*
|
||||||
|
|
||||||
|
- To start video detection, simply press *Play*
|
||||||
|
- To process images, simply drag & drop in your Browser window
|
||||||
|
- Note: For optimal performance, select only models you'd like to use
|
||||||
|
- Note: If you have modern GPU, *WebGL* (default) backend is preferred, otherwise select *WASM* backend
|
||||||
|
|
||||||
|
<br>
|
||||||
|
|
||||||
|
|
||||||
- [**List of all Demo applications**](https://github.com/vladmandic/human/wiki/Demos)
|
- [**List of all Demo applications**](https://github.com/vladmandic/human/wiki/Demos)
|
||||||
- [**Examples galery**](https://vladmandic.github.io/human/samples/samples.html)
|
- [**Live Examples galery**](https://vladmandic.github.io/human/samples/index.html)
|
||||||
|
|
||||||
### Browser Demos
|
### Browser Demos
|
||||||
|
|
||||||
|
*All browser demos are self-contained without any external dependencies*
|
||||||
|
|
||||||
- **Full** [[*Live*]](https://vladmandic.github.io/human/demo/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo): Main browser demo app that showcases all Human capabilities
|
- **Full** [[*Live*]](https://vladmandic.github.io/human/demo/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo): Main browser demo app that showcases all Human capabilities
|
||||||
- **Simple** [[*Live*]](https://vladmandic.github.io/human/demo/typescript/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/typescript): Simple demo in WebCam processing demo in TypeScript
|
- **Simple** [[*Live*]](https://vladmandic.github.io/human/demo/typescript/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/typescript): Simple demo in WebCam processing demo in TypeScript
|
||||||
- **Face Match** [[*Live*]](https://vladmandic.github.io/human/demo/facematch/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Extract faces from images, calculates face descriptors and simmilarities and matches them to known database
|
- **Embedded** [[*Live*]](https://vladmandic.github.io/human/demo/video/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/video/index.html): Even simpler demo with tiny code embedded in HTML file
|
||||||
|
- **Face Detect** [[*Live*]](https://vladmandic.github.io/human/demo/facedetect/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facedetect): Extract faces from images and processes details
|
||||||
|
- **Face Match** [[*Live*]](https://vladmandic.github.io/human/demo/facematch/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Extract faces from images, calculates face descriptors and similarities and matches them to known database
|
||||||
- **Face ID** [[*Live*]](https://vladmandic.github.io/human/demo/faceid/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/faceid): Runs multiple checks to validate webcam input before performing face match to faces in IndexDB
|
- **Face ID** [[*Live*]](https://vladmandic.github.io/human/demo/faceid/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/faceid): Runs multiple checks to validate webcam input before performing face match to faces in IndexDB
|
||||||
- **Multi-thread** [[*Live*]](https://vladmandic.github.io/human/demo/multithread/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread): Runs each `human` module in a separate web worker for highest possible performance
|
- **Multi-thread** [[*Live*]](https://vladmandic.github.io/human/demo/multithread/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread): Runs each Human module in a separate web worker for highest possible performance
|
||||||
- **3D Analysis** [[*Live*]](https://vladmandic.github.io/human-motion/src/index.html) [[*Details*]](https://github.com/vladmandic/human-motion): 3D tracking and visualization of heead, face, eye, body and hand
|
- **NextJS** [[*Live*]](https://vladmandic.github.io/human-next/out/index.html) [[*Details*]](https://github.com/vladmandic/human-next): Use Human with TypeScript, NextJS and ReactJS
|
||||||
- **Virtual Avatar** [[*Live*]](https://vladmandic.github.io/human-vrm/src/human-vrm.html) [[*Details*]](https://github.com/vladmandic/human-vrm): VR model with head, face, eye, body and hand tracking
|
- **ElectronJS** [[*Details*]](https://github.com/vladmandic/human-electron): Use Human with TypeScript and ElectonJS to create standalone cross-platform apps
|
||||||
|
- **3D Analysis with BabylonJS** [[*Live*]](https://vladmandic.github.io/human-motion/src/index.html) [[*Details*]](https://github.com/vladmandic/human-motion): 3D tracking and visualization of heead, face, eye, body and hand
|
||||||
|
- **VRM Virtual Model Tracking with Three.JS** [[*Live*]](https://vladmandic.github.io/human-three-vrm/src/human-vrm.html) [[*Details*]](https://github.com/vladmandic/human-three-vrm): VR model with head, face, eye, body and hand tracking
|
||||||
|
- **VRM Virtual Model Tracking with BabylonJS** [[*Live*]](https://vladmandic.github.io/human-bjs-vrm/src/index.html) [[*Details*]](https://github.com/vladmandic/human-bjs-vrm): VR model with head, face, eye, body and hand tracking
|
||||||
|
|
||||||
### NodeJS Demos
|
### NodeJS Demos
|
||||||
|
|
||||||
- **Main** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Process images from files, folders or URLs using native methods
|
*NodeJS demos may require extra dependencies which are used to decode inputs*
|
||||||
- **Canvas** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Process image from file or URL and draw results to a new image file using `node-canvas`
|
*See header of each demo to see its dependencies as they are not automatically installed with `Human`*
|
||||||
- **Video** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Processing of video input using `ffmpeg`
|
|
||||||
- **WebCam** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Processing of webcam screenshots using `fswebcam`
|
|
||||||
- **Events** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Showcases usage of `Human` eventing to get notifications on processing
|
|
||||||
- **Similarity** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Compares two input images for similarity of detected faces
|
|
||||||
- **Face Match** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Parallel processing of face **match** in multiple child worker threads
|
|
||||||
- **Multiple Workers** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Runs multiple parallel `human` by dispaching them to pool of pre-created worker processes
|
|
||||||
|
|
||||||
|
- **Main** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node.js): Process images from files, folders or URLs using native methods
|
||||||
|
- **Canvas** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-canvas.js): Process image from file or URL and draw results to a new image file using `node-canvas`
|
||||||
|
- **Video** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-video.js): Processing of video input using `ffmpeg`
|
||||||
|
- **WebCam** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-webcam.js): Processing of webcam screenshots using `fswebcam`
|
||||||
|
- **Events** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-event.js): Showcases usage of `Human` eventing to get notifications on processing
|
||||||
|
- **Similarity** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-similarity.js): Compares two input images for similarity of detected faces
|
||||||
|
- **Face Match** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch/node-match.js): Parallel processing of face **match** in multiple child worker threads
|
||||||
|
- **Multiple Workers** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread/node-multiprocess.js): Runs multiple parallel `human` by dispaching them to pool of pre-created worker processes
|
||||||
|
- **Dynamic Load** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Loads Human dynamically with multiple different desired backends
|
||||||
|
|
||||||
## Project pages
|
## Project pages
|
||||||
|
|
||||||
- [**Code Repository**](https://github.com/vladmandic/human)
|
- [**Code Repository**](https://github.com/vladmandic/human)
|
||||||
- [**NPM Package**](https://www.npmjs.com/package/@vladmandic/human)
|
- [**NPM Package**](https://www.npmjs.com/package/@vladmandic/human)
|
||||||
- [**Issues Tracker**](https://github.com/vladmandic/human/issues)
|
- [**Issues Tracker**](https://github.com/vladmandic/human/issues)
|
||||||
- [**TypeDoc API Specification**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
|
- [**TypeDoc API Specification - Main class**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
|
||||||
|
- [**TypeDoc API Specification - Full**](https://vladmandic.github.io/human/typedoc/)
|
||||||
- [**Change Log**](https://github.com/vladmandic/human/blob/main/CHANGELOG.md)
|
- [**Change Log**](https://github.com/vladmandic/human/blob/main/CHANGELOG.md)
|
||||||
- [**Current To-do List**](https://github.com/vladmandic/human/blob/main/TODO.md)
|
- [**Current To-do List**](https://github.com/vladmandic/human/blob/main/TODO.md)
|
||||||
|
|
||||||
|
@ -82,6 +116,7 @@ JavaScript module using TensorFlow/JS Machine Learning library
|
||||||
- [**Usage & Functions**](https://github.com/vladmandic/human/wiki/Usage)
|
- [**Usage & Functions**](https://github.com/vladmandic/human/wiki/Usage)
|
||||||
- [**Configuration Details**](https://github.com/vladmandic/human/wiki/Config)
|
- [**Configuration Details**](https://github.com/vladmandic/human/wiki/Config)
|
||||||
- [**Result Details**](https://github.com/vladmandic/human/wiki/Result)
|
- [**Result Details**](https://github.com/vladmandic/human/wiki/Result)
|
||||||
|
- [**Customizing Draw Methods**](https://github.com/vladmandic/human/wiki/Draw)
|
||||||
- [**Caching & Smoothing**](https://github.com/vladmandic/human/wiki/Caching)
|
- [**Caching & Smoothing**](https://github.com/vladmandic/human/wiki/Caching)
|
||||||
- [**Input Processing**](https://github.com/vladmandic/human/wiki/Image)
|
- [**Input Processing**](https://github.com/vladmandic/human/wiki/Image)
|
||||||
- [**Face Recognition & Face Description**](https://github.com/vladmandic/human/wiki/Embedding)
|
- [**Face Recognition & Face Description**](https://github.com/vladmandic/human/wiki/Embedding)
|
||||||
|
@ -113,27 +148,24 @@ JavaScript module using TensorFlow/JS Machine Learning library
|
||||||
|
|
||||||
<hr><br>
|
<hr><br>
|
||||||
|
|
||||||
## Examples
|
## App Examples
|
||||||
|
|
||||||
Visit [Examples galery](https://vladmandic.github.io/human/samples/samples.html) for more examples
|
Visit [Examples gallery](https://vladmandic.github.io/human/samples/index.html) for more examples
|
||||||
<https://vladmandic.github.io/human/samples/samples.html>
|
[<img src="assets/samples.jpg" width="640"/>](assets/samples.jpg)
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
<br>
|
<br>
|
||||||
|
|
||||||
## Options
|
## Options
|
||||||
|
|
||||||
All options as presented in the demo application...
|
All options as presented in the demo application...
|
||||||
> [demo/index.html](demo/index.html)
|
[demo/index.html](demo/index.html)
|
||||||
|
[<img src="assets/screenshot-menu.png"/>](assets/screenshot-menu.png)
|
||||||

|
|
||||||
|
|
||||||
<br>
|
<br>
|
||||||
|
|
||||||
**Results Browser:**
|
**Results Browser:**
|
||||||
[ *Demo -> Display -> Show Results* ]<br>
|
[ *Demo -> Display -> Show Results* ]<br>
|
||||||

|
[<img src="assets/screenshot-results.png"/>](assets/screenshot-results.png)
|
||||||
|
|
||||||
<br>
|
<br>
|
||||||
|
|
||||||
|
@ -145,30 +177,47 @@ sorts them by similarity to selected face
|
||||||
and optionally matches the detected face against a database of known people to guess their names
|
and optionally matches the detected face against a database of known people to guess their names
|
||||||
> [demo/facematch](demo/facematch/index.html)
|
> [demo/facematch](demo/facematch/index.html)
|
||||||
|
|
||||||

|
[<img src="assets/screenshot-facematch.jpg" width="640"/>](assets/screenshot-facematch.jpg)
|
||||||
|
|
||||||
|
2. **Face Detect:**
|
||||||
|
Extracts all detected faces from loaded images on demand and highlights face details on a selected face
|
||||||
|
> [demo/facedetect](demo/facedetect/index.html)
|
||||||
|
|
||||||
|
[<img src="assets/screenshot-facedetect.jpg" width="640"/>](assets/screenshot-facedetect.jpg)
|
||||||
|
|
||||||
|
3. **Face ID:**
|
||||||
|
Performs a validation check on webcam input to detect a real face and matches it against known faces stored in a database
|
||||||
|
> [demo/faceid](demo/faceid/index.html)
|
||||||
|
|
||||||
|
[<img src="assets/screenshot-faceid.jpg" width="640"/>](assets/screenshot-faceid.jpg)
|
||||||
|
|
||||||
<br>
|
<br>
|
||||||
|
|
||||||
2. **3D Rendering:**
|
4. **3D Rendering:**
|
||||||
> [human-motion](https://github.com/vladmandic/human-motion)
|
> [human-motion](https://github.com/vladmandic/human-motion)
|
||||||
|
|
||||||

|
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-face.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-face.jpg)
|
||||||

|
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-body.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-body.jpg)
|
||||||

|
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-hand.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-hand.jpg)
|
||||||
|
|
||||||
<br>
|
<br>
|
||||||
|
|
||||||
3. **VR Model Tracking:**
|
5. **VR Model Tracking:**
|
||||||
> [human-vrmmotion](https://github.com/vladmandic/human-vrm)
|
> [human-three-vrm](https://github.com/vladmandic/human-three-vrm)
|
||||||
|
> [human-bjs-vrm](https://github.com/vladmandic/human-bjs-vrm)
|
||||||
|
|
||||||

|
[<img src="https://github.com/vladmandic/human-three-vrm/raw/main/assets/human-vrm-screenshot.jpg" width="640"/>](https://github.com/vladmandic/human-three-vrm/raw/main/assets/human-vrm-screenshot.jpg)
|
||||||
|
|
||||||
|
|
||||||
|
6. **Human as a native OS application:**
|
||||||
|
> [human-electron](https://github.com/vladmandic/human-electron)
|
||||||
|
|
||||||
<br>
|
<br>
|
||||||
|
|
||||||
**468-Point Face Mesh Details:**
|
**468-Point Face Mesh Details:**
|
||||||
(view in full resolution to see keypoints)
|
(view in full resolution to see keypoints)
|
||||||
|
|
||||||

|
[<img src="assets/facemesh.png" width="400"/>](assets/facemesh.png)
|
||||||
|
|
||||||
<br><hr><br>
|
<br><hr><br>
|
||||||
|
|
||||||
|
@ -178,44 +227,25 @@ Simply load `Human` (*IIFE version*) directly from a cloud CDN in your HTML file
|
||||||
(pick one: `jsdelivr`, `unpkg` or `cdnjs`)
|
(pick one: `jsdelivr`, `unpkg` or `cdnjs`)
|
||||||
|
|
||||||
```html
|
```html
|
||||||
|
<!DOCTYPE HTML>
|
||||||
<script src="https://cdn.jsdelivr.net/npm/@vladmandic/human/dist/human.js"></script>
|
<script src="https://cdn.jsdelivr.net/npm/@vladmandic/human/dist/human.js"></script>
|
||||||
<script src="https://unpkg.dev/@vladmandic/human/dist/human.js"></script>
|
<script src="https://unpkg.dev/@vladmandic/human/dist/human.js"></script>
|
||||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/human/2.1.5/human.js"></script>
|
<script src="https://cdnjs.cloudflare.com/ajax/libs/human/3.0.0/human.js"></script>
|
||||||
```
|
```
|
||||||
|
|
||||||
For details, including how to use `Browser ESM` version or `NodeJS` version of `Human`, see [**Installation**](https://github.com/vladmandic/human/wiki/Install)
|
For details, including how to use `Browser ESM` version or `NodeJS` version of `Human`, see [**Installation**](https://github.com/vladmandic/human/wiki/Install)
|
||||||
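Before diving into the wiki, a minimal NodeJS loading sketch; package and dist file names are as published on npm, and `@tensorflow/tfjs-node` must be installed alongside:

```js
// nodejs: use the commonjs build (the library exports Human both as default and named export)
const Human = require('@vladmandic/human/dist/human.node.js').default;
// in a browser with a bundler the equivalent is: import { Human } from '@vladmandic/human';

const human = new Human(); // default configuration
console.log('human version:', human.version);
```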
|
|
||||||
<br>
|
<br>
|
||||||
|
|
||||||
## Inputs
|
## Code Examples
|
||||||
|
|
||||||
`Human` library can process all known input types:
|
Simple app that uses Human to process video input and
|
||||||
|
|
||||||
- `Image`, `ImageData`, `ImageBitmap`, `Canvas`, `OffscreenCanvas`, `Tensor`,
|
|
||||||
- `HTMLImageElement`, `HTMLCanvasElement`, `HTMLVideoElement`, `HTMLMediaElement`
|
|
||||||
|
|
||||||
Additionally, `HTMLVideoElement`, `HTMLMediaElement` can be a standard `<video>` tag that links to:
|
|
||||||
|
|
||||||
- WebCam on user's system
|
|
||||||
- Any supported video type
|
|
||||||
For example: `.mp4`, `.avi`, etc.
|
|
||||||
- Additional video types supported via *HTML5 Media Source Extensions*
|
|
||||||
Live streaming examples:
|
|
||||||
- **HLS** (*HTTP Live Streaming*) using `hls.js`
|
|
||||||
- **DASH** (Dynamic Adaptive Streaming over HTTP) using `dash.js`
|
|
||||||
- **WebRTC** media track using built-in support
|
|
||||||
|
|
||||||
<br>
|
|
||||||
|
|
||||||
## Example
|
|
||||||
|
|
||||||
Example simple app that uses Human to process video input and
|
|
||||||
draw output on screen using internal draw helper functions
|
draw output on screen using internal draw helper functions
|
||||||
|
|
||||||
```js
|
```js
|
||||||
// create instance of human with simple configuration using default values
|
// create instance of human with simple configuration using default values
|
||||||
const config = { backend: 'webgl' };
|
const config = { backend: 'webgl' };
|
||||||
const human = new Human(config);
|
const human = new Human.Human(config);
|
||||||
// select input HTMLVideoElement and output HTMLCanvasElement from page
|
// select input HTMLVideoElement and output HTMLCanvasElement from page
|
||||||
const inputVideo = document.getElementById('video-id');
|
const inputVideo = document.getElementById('video-id');
|
||||||
const outputCanvas = document.getElementById('canvas-id');
|
const outputCanvas = document.getElementById('canvas-id');
|
||||||
|
@ -234,6 +264,7 @@ function detectVideo() {
|
||||||
human.draw.gesture(outputCanvas, result.gesture);
|
human.draw.gesture(outputCanvas, result.gesture);
|
||||||
// and immediately loop to the next frame
|
// and immediately loop to the next frame
|
||||||
requestAnimationFrame(detectVideo);
|
requestAnimationFrame(detectVideo);
|
||||||
|
return result;
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -294,7 +325,7 @@ async function detectVideo() {
|
||||||
|
|
||||||
async function drawVideo() {
|
async function drawVideo() {
|
||||||
if (result) { // check if result is available
|
if (result) { // check if result is available
|
||||||
const interpolated = human.next(result); // calculate next interpolated frame
|
const interpolated = human.next(result); // get smoothed result using last-known results
|
||||||
human.draw.all(outputCanvas, interpolated); // draw the frame
|
human.draw.all(outputCanvas, interpolated); // draw the frame
|
||||||
}
|
}
|
||||||
requestAnimationFrame(drawVideo); // run draw loop
|
requestAnimationFrame(drawVideo); // run draw loop
|
||||||
|
@ -304,26 +335,108 @@ detectVideo(); // start detection loop
|
||||||
drawVideo(); // start draw loop
|
drawVideo(); // start draw loop
|
||||||
```
|
```
|
||||||
|
|
||||||
|
or the same, but using built-in full video processing instead of running a manual frame-by-frame loop:
|
||||||
|
|
||||||
|
```js
|
||||||
|
const human = new Human(); // create instance of Human
|
||||||
|
const inputVideo = document.getElementById('video-id');
|
||||||
|
const outputCanvas = document.getElementById('canvas-id');
|
||||||
|
|
||||||
|
async function drawResults() {
|
||||||
|
const interpolated = human.next(); // get smoothed result using last-known results
|
||||||
|
human.draw.all(outputCanvas, interpolated); // draw the frame
|
||||||
|
requestAnimationFrame(drawResults); // run draw loop
|
||||||
|
}
|
||||||
|
|
||||||
|
human.video(inputVideo); // start detection loop which continuously updates results
|
||||||
|
drawResults(); // start draw loop
|
||||||
|
```
|
||||||
|
|
||||||
|
or using built-in webcam helper methods that take care of video handling completely:
|
||||||
|
|
||||||
|
```js
|
||||||
|
const human = new Human(); // create instance of Human
|
||||||
|
const outputCanvas = document.getElementById('canvas-id');
|
||||||
|
|
||||||
|
async function drawResults() {
|
||||||
|
const interpolated = human.next(); // get smoothed result using last-known results
|
||||||
|
human.draw.canvas(outputCanvas, human.webcam.element); // draw current webcam frame
|
||||||
|
human.draw.all(outputCanvas, interpolated); // draw detection results for the frame
|
||||||
|
requestAnimationFrame(drawResults); // run draw loop
|
||||||
|
}
|
||||||
|
|
||||||
|
await human.webcam.start({ crop: true });
|
||||||
|
human.video(human.webcam.element); // start detection loop which continuously updates results
|
||||||
|
drawResults(); // start draw loop
|
||||||
|
```
|
||||||
|
|
||||||
And for even better results, you can run detection in a separate web worker thread
|
And for even better results, you can run detection in a separate web worker thread
|
||||||
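A minimal sketch of that worker pattern, assuming a hypothetical `human-worker.js` script that creates its own `Human` instance; the repo's demos show complete implementations:

```js
// main thread sketch: transfer video frames to a worker and react to its results
const worker = new Worker('human-worker.js'); // hypothetical worker script
const video = document.getElementById('video-id');
const frame = document.createElement('canvas');
const ctx = frame.getContext('2d');

worker.onmessage = (msg) => {
  console.log('worker detected faces:', msg.data.face?.length); // msg.data is the result posted by the worker
  requestAnimationFrame(sendFrame); // request next frame once the previous one is processed
};

function sendFrame() {
  if (!video.videoWidth) { // video not ready yet, try again on next repaint
    requestAnimationFrame(sendFrame);
    return;
  }
  frame.width = video.videoWidth;
  frame.height = video.videoHeight;
  ctx.drawImage(video, 0, 0);
  const image = ctx.getImageData(0, 0, frame.width, frame.height);
  worker.postMessage({ image }, [image.data.buffer]); // transfer the pixel buffer without copying
}
sendFrame();

// human-worker.js sketch: create Human inside the worker and post results back
// importScripts('../dist/human.js');
// const human = new Human.Human();
// onmessage = async (msg) => postMessage(await human.detect(msg.data.image)); // a real worker strips non-cloneable fields from the result first
```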
|
|
||||||
|
<br><hr><br>
|
||||||
|
|
||||||
|
## Inputs
|
||||||
|
|
||||||
|
`Human` library can process all known input types (see the sketch after this list):
|
||||||
|
|
||||||
|
- `Image`, `ImageData`, `ImageBitmap`, `Canvas`, `OffscreenCanvas`, `Tensor`,
|
||||||
|
- `HTMLImageElement`, `HTMLCanvasElement`, `HTMLVideoElement`, `HTMLMediaElement`
|
||||||
|
|
||||||
|
Additionally, `HTMLVideoElement`, `HTMLMediaElement` can be a standard `<video>` tag that links to:
|
||||||
|
|
||||||
|
- WebCam on user's system
|
||||||
|
- Any supported video type
|
||||||
|
e.g. `.mp4`, `.avi`, etc.
|
||||||
|
- Additional video types supported via *HTML5 Media Source Extensions*
|
||||||
|
e.g.: **HLS** (*HTTP Live Streaming*) using `hls.js` or **DASH** (*Dynamic Adaptive Streaming over HTTP*) using `dash.js`
|
||||||
|
- **WebRTC** media track using built-in support
|
||||||
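Illustrative only: whatever the input type, it goes through the same `human.detect()` call; the element ids below are hypothetical:

```js
async function detectAnyInput(human) {
  // any of the listed input types is accepted by the same call
  const fromImage = await human.detect(document.getElementById('image-id')); // HTMLImageElement
  const fromVideo = await human.detect(document.getElementById('video-id')); // HTMLVideoElement: webcam, file or stream
  const fromPixels = await human.detect(new ImageData(640, 480)); // raw pixel data
  return [fromImage, fromVideo, fromPixels];
}
```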
|
|
||||||
|
<br><hr><br>
|
||||||
|
|
||||||
|
## Detailed Usage
|
||||||
|
|
||||||
|
- [**Wiki Home**](https://github.com/vladmandic/human/wiki)
|
||||||
|
- [**List of all available methods, properties and namespaces**](https://github.com/vladmandic/human/wiki/Usage)
|
||||||
|
- [**TypeDoc API Specification - Main class**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
|
||||||
|
- [**TypeDoc API Specification - Full**](https://vladmandic.github.io/human/typedoc/)
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
<br><hr><br>
|
||||||
|
|
||||||
|
## TypeDefs
|
||||||
|
|
||||||
|
`Human` is written in strongly typed TypeScript and ships with full **TypeDefs** for all classes defined by the library, bundled in `types/human.d.ts` and enabled by default
|
||||||
|
|
||||||
|
*Note*: This does not include embedded `tfjs`
|
||||||
|
If you want to use embedded `tfjs` inside `Human` (`human.tf` namespace) and still have full **typedefs**, add this code:
|
||||||
|
|
||||||
|
> import type * as tfjs from '@vladmandic/human/dist/tfjs.esm';
|
||||||
|
> const tf = human.tf as typeof tfjs;
|
||||||
|
|
||||||
|
This is not enabled by default as `Human` does not ship with full **TFJS TypeDefs** due to size considerations
|
||||||
|
Enabling `tfjs` TypeDefs as above creates additional project dependencies (dev-only, as only types are required) as defined in `@vladmandic/human/dist/tfjs.esm.d.ts`:
|
||||||
|
|
||||||
|
> @tensorflow/tfjs-core, @tensorflow/tfjs-converter, @tensorflow/tfjs-backend-wasm, @tensorflow/tfjs-backend-webgl
|
||||||
|
|
||||||
|
|
||||||
<br><hr><br>
|
<br><hr><br>
|
||||||
|
|
||||||
## Default models
|
## Default models
|
||||||
|
|
||||||
Default models in Human library are:
|
Default models in Human library are:
|
||||||
|
|
||||||
- **Face Detection**: MediaPipe BlazeFace Back variation
|
- **Face Detection**: *MediaPipe BlazeFace Back variation*
|
||||||
- **Face Mesh**: MediaPipe FaceMesh
|
- **Face Mesh**: *MediaPipe FaceMesh*
|
||||||
- **Face Iris Analysis**: MediaPipe Iris
|
- **Face Iris Analysis**: *MediaPipe Iris*
|
||||||
- **Face Description**: HSE FaceRes
|
- **Face Description**: *HSE FaceRes*
|
||||||
- **Emotion Detection**: Oarriaga Emotion
|
- **Emotion Detection**: *Oarriaga Emotion*
|
||||||
- **Body Analysis**: MoveNet Lightning variation
|
- **Body Analysis**: *MoveNet Lightning variation*
|
||||||
- **Hand Analysis**: HandTrack & MediaPipe HandLandmarks
|
- **Hand Analysis**: *HandTrack & MediaPipe HandLandmarks*
|
||||||
- **Body Segmentation**: Google Selfie
|
- **Body Segmentation**: *Google Selfie*
|
||||||
- **Object Detection**: CenterNet with MobileNet v3
|
- **Object Detection**: *CenterNet with MobileNet v3*
|
||||||
|
|
||||||
Note that alternative models are provided and can be enabled via configuration
|
Note that alternative models are provided and can be enabled via configuration
|
||||||
For example, `PoseNet` model can be switched for `BlazePose`, `EfficientPose` or `MoveNet` model depending on the use case
|
For example, body pose detection by default uses *MoveNet Lightning*, but can be switched to *MoveNet Thunder* for higher precision, *MoveNet MultiPose* for multi-person detection, or even *PoseNet*, *BlazePose* or *EfficientPose*, depending on the use case
|
||||||
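A hedged configuration sketch for such a switch; the exact `modelPath` file name is an assumption, check the Models wiki for the current list:

```js
// replace the default body model (movenet lightning) with the higher-precision thunder variation
const human = new Human({
  body: { enabled: true, modelPath: 'movenet-thunder.json' }, // file name is illustrative, resolved relative to modelBasePath
});
```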
|
|
||||||
For more info, see [**Configuration Details**](https://github.com/vladmandic/human/wiki/Configuration) and [**List of Models**](https://github.com/vladmandic/human/wiki/Models)
|
For more info, see [**Configuration Details**](https://github.com/vladmandic/human/wiki/Configuration) and [**List of Models**](https://github.com/vladmandic/human/wiki/Models)
|
||||||
|
|
||||||
|
@ -335,9 +448,9 @@ For more info, see [**Configuration Details**](https://github.com/vladmandic/hum
|
||||||
|
|
||||||
<br><hr><br>
|
<br><hr><br>
|
||||||
|
|
||||||
`Human` library is written in `TypeScript` [4.5](https://www.typescriptlang.org/docs/handbook/intro.html)
|
`Human` library is written in [TypeScript](https://www.typescriptlang.org/docs/handbook/intro.html) **5.1** using [TensorFlow/JS](https://www.tensorflow.org/js/) **4.10** and conforming to latest `JavaScript` [ECMAScript version 2022](https://262.ecma-international.org/) standard
|
||||||
Conforming to latest `JavaScript` [ECMAScript version 2021](https://262.ecma-international.org/) standard
|
|
||||||
Build target is `JavaScript` [ECMAScript version 2018](https://262.ecma-international.org/9.0/)
|
Build target for distributables is `JavaScript` [ECMAScript version 2018](https://262.ecma-international.org/9.0/)
|
||||||
|
|
||||||
<br>
|
<br>
|
||||||
|
|
||||||
|
@ -346,6 +459,7 @@ and [**API Specification**](https://vladmandic.github.io/human/typedoc/classes/H
|
||||||
|
|
||||||
<br>
|
<br>
|
||||||
|
|
||||||
|
[](https://github.com/sponsors/vladmandic)
|
||||||

|

|
||||||

|

|
||||||

|

|
||||||
|
|
43
TODO.md
|
@ -1,37 +1,38 @@
|
||||||
# To-Do list for Human library
|
# To-Do list for Human library
|
||||||
|
|
||||||
## Work in Progress
|
## Work-in-Progress
|
||||||
|
|
||||||
<br>
|
<hr><br>
|
||||||
|
|
||||||
### Exploring
|
## Known Issues & Limitations
|
||||||
|
|
||||||
- Optical flow: <https://docs.opencv.org/3.3.1/db/d7f/tutorial_js_lucas_kanade.html>
|
### Face with Attention
|
||||||
- Advanced histogram equalization: Adaptive, Contrast Limited, CLAHE
|
|
||||||
- TFLite models: <https://js.tensorflow.org/api_tflite/0.0.1-alpha.4/>
|
|
||||||
- Body segmentation: `robust-video-matting`
|
|
||||||
|
|
||||||
<br><hr><br>
|
`FaceMesh-Attention` is not supported when using `WASM` backend due to missing kernel op in **TFJS**
|
||||||
|
No issues with default model `FaceMesh`
|
||||||
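A possible workaround sketch when the `WASM` backend is required, assuming the `face.attention` configuration section:

```js
// keep the default facemesh model and leave the attention variant disabled on wasm
const human = new Human({
  backend: 'wasm',
  face: { mesh: { enabled: true }, attention: { enabled: false } },
});
```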
|
|
||||||
## Known Issues
|
### Object Detection
|
||||||
|
|
||||||
#### WebGPU
|
`NanoDet` model is not supported when using `WASM` backend due to missing kernel op in **TFJS**
|
||||||
|
No issues with default model `MB3-CenterNet`
|
||||||
|
|
||||||
Experimental support only until support is officially added in Chromium
|
### Body Detection using MoveNet-MultiPose
|
||||||
|
|
||||||
- Performance issues:
|
Model does not return valid detection scores (all other functionality is unaffected)
|
||||||
<https://github.com/tensorflow/tfjs/issues/5689>
|
|
||||||
|
|
||||||
### Face Detection
|
### Firefox
|
||||||
|
|
||||||
Enhanced rotation correction for face detection is not working in NodeJS due to missing kernel op in TFJS
|
Running in **web workers** requires `OffscreenCanvas` which is still disabled by default in **Firefox**
|
||||||
Feature is automatically disabled in NodeJS without user impact
|
Enable via `about:config` -> `gfx.offscreencanvas.enabled`
|
||||||
|
[Details](https://developer.mozilla.org/en-US/docs/Web/API/OffscreenCanvas#browser_compatibility)
|
||||||
|
|
||||||
- Backend NodeJS missing kernel op `RotateWithOffset`
|
### Safari
|
||||||
<https://github.com/tensorflow/tfjs/issues/5473>
|
|
||||||
|
|
||||||
<br><hr><br>
|
No support for running in **web workers** as Safari still does not support `OffscreenCanvas`
|
||||||
|
[Details](https://developer.mozilla.org/en-US/docs/Web/API/OffscreenCanvas#browser_compatibility)
|
||||||
|
|
||||||
## Pending Release Notes
|
### React-Native
|
||||||
|
|
||||||
N/A
|
`Human` support for **React-Native** is best-effort, but not part of the main development focus
|
||||||
|
|
||||||
|
<hr><br>
|
||||||
|
|
After Width: | Height: | Size: 70 KiB |
After Width: | Height: | Size: 47 KiB |
Before Width: | Height: | Size: 41 KiB After Width: | Height: | Size: 22 KiB |
Before Width: | Height: | Size: 34 KiB After Width: | Height: | Size: 14 KiB |
Before Width: | Height: | Size: 56 KiB |
After Width: | Height: | Size: 38 KiB |
145
build.js
|
@ -1,25 +1,127 @@
|
||||||
const fs = require('fs');
|
const fs = require('fs');
|
||||||
const log = require('@vladmandic/pilogger');
|
const path = require('path');
|
||||||
const Build = require('@vladmandic/build').Build;
|
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||||
const APIExtractor = require('@microsoft/api-extractor');
|
const Build = require('@vladmandic/build').Build; // eslint-disable-line node/no-unpublished-require
|
||||||
|
const APIExtractor = require('@microsoft/api-extractor'); // eslint-disable-line node/no-unpublished-require
|
||||||
|
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||||
|
const packageJSON = require('./package.json');
|
||||||
|
|
||||||
function copy(src, dst) {
|
const logFile = 'test/build.log';
|
||||||
if (!fs.existsSync(src)) return;
|
const modelsOut = 'models/models.json';
|
||||||
|
const modelsFolders = [
|
||||||
|
'./models',
|
||||||
|
'../human-models/models',
|
||||||
|
'../blazepose/model/',
|
||||||
|
'../anti-spoofing/model',
|
||||||
|
'../efficientpose/models',
|
||||||
|
'../insightface/models',
|
||||||
|
'../movenet/models',
|
||||||
|
'../nanodet/models',
|
||||||
|
];
|
||||||
|
|
||||||
|
const apiExtractorIgnoreList = [ // eslint-disable-line no-unused-vars
|
||||||
|
'ae-missing-release-tag',
|
||||||
|
'tsdoc-param-tag-missing-hyphen',
|
||||||
|
'tsdoc-escape-right-brace',
|
||||||
|
'tsdoc-undefined-tag',
|
||||||
|
'tsdoc-escape-greater-than',
|
||||||
|
'ae-unresolved-link',
|
||||||
|
'ae-forgotten-export',
|
||||||
|
'tsdoc-malformed-inline-tag',
|
||||||
|
'tsdoc-unnecessary-backslash',
|
||||||
|
];
|
||||||
|
|
||||||
|
const regEx = [
|
||||||
|
{ search: 'types="@webgpu/types/dist"', replace: 'path="../src/types/webgpu.d.ts"' },
|
||||||
|
{ search: 'types="offscreencanvas"', replace: 'path="../src/types/offscreencanvas.d.ts"' },
|
||||||
|
];
|
||||||
|
|
||||||
|
function copyFile(src, dst) {
|
||||||
|
if (!fs.existsSync(src)) {
|
||||||
|
log.warn('Copy:', { input: src, output: dst });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
log.state('Copy:', { input: src, output: dst });
|
||||||
const buffer = fs.readFileSync(src);
|
const buffer = fs.readFileSync(src);
|
||||||
fs.writeFileSync(dst, buffer);
|
fs.writeFileSync(dst, buffer);
|
||||||
}
|
}
|
||||||
|
|
||||||
const apiIgnoreList = ['ae-forgotten-export', 'ae-unresolved-link'];
|
function writeFile(str, dst) {
|
||||||
|
log.state('Write:', { output: dst });
|
||||||
|
fs.writeFileSync(dst, str);
|
||||||
|
}
|
||||||
|
|
||||||
|
function regExFile(src, entries) {
|
||||||
|
if (!fs.existsSync(src)) {
|
||||||
|
log.warn('Filter:', { src });
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
log.state('Filter:', { input: src });
|
||||||
|
for (const entry of entries) {
|
||||||
|
const buffer = fs.readFileSync(src, 'UTF-8');
|
||||||
|
const lines = buffer.split(/\r?\n/);
|
||||||
|
const out = [];
|
||||||
|
for (const line of lines) {
|
||||||
|
if (line.includes(entry.search)) out.push(line.replace(entry.search, entry.replace));
|
||||||
|
else out.push(line);
|
||||||
|
}
|
||||||
|
fs.writeFileSync(src, out.join('\n'));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async function analyzeModels() {
|
||||||
|
log.info('Analyze models:', { folders: modelsFolders.length, result: modelsOut });
|
||||||
|
let totalSize = 0;
|
||||||
|
const models = {};
|
||||||
|
const allModels = [];
|
||||||
|
for (const folder of modelsFolders) {
|
||||||
|
try {
|
||||||
|
if (!fs.existsSync(folder)) continue;
|
||||||
|
const stat = fs.statSync(folder);
|
||||||
|
if (!stat.isDirectory()) continue; // isDirectory is a method, not a property
|
||||||
|
const dir = fs.readdirSync(folder);
|
||||||
|
const found = dir.map((f) => `file://${folder}/${f}`).filter((f) => f.endsWith('json'));
|
||||||
|
log.state('Models', { folder, models: found.length });
|
||||||
|
allModels.push(...found);
|
||||||
|
} catch {
|
||||||
|
// log.warn('Cannot enumerate:', modelFolder);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for (const url of allModels) {
|
||||||
|
// if (!f.endsWith('.json')) continue;
|
||||||
|
// const url = `file://${modelsDir}/${f}`;
|
||||||
|
const model = new tf.GraphModel(url); // create model prototype and decide whether to load from cache or from the original model url
|
||||||
|
model.findIOHandler();
|
||||||
|
const artifacts = await model.handler.load();
|
||||||
|
const size = artifacts?.weightData?.byteLength || 0;
|
||||||
|
totalSize += size;
|
||||||
|
const name = path.basename(url).replace('.json', '');
|
||||||
|
if (!models[name]) models[name] = size;
|
||||||
|
}
|
||||||
|
const json = JSON.stringify(models, null, 2);
|
||||||
|
fs.writeFileSync(modelsOut, json);
|
||||||
|
log.state('Models:', { count: Object.keys(models).length, totalSize });
|
||||||
|
}
|
||||||
|
|
||||||
async function main() {
|
async function main() {
|
||||||
|
log.logFile(logFile);
|
||||||
|
log.data('Build', { name: packageJSON.name, version: packageJSON.version });
|
||||||
|
|
||||||
// run production build
|
// run production build
|
||||||
const build = new Build();
|
const build = new Build();
|
||||||
await build.run('production');
|
await build.run('production');
|
||||||
|
|
||||||
// patch tfjs typedefs
|
// patch tfjs typedefs
|
||||||
log.state('Copy:', { input: 'tfjs/tfjs.esm.d.ts' });
|
copyFile('node_modules/@vladmandic/tfjs/types/tfjs-core.d.ts', 'types/tfjs-core.d.ts');
|
||||||
copy('tfjs/tfjs.esm.d.ts', 'types/lib/dist/tfjs.esm.d.ts');
|
copyFile('node_modules/@vladmandic/tfjs/types/tfjs.d.ts', 'types/tfjs.esm.d.ts');
|
||||||
|
copyFile('src/types/tsconfig.json', 'types/tsconfig.json');
|
||||||
|
copyFile('src/types/eslint.json', 'types/.eslintrc.json');
|
||||||
|
copyFile('src/types/tfjs.esm.d.ts', 'dist/tfjs.esm.d.ts');
|
||||||
|
regExFile('types/tfjs-core.d.ts', regEx);
|
||||||
|
|
||||||
// run api-extractor to create typedef rollup
|
// run api-extractor to create typedef rollup
|
||||||
const extractorConfig = APIExtractor.ExtractorConfig.loadFileAndPrepare('api-extractor.json');
|
const extractorConfig = APIExtractor.ExtractorConfig.loadFileAndPrepare('.api-extractor.json');
|
||||||
|
try {
|
||||||
const extractorResult = APIExtractor.Extractor.invoke(extractorConfig, {
|
const extractorResult = APIExtractor.Extractor.invoke(extractorConfig, {
|
||||||
localBuild: true,
|
localBuild: true,
|
||||||
showVerboseMessages: false,
|
showVerboseMessages: false,
|
||||||
|
@ -27,20 +129,25 @@ async function main() {
|
||||||
msg.handled = true;
|
msg.handled = true;
|
||||||
if (msg.logLevel === 'none' || msg.logLevel === 'verbose' || msg.logLevel === 'info') return;
|
if (msg.logLevel === 'none' || msg.logLevel === 'verbose' || msg.logLevel === 'info') return;
|
||||||
if (msg.sourceFilePath?.includes('/node_modules/')) return;
|
if (msg.sourceFilePath?.includes('/node_modules/')) return;
|
||||||
if (apiIgnoreList.reduce((prev, curr) => prev || msg.messageId.includes(curr), false)) return;
|
// if (apiExtractorIgnoreList.reduce((prev, curr) => prev || msg.messageId.includes(curr), false)) return; // those are external issues outside of human control
|
||||||
log.data('API', { level: msg.logLevel, category: msg.category, id: msg.messageId, file: msg.sourceFilePath, line: msg.sourceFileLine, text: msg.text });
|
log.data('API', { level: msg.logLevel, category: msg.category, id: msg.messageId, file: msg.sourceFilePath, line: msg.sourceFileLine, text: msg.text });
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
log.state('API-Extractor:', { succeeded: extractorResult.succeeded, errors: extractorResult.errorCount, warnings: extractorResult.warningCount });
|
log.state('API-Extractor:', { succeeded: extractorResult.succeeded, errors: extractorResult.errorCount, warnings: extractorResult.warningCount });
|
||||||
// distribute typedefs
|
} catch (err) {
|
||||||
log.state('Copy:', { input: 'types/human.d.ts' });
|
log.error('API-Extractor:', err);
|
||||||
copy('types/human.d.ts', 'dist/human.esm-nobundle.d.ts');
|
}
|
||||||
copy('types/human.d.ts', 'dist/human.esm.d.ts');
|
regExFile('types/human.d.ts', regEx);
|
||||||
copy('types/human.d.ts', 'dist/human.d.ts');
|
writeFile('export * from \'../types/human\';', 'dist/human.esm-nobundle.d.ts');
|
||||||
copy('types/human.d.ts', 'dist/human.node-gpu.d.ts');
|
writeFile('export * from \'../types/human\';', 'dist/human.esm.d.ts');
|
||||||
copy('types/human.d.ts', 'dist/human.node.d.ts');
|
writeFile('export * from \'../types/human\';', 'dist/human.d.ts');
|
||||||
copy('types/human.d.ts', 'dist/human.node-wasm.d.ts');
|
writeFile('export * from \'../types/human\';', 'dist/human.node-gpu.d.ts');
|
||||||
log.info('Human Build complete...');
|
writeFile('export * from \'../types/human\';', 'dist/human.node.d.ts');
|
||||||
|
writeFile('export * from \'../types/human\';', 'dist/human.node-wasm.d.ts');
|
||||||
|
|
||||||
|
// generate model signature
|
||||||
|
await analyzeModels();
|
||||||
|
log.info('Human Build complete...', { logFile });
|
||||||
}
|
}
|
||||||
|
|
||||||
main();
|
main();
|
||||||
|
|
|
@ -8,6 +8,7 @@ For details on other demos see Wiki: [**Demos**](https://github.com/vladmandic/h
|
||||||
`index.html`: Full demo using `Human` ESM module running in Browsers,
|
`index.html`: Full demo using `Human` ESM module running in Browsers,
|
||||||
|
|
||||||
Includes:
|
Includes:
|
||||||
|
|
||||||
- Selectable inputs:
|
- Selectable inputs:
|
||||||
- Sample images
|
- Sample images
|
||||||
- Image via drag & drop
|
- Image via drag & drop
|
||||||
|
@ -37,12 +38,14 @@ Includes:
|
||||||
in `index.js:ui`
|
in `index.js:ui`
|
||||||
|
|
||||||
```js
|
```js
|
||||||
|
const ui = {
|
||||||
console: true, // log messages to browser console
|
console: true, // log messages to browser console
|
||||||
useWorker: true, // use web workers for processing
|
useWorker: true, // use web workers for processing
|
||||||
buffered: true, // should output be buffered between frames
|
buffered: true, // should output be buffered between frames
|
||||||
interpolated: true, // should output be interpolated for smoothness between frames
|
interpolated: true, // should output be interpolated for smoothness between frames
|
||||||
results: false, // show results tree
|
results: false, // show results tree
|
||||||
useWebRTC: false, // use webrtc as camera source instead of local webcam
|
useWebRTC: false, // use webrtc as camera source instead of local webcam
|
||||||
|
};
|
||||||
```
|
```
|
||||||
|
|
||||||
Demo implements several ways to use `Human` library,
|
Demo implements several ways to use `Human` library,
|
||||||
|
|
|
@ -1,4 +0,0 @@
|
||||||
# Human Benchmarks
|
|
||||||
|
|
||||||
- `node.js` runs benchmark using `tensorflow` backend in **NodeJS**
|
|
||||||
- `index.html` runs benchmark using `wasm`, `webgl`, `humangl` and `webgpu` backends in **Browser**
|
|
|
@ -1,86 +0,0 @@
|
||||||
<!DOCTYPE html>
|
|
||||||
<html lang="en">
|
|
||||||
<head>
|
|
||||||
<meta charset="utf-8">
|
|
||||||
<title>Human</title>
|
|
||||||
<meta name="viewport" content="width=device-width" id="viewport">
|
|
||||||
<meta name="keywords" content="Human">
|
|
||||||
<meta name="application-name" content="Human">
|
|
||||||
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
|
||||||
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
|
||||||
<meta name="theme-color" content="#000000">
|
|
||||||
<link rel="manifest" href="../manifest.webmanifest">
|
|
||||||
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
|
|
||||||
<link rel="apple-touch-icon" href="../../assets/icon.png">
|
|
||||||
<style>
|
|
||||||
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
|
|
||||||
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
|
|
||||||
body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
|
|
||||||
body::-webkit-scrollbar { display: none; }
|
|
||||||
.status { position: absolute; width: 100vw; bottom: 10%; text-align: center; font-size: 3rem; font-weight: 100; text-shadow: 2px 2px #303030; }
|
|
||||||
.log { position: absolute; bottom: 0; margin: 0.4rem 0.4rem 0 0.4rem; font-size: 0.9rem; }
|
|
||||||
</style>
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
<div id="status" class="status"></div>
|
|
||||||
<img id="image" src="../../samples/in/group-1.jpg" alt="test image" style="display: none">
|
|
||||||
<div id="log" class="log"></div>
|
|
||||||
<script type="module">
|
|
||||||
import Human from '../../dist/human.esm.js';
|
|
||||||
|
|
||||||
const loop = 20;
|
|
||||||
const backends = ['wasm', 'webgl', 'humangl', 'webgpu'];
|
|
||||||
|
|
||||||
// eslint-disable-next-line no-console
|
|
||||||
const log = (...msg) => console.log(...msg);
|
|
||||||
|
|
||||||
const myConfig = {
|
|
||||||
modelBasePath: 'https://vladmandic.github.io/human/models',
|
|
||||||
debug: true,
|
|
||||||
async: true,
|
|
||||||
cacheSensitivity: 0,
|
|
||||||
filter: { enabled: false },
|
|
||||||
face: {
|
|
||||||
enabled: true,
|
|
||||||
detector: { enabled: true, rotation: false },
|
|
||||||
mesh: { enabled: true },
|
|
||||||
iris: { enabled: true },
|
|
||||||
description: { enabled: true },
|
|
||||||
emotion: { enabled: false },
|
|
||||||
antispoof: { enabled: true },
|
|
||||||
liveness: { enabled: true },
|
|
||||||
},
|
|
||||||
hand: { enabled: true },
|
|
||||||
body: { enabled: true },
|
|
||||||
object: { enabled: true },
|
|
||||||
};
|
|
||||||
|
|
||||||
async function benchmark(backend) {
|
|
||||||
myConfig.backend = backend;
|
|
||||||
const human = new Human(myConfig);
|
|
||||||
await human.tf.ready();
|
|
||||||
log('Human:', human.version);
|
|
||||||
await human.load();
|
|
||||||
const loaded = Object.keys(human.models).filter((a) => human.models[a]);
|
|
||||||
log('Loaded:', loaded);
|
|
||||||
log('Memory state:', human.tf.engine().memory());
|
|
||||||
const element = document.getElementById('image');
|
|
||||||
const processed = await human.image(element);
|
|
||||||
const t0 = human.now();
|
|
||||||
await human.detect(processed.tensor, myConfig);
|
|
||||||
const t1 = human.now();
|
|
||||||
log('Backend:', human.tf.getBackend());
|
|
||||||
log('Warmup:', Math.round(t1 - t0));
|
|
||||||
for (let i = 0; i < loop; i++) await human.detect(processed.tensor, myConfig);
|
|
||||||
const t2 = human.now();
|
|
||||||
log('Average:', Math.round((t2 - t1) / loop));
|
|
||||||
}
|
|
||||||
|
|
||||||
async function main() {
|
|
||||||
for (const backend of backends) await benchmark(backend);
|
|
||||||
}
|
|
||||||
|
|
||||||
window.onload = main;
|
|
||||||
</script>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
|
@ -1,65 +0,0 @@
|
||||||
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
|
|
||||||
const tf = require('@tensorflow/tfjs-node-gpu');
|
|
||||||
const log = require('@vladmandic/pilogger');
|
|
||||||
const canvasJS = require('canvas');
|
|
||||||
const Human = require('../../dist/human.node-gpu.js').default;
|
|
||||||
|
|
||||||
const input = './samples/in/group-1.jpg';
|
|
||||||
const loop = 20;
|
|
||||||
|
|
||||||
const myConfig = {
|
|
||||||
backend: 'tensorflow',
|
|
||||||
modelBasePath: 'https://vladmandic.github.io/human/models',
|
|
||||||
wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
|
|
||||||
debug: true,
|
|
||||||
async: true,
|
|
||||||
cacheSensitivity: 0,
|
|
||||||
filter: { enabled: false },
|
|
||||||
face: {
|
|
||||||
enabled: true,
|
|
||||||
detector: { enabled: true, rotation: false },
|
|
||||||
mesh: { enabled: true },
|
|
||||||
iris: { enabled: true },
|
|
||||||
description: { enabled: true },
|
|
||||||
emotion: { enabled: true },
|
|
||||||
antispoof: { enabled: true },
|
|
||||||
liveness: { enabled: true },
|
|
||||||
},
|
|
||||||
hand: { enabled: true },
|
|
||||||
body: { enabled: true },
|
|
||||||
object: { enabled: true },
|
|
||||||
};
|
|
||||||
|
|
||||||
async function getImage(human) {
|
|
||||||
const img = await canvasJS.loadImage(input);
|
|
||||||
const canvas = canvasJS.createCanvas(img.width, img.height);
|
|
||||||
const ctx = canvas.getContext('2d');
|
|
||||||
ctx.drawImage(img, 0, 0, img.width, img.height);
|
|
||||||
const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
|
|
||||||
const tensor = human.tf.tensor(Array.from(imageData.data), [canvas.height, canvas.width, 4], 'int32'); // create rgba image tensor from flat array
|
|
||||||
log.info('Image:', input, tensor.shape);
|
|
||||||
return tensor;
|
|
||||||
}
|
|
||||||
|
|
||||||
async function main() {
|
|
||||||
log.header();
|
|
||||||
const human = new Human(myConfig);
|
|
||||||
await human.tf.ready();
|
|
||||||
log.info('Human:', human.version);
|
|
||||||
await human.load();
|
|
||||||
const loaded = Object.keys(human.models).filter((a) => human.models[a]);
|
|
||||||
log.info('Loaded:', loaded);
|
|
||||||
log.info('Memory state:', human.tf.engine().memory());
|
|
||||||
const tensor = await getImage(human);
|
|
||||||
log.state('Processing:', tensor['shape']);
|
|
||||||
const t0 = human.now();
|
|
||||||
await human.detect(tensor, myConfig);
|
|
||||||
const t1 = human.now();
|
|
||||||
log.state('Backend:', human.tf.getBackend());
|
|
||||||
log.data('Warmup:', Math.round(t1 - t0));
|
|
||||||
for (let i = 0; i < loop; i++) await human.detect(tensor, myConfig);
|
|
||||||
const t2 = human.now();
|
|
||||||
log.data('Average:', Math.round((t2 - t1) / loop));
|
|
||||||
}
|
|
||||||
|
|
||||||
main();
|
|
|
@ -0,0 +1,160 @@
|
||||||
|
/**
|
||||||
|
* Human demo for browsers
|
||||||
|
*
|
||||||
|
* Demo for face detection
|
||||||
|
*/
|
||||||
|
|
||||||
|
/** @type {Human} */
|
||||||
|
import { Human } from '../../dist/human.esm.js';
|
||||||
|
|
||||||
|
let loader;
|
||||||
|
|
||||||
|
const humanConfig = { // user configuration for human, used to fine-tune behavior
|
||||||
|
cacheSensitivity: 0,
|
||||||
|
debug: true,
|
||||||
|
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
|
||||||
|
filter: { enabled: true, equalization: false, flip: false },
|
||||||
|
face: {
|
||||||
|
enabled: true,
|
||||||
|
detector: { rotation: false, maxDetected: 100, minConfidence: 0.2, return: true, square: false },
|
||||||
|
iris: { enabled: true },
|
||||||
|
description: { enabled: true },
|
||||||
|
emotion: { enabled: true },
|
||||||
|
antispoof: { enabled: true },
|
||||||
|
liveness: { enabled: true },
|
||||||
|
},
|
||||||
|
body: { enabled: false },
|
||||||
|
hand: { enabled: false },
|
||||||
|
object: { enabled: false },
|
||||||
|
gesture: { enabled: false },
|
||||||
|
segmentation: { enabled: false },
|
||||||
|
};
|
||||||
|
|
||||||
|
const human = new Human(humanConfig); // new instance of human
|
||||||
|
|
||||||
|
export const showLoader = (msg) => { loader.setAttribute('msg', msg); loader.style.display = 'block'; };
|
||||||
|
export const hideLoader = () => loader.style.display = 'none';
|
||||||
|
|
||||||
|
class ComponentLoader extends HTMLElement { // watch for attributes
|
||||||
|
message = document.createElement('div');
|
||||||
|
|
||||||
|
static get observedAttributes() { return ['msg']; }
|
||||||
|
|
||||||
|
attributeChangedCallback(_name, _prevVal, currVal) {
|
||||||
|
this.message.innerHTML = currVal;
|
||||||
|
}
|
||||||
|
|
||||||
|
connectedCallback() { // triggered on insert
|
||||||
|
this.attachShadow({ mode: 'open' });
|
||||||
|
const css = document.createElement('style');
|
||||||
|
css.innerHTML = `
|
||||||
|
.loader-container { top: 450px; justify-content: center; position: fixed; width: 100%; }
|
||||||
|
.loader-message { font-size: 1.5rem; padding: 1rem; }
|
||||||
|
.loader { width: 300px; height: 300px; border: 3px solid transparent; border-radius: 50%; border-top: 4px solid #f15e41; animation: spin 4s linear infinite; position: relative; }
|
||||||
|
.loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: 4px solid transparent; }
|
||||||
|
.loader::before { border-top-color: #bad375; animation: 3s spin linear infinite; }
|
||||||
|
.loader::after { border-top-color: #26a9e0; animation: spin 1.5s linear infinite; }
|
||||||
|
@keyframes spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } }
|
||||||
|
`;
|
||||||
|
const container = document.createElement('div');
|
||||||
|
container.id = 'loader-container';
|
||||||
|
container.className = 'loader-container';
|
||||||
|
loader = document.createElement('div');
|
||||||
|
loader.id = 'loader';
|
||||||
|
loader.className = 'loader';
|
||||||
|
this.message.id = 'loader-message';
|
||||||
|
this.message.className = 'loader-message';
|
||||||
|
this.message.innerHTML = '';
|
||||||
|
container.appendChild(this.message);
|
||||||
|
container.appendChild(loader);
|
||||||
|
this.shadowRoot?.append(css, container);
|
||||||
|
loader = this; // eslint-disable-line @typescript-eslint/no-this-alias
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
customElements.define('component-loader', ComponentLoader);
|
||||||
|
|
||||||
|
function addFace(face, source) {
|
||||||
|
const deg = (rad) => Math.round((rad || 0) * 180 / Math.PI);
|
||||||
|
const canvas = document.createElement('canvas');
|
||||||
|
const emotion = face.emotion?.map((e) => `${Math.round(100 * e.score)}% ${e.emotion}`) || [];
|
||||||
|
const rotation = `pitch ${deg(face.rotation?.angle.pitch)}° | roll ${deg(face.rotation?.angle.roll)}° | yaw ${deg(face.rotation?.angle.yaw)}°`;
|
||||||
|
const gaze = `direction ${deg(face.rotation?.gaze.bearing)}° strength ${Math.round(100 * (face.rotation.gaze.strength || 0))}%`;
|
||||||
|
canvas.title = `
|
||||||
|
source: ${source}
|
||||||
|
score: ${Math.round(100 * face.boxScore)}% detection ${Math.round(100 * face.faceScore)}% analysis
|
||||||
|
age: ${face.age} years | gender: ${face.gender} score ${Math.round(100 * face.genderScore)}%
|
||||||
|
emotion: ${emotion.join(' | ')}
|
||||||
|
head rotation: ${rotation}
|
||||||
|
eyes gaze: ${gaze}
|
||||||
|
camera distance: ${face.distance}m | ${Math.round(100 * face.distance / 2.54)}in
|
||||||
|
check: ${Math.round(100 * face.real)}% real ${Math.round(100 * face.live)}% live
|
||||||
|
`.replace(/ {2,}/g, ' '); // collapse runs of spaces left over from template-literal indentation
|
||||||
|
canvas.onclick = (e) => {
|
||||||
|
e.preventDefault();
|
||||||
|
document.getElementById('description').innerHTML = canvas.title;
|
||||||
|
};
|
||||||
|
human.draw.tensor(face.tensor, canvas);
|
||||||
|
human.tf.dispose(face.tensor);
|
||||||
|
return canvas;
|
||||||
|
}
|
||||||
|
|
||||||
|
async function addFaces(imgEl) {
|
||||||
|
showLoader('human: busy');
|
||||||
|
const faceEl = document.getElementById('faces');
|
||||||
|
faceEl.innerHTML = '';
|
||||||
|
const res = await human.detect(imgEl);
|
||||||
|
console.log(res); // eslint-disable-line no-console
|
||||||
|
document.getElementById('description').innerHTML = `detected ${res.face.length} faces`;
|
||||||
|
for (const face of res.face) {
|
||||||
|
const canvas = addFace(face, imgEl.src.substring(0, 64));
|
||||||
|
faceEl.appendChild(canvas);
|
||||||
|
}
|
||||||
|
hideLoader();
|
||||||
|
}
|
||||||
|
|
||||||
|
function addImage(imageUri) {
|
||||||
|
const imgEl = new Image(256, 256);
|
||||||
|
imgEl.onload = () => {
|
||||||
|
const images = document.getElementById('images');
|
||||||
|
images.appendChild(imgEl); // add image if loaded ok
|
||||||
|
images.scroll(images?.offsetWidth, 0);
|
||||||
|
};
|
||||||
|
imgEl.onerror = () => console.error('addImage', { imageUri }); // eslint-disable-line no-console
|
||||||
|
imgEl.onclick = () => addFaces(imgEl);
|
||||||
|
imgEl.title = imageUri.substring(0, 64);
|
||||||
|
imgEl.src = encodeURI(imageUri);
|
||||||
|
}
|
||||||
|
|
||||||
|
async function initDragAndDrop() {
|
||||||
|
const reader = new FileReader();
|
||||||
|
reader.onload = async (e) => {
|
||||||
|
if (e.target.result.startsWith('data:image')) await addImage(e.target.result);
|
||||||
|
};
|
||||||
|
document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
|
||||||
|
document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
|
||||||
|
document.body.addEventListener('dragover', (evt) => evt.preventDefault());
|
||||||
|
document.body.addEventListener('drop', async (evt) => {
|
||||||
|
evt.preventDefault();
|
||||||
|
evt.dataTransfer.dropEffect = 'copy';
|
||||||
|
for (const f of evt.dataTransfer.files) reader.readAsDataURL(f);
|
||||||
|
});
|
||||||
|
document.body.onclick = (e) => {
|
||||||
|
if (e.target.localName !== 'canvas') document.getElementById('description').innerHTML = '';
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
async function main() {
|
||||||
|
showLoader('loading models');
|
||||||
|
await human.load();
|
||||||
|
showLoader('compiling models');
|
||||||
|
await human.warmup();
|
||||||
|
showLoader('loading images');
|
||||||
|
const images = ['group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg', 'solvay1927.jpg', 'stock-group-1.jpg', 'stock-group-2.jpg', 'stock-models-6.jpg', 'stock-models-7.jpg'];
|
||||||
|
const imageUris = images.map((a) => `../../samples/in/${a}`);
|
||||||
|
for (let i = 0; i < imageUris.length; i++) addImage(imageUris[i]);
|
||||||
|
initDragAndDrop();
|
||||||
|
hideLoader();
|
||||||
|
}
|
||||||
|
|
||||||
|
window.onload = main;
|
|
@ -0,0 +1,43 @@
|
||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="utf-8">
|
||||||
|
<title>Human</title>
|
||||||
|
<!-- <meta http-equiv="content-type" content="text/html; charset=utf-8"> -->
|
||||||
|
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
|
||||||
|
<meta name="keywords" content="Human">
|
||||||
|
<meta name="application-name" content="Human">
|
||||||
|
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||||
|
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||||
|
<meta name="theme-color" content="#000000">
|
||||||
|
<link rel="manifest" href="../manifest.webmanifest">
|
||||||
|
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
|
||||||
|
<link rel="apple-touch-icon" href="../../assets/icon.png">
|
||||||
|
<script src="./facedetect.js" type="module"></script>
|
||||||
|
<style>
|
||||||
|
img { object-fit: contain; }
|
||||||
|
img:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
|
||||||
|
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
|
||||||
|
html { font-family: 'Lato', 'Segoe UI'; font-size: 24px; font-variant: small-caps; }
|
||||||
|
body { margin: 24px; background: black; color: white; overflow: hidden; text-align: -webkit-center; width: 100vw; height: 100vh; }
|
||||||
|
::-webkit-scrollbar { height: 8px; border: 0; border-radius: 0; }
|
||||||
|
::-webkit-scrollbar-thumb { background: grey }
|
||||||
|
::-webkit-scrollbar-track { margin: 3px; }
|
||||||
|
canvas { width: 192px; height: 192px; margin: 2px; padding: 2px; cursor: grab; transform: scale(1.00); transition : all 0.3s ease; }
|
||||||
|
canvas:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<component-loader></component-loader>
|
||||||
|
<div style="display: flex">
|
||||||
|
<div>
|
||||||
|
<div style="margin: 24px">select image to show detected faces<br>drag & drop to add your images</div>
|
||||||
|
<div id="images" style="display: flex; width: 98vw; overflow-x: auto; overflow-y: hidden; scroll-behavior: smooth"></div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div id="list" style="height: 10px"></div>
|
||||||
|
<div style="margin: 24px">hover or click on face to show details</div>
|
||||||
|
<div id="faces" style="overflow-y: auto"></div>
|
||||||
|
<div id="description" style="white-space: pre;"></div>
|
||||||
|
</body>
|
||||||
|
</html>
|
|
@ -38,4 +38,5 @@ designed to serve as a quick check when used together with other indicators:
|
||||||
**FaceID** is compatible with
|
**FaceID** is compatible with
|
||||||
- `faceres.json` (default) performs combined age/gender/descriptor analysis
|
- `faceres.json` (default) performs combined age/gender/descriptor analysis
|
||||||
- `faceres-deep.json` higher resolution variation of `faceres`
|
- `faceres-deep.json` higher resolution variation of `faceres`
|
||||||
|
- `insightface` alternative model for face descriptor analysis
|
||||||
- `mobilefacenet` alternative model for face descriptor analysis
|
- `mobilefacenet` alternative model for face descriptor analysis
|
||||||
|
|
|
@ -19,14 +19,23 @@
|
||||||
body { margin: 0; padding: 16px; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
|
body { margin: 0; padding: 16px; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
|
||||||
body::-webkit-scrollbar { display: none; }
|
body::-webkit-scrollbar { display: none; }
|
||||||
.button { padding: 2px; cursor: pointer; box-shadow: 2px 2px black; width: 64px; text-align: center; place-content: center; margin-left: 16px; height: 16px; display: none }
|
.button { padding: 2px; cursor: pointer; box-shadow: 2px 2px black; width: 64px; text-align: center; place-content: center; margin-left: 16px; height: 16px; display: none }
|
||||||
.ok { position: absolute; top: 64px; right: 20px; width: 100px; background-color: grey; padding: 4px; color: black; font-size: 14px }
|
.ok { position: absolute; top: 64px; right: 20px; width: 150px; background-color: grey; padding: 4px; color: black; font-size: 14px }
|
||||||
</style>
|
</style>
|
||||||
</head>
|
</head>
|
||||||
<body>
|
<body>
|
||||||
|
<div style="padding: 8px">
|
||||||
|
<h1 style="margin: 0">faceid demo using human library</h1>
|
||||||
|
look directly at the camera and make sure that detection passes all of the required tests noted on the right-hand side of the screen<br>
|
||||||
|
if the input does not satisfy the tests within the specified timeout, no image will be selected<br>
|
||||||
|
once the face image is approved, it will be compared with the existing face database<br>
|
||||||
|
you can also store the face descriptor with a label in the browser's indexdb for future use<br>
|
||||||
|
<br>
|
||||||
|
<i>note: this is not equivalent to full faceid methods as used by modern mobile phones or windows hello<br>
|
||||||
|
as they rely on additional infrared and depth sensors, not just the camera image, for additional levels of security</i>
|
||||||
|
</div>
|
||||||
<canvas id="canvas" style="padding: 8px"></canvas>
|
<canvas id="canvas" style="padding: 8px"></canvas>
|
||||||
<canvas id="source" style="padding: 8px"></canvas>
|
<canvas id="source" style="padding: 8px"></canvas>
|
||||||
<video id="video" playsinline style="display: none"></video>
|
<video id="video" playsinline style="display: none"></video>
|
||||||
<pre id="fps" style="position: absolute; bottom: 16px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
|
|
||||||
<pre id="log" style="padding: 8px"></pre>
|
<pre id="log" style="padding: 8px"></pre>
|
||||||
<div id="match" style="display: none; padding: 8px">
|
<div id="match" style="display: none; padding: 8px">
|
||||||
<label for="name">name:</label>
|
<label for="name">name:</label>
|
||||||
|
@ -34,7 +43,7 @@
|
||||||
<span id="save" class="button" style="background-color: royalblue">save</span>
|
<span id="save" class="button" style="background-color: royalblue">save</span>
|
||||||
<span id="delete" class="button" style="background-color: lightcoral">delete</span>
|
<span id="delete" class="button" style="background-color: lightcoral">delete</span>
|
||||||
</div>
|
</div>
|
||||||
<div id="retry" class="button" style="background-color: darkslategray; width: 350px; margin-top: 32px; padding: 4px">retry</div>
|
<div id="retry" class="button" style="background-color: darkslategray; width: 93%; margin-top: 32px; padding: 12px">retry</div>
|
||||||
<div id="ok"></div>
|
<div id="ok"></div>
|
||||||
</body>
|
</body>
|
||||||
</html>
|
</html>
|
||||||
|
|
|
@ -7,17 +7,20 @@
|
||||||
* @license MIT
|
* @license MIT
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { Human, TensorLike, FaceResult } from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
|
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
|
||||||
import * as indexDb from './indexdb'; // methods to deal with indexdb
|
import * as indexDb from './indexdb'; // methods to deal with indexdb
|
||||||
|
|
||||||
const humanConfig = { // user configuration for human, used to fine-tune behavior
|
const humanConfig = { // user configuration for human, used to fine-tune behavior
|
||||||
|
cacheSensitivity: 0.01,
|
||||||
modelBasePath: '../../models',
|
modelBasePath: '../../models',
|
||||||
filter: { equalization: true }, // let's run with the histogram equalizer
|
filter: { enabled: true, equalization: true }, // let's run with the histogram equalizer
|
||||||
|
debug: true,
|
||||||
face: {
|
face: {
|
||||||
enabled: true,
|
enabled: true,
|
||||||
detector: { rotation: true, return: true, cropFactor: 1.6, mask: false }, // return tensor is used to get detected face image
|
detector: { rotation: true, return: true, mask: false }, // return tensor is used to get detected face image
|
||||||
description: { enabled: true }, // default model for face descriptor extraction is faceres
|
description: { enabled: true }, // default model for face descriptor extraction is faceres
|
||||||
mobilefacenet: { enabled: false, modelPath: 'https://vladmandic.github.io/human-models/models/mobilefacenet.json' }, // alternative model
|
// mobilefacenet: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/mobilefacenet.json' }, // alternative model
|
||||||
|
// insightface: { enabled: true, modelPath: 'https://vladmandic.github.io/insightface/models/insightface-mobilenet-swish.json' }, // alternative model
|
||||||
iris: { enabled: true }, // needed to determine gaze direction
|
iris: { enabled: true }, // needed to determine gaze direction
|
||||||
emotion: { enabled: false }, // not needed
|
emotion: { enabled: false }, // not needed
|
||||||
antispoof: { enabled: true }, // enable optional antispoof module
|
antispoof: { enabled: true }, // enable optional antispoof module
|
||||||
|
@ -35,29 +38,50 @@ const matchOptions = { order: 2, multiplier: 25, min: 0.2, max: 0.8 }; // for fa
|
||||||
const options = {
|
const options = {
|
||||||
minConfidence: 0.6, // overal face confidence for box, face, gender, real, live
|
minConfidence: 0.6, // overal face confidence for box, face, gender, real, live
|
||||||
minSize: 224, // min input to face descriptor model before degradation
|
minSize: 224, // min input to face descriptor model before degradation
|
||||||
maxTime: 10000, // max time before giving up
|
maxTime: 30000, // max time before giving up
|
||||||
blinkMin: 10, // minimum duration of a valid blink
|
blinkMin: 10, // minimum duration of a valid blink
|
||||||
blinkMax: 800, // maximum duration of a valid blink
|
blinkMax: 800, // maximum duration of a valid blink
|
||||||
threshold: 0.5, // minimum similarity
|
threshold: 0.5, // minimum similarity
|
||||||
|
distanceMin: 0.4, // closest that the face is allowed to be to the camera, in meters
|
||||||
|
distanceMax: 1.0, // farthest that the face is allowed to be to the camera, in meters
|
||||||
mask: humanConfig.face.detector.mask,
|
mask: humanConfig.face.detector.mask,
|
||||||
rotation: humanConfig.face.detector.rotation,
|
rotation: humanConfig.face.detector.rotation,
|
||||||
cropFactor: humanConfig.face.detector.cropFactor,
|
|
||||||
...matchOptions,
|
...matchOptions,
|
||||||
};
|
};
|
||||||
|
|
||||||
const ok = { // must meet all rules
|
const ok: Record<string, { status: boolean | undefined, val: number }> = { // must meet all rules
|
||||||
faceCount: false,
|
faceCount: { status: false, val: 0 },
|
||||||
faceConfidence: false,
|
faceConfidence: { status: false, val: 0 },
|
||||||
facingCenter: false,
|
facingCenter: { status: false, val: 0 },
|
||||||
lookingCenter: false,
|
lookingCenter: { status: false, val: 0 },
|
||||||
blinkDetected: false,
|
blinkDetected: { status: false, val: 0 },
|
||||||
faceSize: false,
|
faceSize: { status: false, val: 0 },
|
||||||
antispoofCheck: false,
|
antispoofCheck: { status: false, val: 0 },
|
||||||
livenessCheck: false,
|
livenessCheck: { status: false, val: 0 },
|
||||||
elapsedMs: 0, // total time while waiting for valid face
|
distance: { status: false, val: 0 },
|
||||||
|
age: { status: false, val: 0 },
|
||||||
|
gender: { status: false, val: 0 },
|
||||||
|
timeout: { status: true, val: 0 },
|
||||||
|
descriptor: { status: false, val: 0 },
|
||||||
|
elapsedMs: { status: undefined, val: 0 }, // total time while waiting for valid face
|
||||||
|
detectFPS: { status: undefined, val: 0 }, // mark detection fps performance
|
||||||
|
drawFPS: { status: undefined, val: 0 }, // mark redraw fps performance
|
||||||
};
|
};
|
||||||
-const allOk = () => ok.faceCount && ok.faceSize && ok.blinkDetected && ok.facingCenter && ok.lookingCenter && ok.faceConfidence && ok.antispoofCheck && ok.livenessCheck;
-const current: { face: FaceResult | null, record: indexDb.FaceRecord | null } = { face: null, record: null }; // current face record and matched database record
+const allOk = () => ok.faceCount.status
+  && ok.faceSize.status
+  && ok.blinkDetected.status
+  && ok.facingCenter.status
+  && ok.lookingCenter.status
+  && ok.faceConfidence.status
+  && ok.antispoofCheck.status
+  && ok.livenessCheck.status
+  && ok.distance.status
+  && ok.descriptor.status
+  && ok.age.status
+  && ok.gender.status;
+
+const current: { face: H.FaceResult | null, record: indexDb.FaceRecord | null } = { face: null, record: null }; // current face record and matched database record

 const blink = { // internal timers for blink start/end/duration
   start: 0,

@@ -66,9 +90,9 @@ const blink = { // internal timers for blink start/end/duration
 };

 // let db: Array<{ name: string, source: string, embedding: number[] }> = []; // holds loaded face descriptor database
-const human = new Human(humanConfig); // create instance of human with overrides from user configuration
+const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration

-human.env['perfadd'] = false; // is performance data showing instant or total values
+human.env.perfadd = false; // is performance data showing instant or total values
 human.draw.options.font = 'small-caps 18px "Lato"'; // set font used to draw labels when using draw methods
 human.draw.options.lineHeight = 20;

@@ -86,105 +110,119 @@ const dom = { // grab instances of dom objects so we dont have to look them up l
   ok: document.getElementById('ok') as HTMLDivElement,
 };
 const timestamp = { detect: 0, draw: 0 }; // holds information used to calculate performance and possible memory leaks
-const fps = { detect: 0, draw: 0 }; // holds calculated fps information for both detect and screen refresh
 let startTime = 0;

 const log = (...msg) => { // helper method to output messages
   dom.log.innerText += msg.join(' ') + '\n';
-  // eslint-disable-next-line no-console
-  console.log(...msg);
+  console.log(...msg); // eslint-disable-line no-console
 };
-const printFPS = (msg) => dom.fps.innerText = msg; // print status element

 async function webCam() { // initialize webcam
-  printFPS('starting webcam...');
   // @ts-ignore resizeMode is not yet defined in tslib
   const cameraOptions: MediaStreamConstraints = { audio: false, video: { facingMode: 'user', resizeMode: 'none', width: { ideal: document.body.clientWidth } } };
   const stream: MediaStream = await navigator.mediaDevices.getUserMedia(cameraOptions);
   const ready = new Promise((resolve) => { dom.video.onloadeddata = () => resolve(true); });
   dom.video.srcObject = stream;
-  dom.video.play();
+  void dom.video.play();
   await ready;
   dom.canvas.width = dom.video.videoWidth;
   dom.canvas.height = dom.video.videoHeight;
+  dom.canvas.style.width = '50%';
+  dom.canvas.style.height = '50%';
   if (human.env.initial) log('video:', dom.video.videoWidth, dom.video.videoHeight, '|', stream.getVideoTracks()[0].label);
   dom.canvas.onclick = () => { // pause when clicked on screen and resume on next click
-    if (dom.video.paused) dom.video.play();
+    if (dom.video.paused) void dom.video.play();
     else dom.video.pause();
   };
 }

 async function detectionLoop() { // main detection loop
   if (!dom.video.paused) {
-    if (current.face && current.face.tensor) human.tf.dispose(current.face.tensor); // dispose previous tensor
+    if (current.face?.tensor) human.tf.dispose(current.face.tensor); // dispose previous tensor
     await human.detect(dom.video); // actual detection; we're not capturing output in a local variable as it can also be reached via human.result
     const now = human.now();
-    fps.detect = 1000 / (now - timestamp.detect);
+    ok.detectFPS.val = Math.round(10000 / (now - timestamp.detect)) / 10;
     timestamp.detect = now;
     requestAnimationFrame(detectionLoop); // start new frame immediately
   }
 }

-async function validationLoop(): Promise<FaceResult> { // main screen refresh loop
-  const interpolated = await human.next(human.result); // smoothen result using last-known results
-  await human.draw.canvas(dom.video, dom.canvas); // draw canvas to screen
-  await human.draw.all(dom.canvas, interpolated); // draw labels, boxes, lines, etc.
-  const now = human.now();
-  fps.draw = 1000 / (now - timestamp.draw);
-  timestamp.draw = now;
-  printFPS(`fps: ${fps.detect.toFixed(1).padStart(5, ' ')} detect | ${fps.draw.toFixed(1).padStart(5, ' ')} draw`); // write status
-  ok.faceCount = human.result.face.length === 1; // must be exactly detected face
-  if (ok.faceCount) { // skip the rest if no face
-    const gestures: string[] = Object.values(human.result.gesture).map((gesture) => gesture.gesture); // flatten all gestures
-    if (gestures.includes('blink left eye') || gestures.includes('blink right eye')) blink.start = human.now(); // blink starts when eyes get closed
-    if (blink.start > 0 && !gestures.includes('blink left eye') && !gestures.includes('blink right eye')) blink.end = human.now(); // if blink started how long until eyes are back open
-    ok.blinkDetected = ok.blinkDetected || (Math.abs(blink.end - blink.start) > options.blinkMin && Math.abs(blink.end - blink.start) < options.blinkMax);
-    if (ok.blinkDetected && blink.time === 0) blink.time = Math.trunc(blink.end - blink.start);
-    ok.facingCenter = gestures.includes('facing center');
-    ok.lookingCenter = gestures.includes('looking center'); // must face camera and look at camera
-    ok.faceConfidence = (human.result.face[0].boxScore || 0) > options.minConfidence && (human.result.face[0].faceScore || 0) > options.minConfidence && (human.result.face[0].genderScore || 0) > options.minConfidence;
-    ok.antispoofCheck = (human.result.face[0].real || 0) > options.minConfidence;
-    ok.livenessCheck = (human.result.face[0].live || 0) > options.minConfidence;
-    ok.faceSize = human.result.face[0].box[2] >= options.minSize && human.result.face[0].box[3] >= options.minSize;
-  }
+function drawValidationTests() {
   let y = 32;
   for (const [key, val] of Object.entries(ok)) {
     let el = document.getElementById(`ok-${key}`);
     if (!el) {
       el = document.createElement('div');
+      el.id = `ok-${key}`;
       el.innerText = key;
       el.className = 'ok';
       el.style.top = `${y}px`;
       dom.ok.appendChild(el);
     }
-    if (typeof val === 'boolean') el.style.backgroundColor = val ? 'lightgreen' : 'lightcoral';
-    else el.innerText = `${key}:${val}`;
+    if (typeof val.status === 'boolean') el.style.backgroundColor = val.status ? 'lightgreen' : 'lightcoral';
+    const status = val.status ? 'ok' : 'fail';
+    el.innerText = `${key}: ${val.val === 0 ? status : val.val}`;
     y += 28;
   }
-  if (allOk()) { // all criteria met
+}

+async function validationLoop(): Promise<H.FaceResult> { // main screen refresh loop
+  const interpolated = human.next(human.result); // smoothen result using last-known results
+  human.draw.canvas(dom.video, dom.canvas); // draw canvas to screen
+  await human.draw.all(dom.canvas, interpolated); // draw labels, boxes, lines, etc.
+  const now = human.now();
+  ok.drawFPS.val = Math.round(10000 / (now - timestamp.draw)) / 10;
+  timestamp.draw = now;
+  ok.faceCount.val = human.result.face.length;
+  ok.faceCount.status = ok.faceCount.val === 1; // must be exactly detected face
+  if (ok.faceCount.status) { // skip the rest if no face
+    const gestures: string[] = Object.values(human.result.gesture).map((gesture: H.GestureResult) => gesture.gesture); // flatten all gestures
+    if (gestures.includes('blink left eye') || gestures.includes('blink right eye')) blink.start = human.now(); // blink starts when eyes get closed
+    if (blink.start > 0 && !gestures.includes('blink left eye') && !gestures.includes('blink right eye')) blink.end = human.now(); // if blink started how long until eyes are back open
+    ok.blinkDetected.status = ok.blinkDetected.status || (Math.abs(blink.end - blink.start) > options.blinkMin && Math.abs(blink.end - blink.start) < options.blinkMax);
+    if (ok.blinkDetected.status && blink.time === 0) blink.time = Math.trunc(blink.end - blink.start);
+    ok.facingCenter.status = gestures.includes('facing center');
+    ok.lookingCenter.status = gestures.includes('looking center'); // must face camera and look at camera
+    ok.faceConfidence.val = human.result.face[0].faceScore || human.result.face[0].boxScore || 0;
+    ok.faceConfidence.status = ok.faceConfidence.val >= options.minConfidence;
+    ok.antispoofCheck.val = human.result.face[0].real || 0;
+    ok.antispoofCheck.status = ok.antispoofCheck.val >= options.minConfidence;
+    ok.livenessCheck.val = human.result.face[0].live || 0;
+    ok.livenessCheck.status = ok.livenessCheck.val >= options.minConfidence;
+    ok.faceSize.val = Math.min(human.result.face[0].box[2], human.result.face[0].box[3]);
+    ok.faceSize.status = ok.faceSize.val >= options.minSize;
+    ok.distance.val = human.result.face[0].distance || 0;
+    ok.distance.status = (ok.distance.val >= options.distanceMin) && (ok.distance.val <= options.distanceMax);
+    ok.descriptor.val = human.result.face[0].embedding?.length || 0;
+    ok.descriptor.status = ok.descriptor.val > 0;
+    ok.age.val = human.result.face[0].age || 0;
+    ok.age.status = ok.age.val > 0;
+    ok.gender.val = human.result.face[0].genderScore || 0;
+    ok.gender.status = ok.gender.val >= options.minConfidence;
+  }
+  // run again
+  ok.timeout.status = ok.elapsedMs.val <= options.maxTime;
+  drawValidationTests();
+  if (allOk() || !ok.timeout.status) { // all criteria met
     dom.video.pause();
     return human.result.face[0];
   }
-  if (ok.elapsedMs > options.maxTime) { // give up
-    dom.video.pause();
-    return human.result.face[0];
-  } else { // run again
-    ok.elapsedMs = Math.trunc(human.now() - startTime);
+  ok.elapsedMs.val = Math.trunc(human.now() - startTime);
   return new Promise((resolve) => {
     setTimeout(async () => {
-      const res = await validationLoop(); // run validation loop until conditions are met
-      if (res) resolve(human.result.face[0]); // recursive promise resolve
+      await validationLoop(); // run validation loop until conditions are met
+      resolve(human.result.face[0]); // recursive promise resolve
     }, 30); // use to slow down refresh from max refresh rate to target of 30 fps
   });
-  }
 }

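The `return new Promise` tail above is a recursive-promise throttle: each pass waits 30 ms before re-running the loop, which caps redraws near 30 fps instead of the display's full refresh rate. The same pattern in isolation (a sketch; `pollUntil` is a hypothetical name, not part of the Human API):

```ts
// re-run an async check every delayMs until it produces a non-null result
async function pollUntil<T>(check: () => Promise<T | null>, delayMs = 30): Promise<T> {
  const result = await check();
  if (result !== null) return result;
  return new Promise((resolve) => {
    setTimeout(() => resolve(pollUntil(check, delayMs)), delayMs); // recursive promise resolve
  });
}
```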
 async function saveRecords() {
   if (dom.name.value.length > 0) {
     const image = dom.canvas.getContext('2d')?.getImageData(0, 0, dom.canvas.width, dom.canvas.height) as ImageData;
     const rec = { id: 0, name: dom.name.value, descriptor: current.face?.embedding as number[], image };
     await indexDb.save(rec);
-    log('saved face record:', rec.name);
+    log('saved face record:', rec.name, 'descriptor length:', current.face?.embedding?.length);
+    log('known face records:', await indexDb.count());
   } else {
     log('invalid name');
   }

@@ -197,20 +235,21 @@ async function deleteRecord() {
 }

 async function detectFace() {
+  dom.canvas.style.height = '';
   dom.canvas.getContext('2d')?.clearRect(0, 0, options.minSize, options.minSize);
-  if (!current.face || !current.face.tensor || !current.face.embedding) return false;
+  if (!current?.face?.tensor || !current?.face?.embedding) return false;
-  // eslint-disable-next-line no-console
-  console.log('face record:', current.face);
+  console.log('face record:', current.face); // eslint-disable-line no-console
+  log(`detected face: ${current.face.gender} ${current.face.age || 0}y distance ${100 * (current.face.distance || 0)}cm/${Math.round(100 * (current.face.distance || 0) / 2.54)}in`);
-  human.tf.browser.toPixels(current.face.tensor as unknown as TensorLike, dom.canvas);
+  await human.draw.tensor(current.face.tensor, dom.canvas);
   if (await indexDb.count() === 0) {
-    log('face database is empty');
+    log('face database is empty: nothing to compare face with');
     document.body.style.background = 'black';
     dom.delete.style.display = 'none';
     return false;
   }
   const db = await indexDb.load();
-  const descriptors = db.map((rec) => rec.descriptor);
+  const descriptors = db.map((rec) => rec.descriptor).filter((desc) => desc.length > 0);
-  const res = await human.match(current.face.embedding, descriptors, matchOptions);
+  const res = human.match.find(current.face.embedding, descriptors, matchOptions);
   current.record = db[res.index] || null;
   if (current.record) {
     log(`best match: ${current.record.name} | id: ${current.record.id} | similarity: ${Math.round(1000 * res.similarity) / 10}%`);

@@ -223,17 +262,20 @@ async function detectFace() {
 }

 async function main() { // main entry point
-  ok.faceCount = false;
-  ok.faceConfidence = false;
-  ok.facingCenter = false;
-  ok.blinkDetected = false;
-  ok.faceSize = false;
-  ok.antispoofCheck = false;
-  ok.livenessCheck = false;
-  ok.elapsedMs = 0;
+  ok.faceCount.status = false;
+  ok.faceConfidence.status = false;
+  ok.facingCenter.status = false;
+  ok.blinkDetected.status = false;
+  ok.faceSize.status = false;
+  ok.antispoofCheck.status = false;
+  ok.livenessCheck.status = false;
+  ok.age.status = false;
+  ok.gender.status = false;
+  ok.elapsedMs.val = 0;
   dom.match.style.display = 'none';
   dom.retry.style.display = 'none';
   dom.source.style.display = 'none';
+  dom.canvas.style.height = '50%';
   document.body.style.background = 'black';
   await webCam();
   await detectionLoop(); // start detection loop

@@ -251,19 +293,21 @@ async function main() { // main entry point
   if (!allOk()) { // are all criteria met?
     log('did not find valid face');
     return false;
-  } else {
-    return detectFace();
   }
+  return detectFace();
 }

 async function init() {
   log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
   log('options:', JSON.stringify(options).replace(/{|}|"|\[|\]/g, '').replace(/,/g, ' '));
-  printFPS('loading...');
+  log('initializing webcam...');
-  log('known face records:', await indexDb.count());
   await webCam(); // start webcam
+  log('loading human models...');
   await human.load(); // preload all models
-  printFPS('initializing...');
+  log('initializing human...');
+  log('face embedding model:', humanConfig.face.description.enabled ? 'faceres' : '', humanConfig.face['mobilefacenet']?.enabled ? 'mobilefacenet' : '', humanConfig.face['insightface']?.enabled ? 'insightface' : '');
+  log('loading face database...');
+  log('known face records:', await indexDb.count());
   dom.retry.addEventListener('click', main);
   dom.save.addEventListener('click', saveRecords);
   dom.delete.addEventListener('click', deleteRecord);
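Pieced together, the control flow of this demo is: `init()` logs versions, preloads models and wires up the buttons; `main()` drives the webcam, detection loop and validation loop until `allOk()` passes or `options.maxTime` expires; `detectFace()` then matches the captured descriptor against the IndexedDB records. A condensed sketch of that sequence (assuming the functions defined above; the `run` wrapper is hypothetical):

```ts
// hypothetical wrapper showing how the pieces above compose
async function run() {
  await init(); // log versions, start webcam, preload models, wire buttons
  const found = await main(); // detect + validate until allOk() or options.maxTime
  if (!found) log('no valid face found within', options.maxTime, 'ms');
}
```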

@@ -3,10 +3,9 @@ let db: IDBDatabase; // instance of indexdb
 const database = 'human';
 const table = 'person';

-export type FaceRecord = { id: number, name: string, descriptor: number[], image: ImageData };
+export interface FaceRecord { id: number, name: string, descriptor: number[], image: ImageData }

-// eslint-disable-next-line no-console
-const log = (...msg) => console.log('indexdb', ...msg);
+const log = (...msg) => console.log('indexdb', ...msg); // eslint-disable-line no-console

 export async function open() {
   if (db) return true;

@@ -19,7 +18,7 @@ export async function open() {
       db.createObjectStore(table, { keyPath: 'id', autoIncrement: true });
     };
     request.onsuccess = (evt) => { // open
-      db = (evt.target as IDBOpenDBRequest).result as IDBDatabase;
+      db = (evt.target as IDBOpenDBRequest).result;
       log('open:', db);
       resolve(true);
     };

@@ -27,7 +26,7 @@ export async function open() {
 }

 export async function load(): Promise<FaceRecord[]> {
-  const faceDB: Array<FaceRecord> = [];
+  const faceDB: FaceRecord[] = [];
   if (!db) await open(); // open or create if not already done
   return new Promise((resolve) => {
     const cursor: IDBRequest = db.transaction([table], 'readwrite').objectStore(table).openCursor(null, 'next');
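Typical use of this module from the demo side looks like the sketch below (record values are illustrative; `save()` and `count()` are the exports already used elsewhere in this diff):

```ts
import * as indexDb from './indexdb';

const rec: indexDb.FaceRecord = {
  id: 0, // assigned by IndexedDB autoIncrement on save
  name: 'test user', // illustrative value
  descriptor: Array(1024).fill(0), // normally human.result.face[0].embedding
  image: new ImageData(224, 224), // normally the cropped face canvas pixels
};
await indexDb.save(rec);
console.log('records:', await indexDb.count());
const all = await indexDb.load(); // FaceRecord[]
```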

@@ -11,7 +11,7 @@
 ## Browser Face Recognition Demo

 - `demo/facematch`: Demo for Browsers that uses all face description and embedding features to
-  detect, extract and identify all faces plus calculate simmilarity between them
+  detect, extract and identify all faces plus calculate similarity between them

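The similarity call at the heart of that demo boils down to a few lines (a minimal sketch; the element ids and paths are illustrative, and `human.match.similarity` is the API name used on the main branch):

```ts
import { Human } from '../../dist/human.esm.js';

const human = new Human({ modelBasePath: '../../models/' });
const imageA = document.getElementById('sample-a') as HTMLImageElement; // hypothetical elements
const imageB = document.getElementById('sample-b') as HTMLImageElement;
const res1 = await human.detect(imageA);
const res2 = await human.detect(imageB);
// compare the two face descriptors; result is in the 0..1 range
const similarity = human.match.similarity(res1.face[0].embedding, res2.face[0].embedding);
console.log(`similarity: ${(100 * similarity).toFixed(1)}%`);
```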
 It highlights functionality such as:

@@ -72,12 +72,13 @@ Non-linear performance that increases with number of worker threads due to commu

 > node node-match

+<!-- eslint-skip -->
 ```js
-2021-10-13 07:53:36 INFO: options: { dbFile: './faces.json', dbMax: 10000, threadPoolSize: 6, workerSrc: './node-match-worker.js', debug: false, minThreshold: 0.9, descLength: 1024 }
-2021-10-13 07:53:36 DATA: created shared buffer: { maxDescriptors: 10000, totalBytes: 40960000, totalElements: 10240000 }
-2021-10-13 07:53:36 DATA: db loaded: { existingRecords: 0, newRecords: 5700 }
-2021-10-13 07:53:36 INFO: starting worker thread pool: { totalWorkers: 6, alreadyActive: 0 }
-2021-10-13 07:53:36 STATE: submitted: { matchJobs: 100, poolSize: 6, activeWorkers: 6 }
-2021-10-13 07:53:38 STATE: { matchJobsFinished: 100, totalTimeMs: 1769, averageTimeMs: 17.69 }
-2021-10-13 07:53:38 INFO: closing workers: { poolSize: 6, activeWorkers: 6 }
+INFO: options: { dbFile: './faces.json', dbMax: 10000, threadPoolSize: 6, workerSrc: './node-match-worker.js', debug: false, minThreshold: 0.9, descLength: 1024 }
+DATA: created shared buffer: { maxDescriptors: 10000, totalBytes: 40960000, totalElements: 10240000 }
+DATA: db loaded: { existingRecords: 0, newRecords: 5700 }
+INFO: starting worker thread pool: { totalWorkers: 6, alreadyActive: 0 }
+STATE: submitted: { matchJobs: 100, poolSize: 6, activeWorkers: 6 }
+STATE: { matchJobsFinished: 100, totalTimeMs: 1769, averageTimeMs: 17.69 }
+INFO: closing workers: { poolSize: 6, activeWorkers: 6 }
 ```
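The buffer figures in that log follow directly from the options shown on its first line (a quick check of the arithmetic):

```ts
const dbMax = 10000; // descriptors preallocated
const descLength = 1024; // float32 elements per descriptor
console.log(dbMax * descLength * 4); // 40960000 totalBytes (4 bytes per float32)
console.log(dbMax * descLength); // 10240000 totalElements
console.log(1769 / 100); // 17.69 averageTimeMs across the 100 submitted jobs
```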

@@ -1,18 +1,17 @@
-// @ts-nocheck
 /**
  * Human demo for browsers
  *
- * Demo for face descriptor analysis and face simmilarity analysis
+ * Demo for face descriptor analysis and face similarity analysis
  */

 /** @type {Human} */
-import Human from '../../dist/human.esm.js';
+import { Human } from '../../dist/human.esm.js';

 const userConfig = {
   backend: 'humangl',
   async: true,
   warmup: 'none',
-  cacheSensitivity: 0,
+  cacheSensitivity: 0.01,
   debug: true,
   modelBasePath: '../../models/',
   deallocate: true,

@@ -23,7 +22,6 @@ const userConfig = {
   },
   face: {
     enabled: true,
-    // detector: { rotation: false, return: true, maxDetected: 50, iouThreshold: 0.206, minConfidence: 0.122 },
     detector: { return: true, rotation: true, maxDetected: 50, iouThreshold: 0.01, minConfidence: 0.2 },
     mesh: { enabled: true },
     iris: { enabled: false },

@@ -46,8 +44,7 @@ const minScore = 0.4;
 function log(...msg) {
   const dt = new Date();
   const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
-  // eslint-disable-next-line no-console
-  console.log(ts, ...msg);
+  console.log(ts, ...msg); // eslint-disable-line no-console
 }

 function title(msg) {

@@ -66,25 +63,16 @@ async function loadFaceMatchDB() {
   }
 }

-async function SelectFaceCanvas(face) {
+async function selectFaceCanvas(face) {
   // if we have face image tensor, enhance it and display it
   let embedding;
   document.getElementById('orig').style.filter = 'blur(16px)';
   if (face.tensor) {
     title('Sorting Faces by Similarity');
-    const enhanced = human.enhance(face);
-    if (enhanced) {
     const c = document.getElementById('orig');
-      const squeeze = human.tf.squeeze(enhanced);
-      const normalize = human.tf.div(squeeze, 255);
-      await human.tf.browser.toPixels(normalize, c);
-      human.tf.dispose([enhanced, squeeze, normalize]);
-      const ctx = c.getContext('2d');
-      ctx.font = 'small-caps 0.4rem "Lato"';
-      ctx.fillStyle = 'rgba(255, 255, 255, 1)';
-    }
+    await human.draw.tensor(face.tensor, c);
     const arr = db.map((rec) => rec.embedding);
-    const res = await human.match(face.embedding, arr);
+    const res = await human.match.find(face.embedding, arr);
     log('Match:', db[res.index].name);
     const emotion = face.emotion[0] ? `${Math.round(100 * face.emotion[0].score)}% ${face.emotion[0].emotion}` : 'N/A';
     document.getElementById('desc').innerHTML = `

@@ -105,11 +93,11 @@ async function selectFaceCanvas(face) {
   for (const canvas of canvases) {
     // calculate similarity from selected face to current one in the loop
     const current = all[canvas.tag.sample][canvas.tag.face];
-    const similarity = human.similarity(face.embedding, current.embedding);
+    const similarity = human.match.similarity(face.embedding, current.embedding);
     canvas.tag.similarity = similarity;
     // get best match
     // draw the canvas
-    await human.tf.browser.toPixels(current.tensor, canvas);
+    await human.draw.tensor(current.tensor, canvas);
     const ctx = canvas.getContext('2d');
     ctx.font = 'small-caps 1rem "Lato"';
     ctx.fillStyle = 'rgba(0, 0, 0, 1)';

@@ -122,7 +110,7 @@ async function selectFaceCanvas(face) {
     ctx.font = 'small-caps 1rem "Lato"';
     const start = human.now();
     const arr = db.map((rec) => rec.embedding);
-    const res = await human.match(current.embedding, arr);
+    const res = await human.match.find(current.embedding, arr);
     time += (human.now() - start);
     if (res.similarity > minScore) ctx.fillText(`DB: ${(100 * res.similarity).toFixed(1)}% ${db[res.index].name}`, 4, canvas.height - 30);
   }

@@ -137,7 +125,7 @@ async function selectFaceCanvas(face) {
   title('Selected Face');
 }

-async function AddFaceCanvas(index, res, fileName) {
+async function addFaceCanvas(index, res, fileName) {
   all[index] = res.face;
   for (const i in res.face) {
     if (!res.face[i].tensor) continue; // did not get valid results

@@ -156,35 +144,37 @@ async function addFaceCanvas(index, res, fileName) {
       gender: ${Math.round(100 * res.face[i].genderScore)}% ${res.face[i].gender}
       emotion: ${emotion}
     `.replace(/ /g, '&nbsp;');
-    await human.tf.browser.toPixels(res.face[i].tensor, canvas);
+    await human.draw.tensor(res.face[i].tensor, canvas);
     const ctx = canvas.getContext('2d');
-    if (!ctx) return false;
+    if (!ctx) return;
     ctx.font = 'small-caps 0.8rem "Lato"';
     ctx.fillStyle = 'rgba(255, 255, 255, 1)';
     ctx.fillText(`${res.face[i].age}y ${(100 * (res.face[i].genderScore || 0)).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6);
     const arr = db.map((rec) => rec.embedding);
-    const result = human.match(res.face[i].embedding, arr);
+    const result = human.match.find(res.face[i].embedding, arr);
     ctx.font = 'small-caps 1rem "Lato"';
     if (result.similarity && res.similarity > minScore) ctx.fillText(`${(100 * result.similarity).toFixed(1)}% ${db[result.index].name}`, 4, canvas.height - 30);
     document.getElementById('faces').appendChild(canvas);
     canvas.addEventListener('click', (evt) => {
       log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, 'Source:', evt.target.tag.source, all[evt.target.tag.sample][evt.target.tag.face]);
-      SelectFaceCanvas(all[evt.target.tag.sample][evt.target.tag.face]);
+      selectFaceCanvas(all[evt.target.tag.sample][evt.target.tag.face]);
     });
   }
 }

-async function AddImageElement(index, image, length) {
+async function addImageElement(index, image, length) {
   const faces = all.reduce((prev, curr) => prev += curr.length, 0);
   title(`Analyzing Input Images<br> ${Math.round(100 * index / length)}% [${index} / ${length}]<br>Found ${faces} Faces`);
   return new Promise((resolve) => {
     const img = new Image(128, 128);
     img.onload = () => { // must wait until image is loaded
       document.getElementById('images').appendChild(img); // and finally we can add it
-      human.detect(img, userConfig).then((res) => {
-        AddFaceCanvas(index, res, image); // then wait until image is analyzed
-        resolve(true);
-      });
+      human.detect(img, userConfig)
+        .then((res) => { // eslint-disable-line promise/always-return
+          addFaceCanvas(index, res, image); // then wait until image is analyzed
+          resolve(true);
+        })
+        .catch(() => log('human detect error'));
     };
     img.onerror = () => {
       log('Add image error:', index + 1, image);

@@ -222,18 +212,23 @@ async function main() {
   // could not dynamically enumerate images so using static list
   if (images.length === 0) {
     images = [
-      'ai-body.jpg', 'solvay1927.jpg', 'ai-upper.jpg',
-      'person-carolina.jpg', 'person-celeste.jpg', 'person-leila1.jpg', 'person-leila2.jpg', 'person-lexi.jpg', 'person-linda.jpg', 'person-nicole.jpg', 'person-tasia.jpg',
-      'person-tetiana.jpg', 'person-vlado1.jpg', 'person-vlado5.jpg', 'person-vlado.jpg', 'person-christina.jpg', 'person-lauren.jpg',
+      'ai-face.jpg', 'ai-upper.jpg', 'ai-body.jpg', 'solvay1927.jpg',
       'group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg',
-      'daz3d-brianna.jpg', 'daz3d-chiyo.jpg', 'daz3d-cody.jpg', 'daz3d-drew-01.jpg', 'daz3d-drew-02.jpg', 'daz3d-ella-01.jpg', 'daz3d-ella-02.jpg', 'daz3d-gillian.jpg',
-      'daz3d-hye-01.jpg', 'daz3d-hye-02.jpg', 'daz3d-kaia.jpg', 'daz3d-karen.jpg', 'daz3d-kiaria-01.jpg', 'daz3d-kiaria-02.jpg', 'daz3d-lilah-01.jpg', 'daz3d-lilah-02.jpg',
-      'daz3d-lilah-03.jpg', 'daz3d-lila.jpg', 'daz3d-lindsey.jpg', 'daz3d-megah.jpg', 'daz3d-selina-01.jpg', 'daz3d-selina-02.jpg', 'daz3d-snow.jpg',
-      'daz3d-sunshine.jpg', 'daz3d-taia.jpg', 'daz3d-tuesday-01.jpg', 'daz3d-tuesday-02.jpg', 'daz3d-tuesday-03.jpg', 'daz3d-zoe.jpg', 'daz3d-ginnifer.jpg',
-      'daz3d-_emotions01.jpg', 'daz3d-_emotions02.jpg', 'daz3d-_emotions03.jpg', 'daz3d-_emotions04.jpg', 'daz3d-_emotions05.jpg',
+      'person-celeste.jpg', 'person-christina.jpg', 'person-lauren.jpg', 'person-lexi.jpg', 'person-linda.jpg', 'person-nicole.jpg', 'person-tasia.jpg', 'person-tetiana.jpg', 'person-vlado.jpg', 'person-vlado1.jpg', 'person-vlado5.jpg',
+      'stock-group-1.jpg', 'stock-group-2.jpg',
+      'stock-models-1.jpg', 'stock-models-2.jpg', 'stock-models-3.jpg', 'stock-models-4.jpg', 'stock-models-5.jpg', 'stock-models-6.jpg', 'stock-models-7.jpg', 'stock-models-8.jpg', 'stock-models-9.jpg',
+      'stock-teen-1.jpg', 'stock-teen-2.jpg', 'stock-teen-3.jpg', 'stock-teen-4.jpg', 'stock-teen-5.jpg', 'stock-teen-6.jpg', 'stock-teen-7.jpg', 'stock-teen-8.jpg',
+      'stock-models-10.jpg', 'stock-models-11.jpg', 'stock-models-12.jpg', 'stock-models-13.jpg', 'stock-models-14.jpg', 'stock-models-15.jpg', 'stock-models-16.jpg',
+      'cgi-model-1.jpg', 'cgi-model-2.jpg', 'cgi-model-3.jpg', 'cgi-model-4.jpg', 'cgi-model-5.jpg', 'cgi-model-6.jpg', 'cgi-model-7.jpg', 'cgi-model-8.jpg', 'cgi-model-9.jpg',
+      'cgi-model-10.jpg', 'cgi-model-11.jpg', 'cgi-model-12.jpg', 'cgi-model-13.jpg', 'cgi-model-14.jpg', 'cgi-model-15.jpg', 'cgi-model-18.jpg', 'cgi-model-19.jpg',
+      'cgi-model-20.jpg', 'cgi-model-21.jpg', 'cgi-model-22.jpg', 'cgi-model-23.jpg', 'cgi-model-24.jpg', 'cgi-model-25.jpg', 'cgi-model-26.jpg', 'cgi-model-27.jpg', 'cgi-model-28.jpg', 'cgi-model-29.jpg',
+      'cgi-model-30.jpg', 'cgi-model-31.jpg', 'cgi-model-33.jpg', 'cgi-model-34.jpg',
+      'cgi-multiangle-1.jpg', 'cgi-multiangle-2.jpg', 'cgi-multiangle-3.jpg', 'cgi-multiangle-4.jpg', 'cgi-multiangle-6.jpg', 'cgi-multiangle-7.jpg', 'cgi-multiangle-8.jpg', 'cgi-multiangle-9.jpg', 'cgi-multiangle-10.jpg', 'cgi-multiangle-11.jpg',
+      'stock-emotions-a-1.jpg', 'stock-emotions-a-2.jpg', 'stock-emotions-a-3.jpg', 'stock-emotions-a-4.jpg', 'stock-emotions-a-5.jpg', 'stock-emotions-a-6.jpg', 'stock-emotions-a-7.jpg', 'stock-emotions-a-8.jpg',
+      'stock-emotions-b-1.jpg', 'stock-emotions-b-2.jpg', 'stock-emotions-b-3.jpg', 'stock-emotions-b-4.jpg', 'stock-emotions-b-5.jpg', 'stock-emotions-b-6.jpg', 'stock-emotions-b-7.jpg', 'stock-emotions-b-8.jpg',
     ];
     // add prefix for gitpages
-    images = images.map((a) => `/human/samples/in/${a}`);
+    images = images.map((a) => `../../samples/in/${a}`);
     log('Adding static image list:', images);
   } else {
     log('Discovered images:', images);

@@ -242,7 +237,7 @@ async function main() {
   // images = ['/samples/in/person-lexi.jpg', '/samples/in/person-carolina.jpg', '/samples/in/solvay1927.jpg'];

   const t0 = human.now();
-  for (let i = 0; i < images.length; i++) await AddImageElement(i, images[i], images.length);
+  for (let i = 0; i < images.length; i++) await addImageElement(i, images[i], images.length);
   const t1 = human.now();

   // print stats

@@ -256,7 +251,7 @@ async function main() {
   title('');
   log('Ready');
   human.validate(userConfig);
-  human.similarity([], []);
+  human.match.similarity([], []);
 }

 window.onload = main;

@@ -1,3 +1,8 @@
+/**
+ * Runs in a worker thread started by `node-match` demo app
+ *
+ */

 const threads = require('worker_threads');

 let debug = false;

@@ -33,7 +38,8 @@ function match(descBuffer, options = { order: 2, multiplier: 20 }) {
     if (best < threshold || best === 0) break; // short circuit
   }
   best = (options.order === 2) ? Math.sqrt(best) : best ** (1 / options.order);
-  return { index, distance: best, similarity: Math.max(0, 100 - best) / 100.0 };
+  const similarity = Math.round(100 * Math.max(0, 100 - best) / 100.0) / 100;
+  return { index, distance: best, similarity };
 }
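The loop patched here computes a p-norm distance between the probe descriptor and every record in the shared view, then maps distance to a 0..1 similarity. A standalone restatement of that scoring for `order: 2` (Euclidean; a sketch with illustrative names):

```ts
// same distance-to-similarity mapping as the worker's match() above
function score(a: Float32Array, b: Float32Array): { distance: number; similarity: number } {
  let sum = 0;
  for (let i = 0; i < a.length; i++) sum += (a[i] - b[i]) ** 2; // order 2: squared differences
  const distance = Math.sqrt(sum);
  const similarity = Math.round(100 * Math.max(0, 100 - distance) / 100.0) / 100;
  return { distance, similarity };
}
```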

 threads.parentPort?.on('message', (msg) => {

@@ -47,7 +53,7 @@ threads.parentPort?.on('message', (msg) => {
   if (msg instanceof SharedArrayBuffer) { // called only once to receive reference to shared array buffer
     buffer = msg;
     view = new Float32Array(buffer); // initialize f32 view into buffer
-    if (debug) threads.parentPort?.postMessage(`buffer: ${buffer?.byteLength}`);
+    if (debug) threads.parentPort?.postMessage(`buffer: ${buffer.byteLength}`);
   }
   if (typeof msg.records !== 'undefined') { // received every time when number of records changes
     records = msg.records;

@@ -55,15 +61,15 @@ threads.parentPort?.on('message', (msg) => {
   }
   if (typeof msg.debug !== 'undefined') { // set verbose logging
     debug = msg.debug;
-    if (debug) threads.parentPort?.postMessage(`debug: ${debug}`);
+    // if (debug) threads.parentPort?.postMessage(`debug: ${debug}`);
   }
   if (typeof msg.threshold !== 'undefined') { // set minimum similarity threshold
     threshold = msg.threshold;
-    if (debug) threads.parentPort?.postMessage(`threshold: ${threshold}`);
+    // if (debug) threads.parentPort?.postMessage(`threshold: ${threshold}`);
   }
   if (typeof msg.shutdown !== 'undefined') { // got message to close worker
     if (debug) threads.parentPort?.postMessage('shutting down');
-    process.exit(0);
+    process.exit(0); // eslint-disable-line no-process-exit
   }
 });

@@ -1,7 +1,13 @@
+/**
+ * Human demo app for NodeJS that generates random facial descriptors
+ * and uses NodeJS multi-threading to start multiple threads for face matching
+ * uses `node-match-worker.js` to perform actual face matching analysis
+ */

 const fs = require('fs');
 const path = require('path');
-const log = require('@vladmandic/pilogger');
 const threads = require('worker_threads');
+const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require

 // global options
 const options = {

@@ -9,7 +15,7 @@ const options = {
   dbMax: 10000, // maximum number of records to hold in memory
   threadPoolSize: 12, // number of worker threads to create in thread pool
   workerSrc: './node-match-worker.js', // code that executes in the worker thread
-  debug: false, // verbose messages
+  debug: true, // verbose messages
   minThreshold: 0.5, // match returns first record that meets the similarity threshold, set to 0 to always scan all records
   descLength: 1024, // descriptor length
 };

@@ -154,7 +160,7 @@ async function createBuffer() {
   data.buffer = new SharedArrayBuffer(4 * options.dbMax * options.descLength); // preallocate max number of records as sharedarraybuffers cannot grow
   data.view = new Float32Array(data.buffer); // create view into buffer
   data.labels.length = 0;
-  log.data('created shared buffer:', { maxDescriptors: (data.view?.length || 0) / options.descLength, totalBytes: data.buffer.byteLength, totalElements: data.view?.length });
+  log.data('created shared buffer:', { maxDescriptors: (data.view.length || 0) / options.descLength, totalBytes: data.buffer.byteLength, totalElements: data.view.length });
 }
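Because the SharedArrayBuffer is preallocated and cannot grow, record `i` always occupies the fixed slice `[i * descLength, (i + 1) * descLength)` of the Float32Array view, which is what lets every worker read records without copying. A sketch of that layout (names are illustrative):

```ts
const descLength = 1024;
const dbMax = 10000;
const buffer = new SharedArrayBuffer(4 * dbMax * descLength); // fixed size, shared across workers
const view = new Float32Array(buffer);

function writeDescriptor(i: number, descriptor: number[]): void {
  view.set(descriptor, i * descLength); // copy into record i's slot
}

function readDescriptor(i: number): Float32Array {
  return view.subarray(i * descLength, (i + 1) * descLength); // zero-copy view of record i
}
```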

 async function main() {

@@ -170,7 +176,7 @@ async function main() {
     data.requestID++; // increase request id
     if (testOptions.fuzDescriptors) match(fuzDescriptor(descriptor)); // fuz descriptor for harder match
     else match(descriptor);
-    if (options.debug) log.info('submitted job', data.requestID); // we already know what we're searching for so we can compare results
+    if (options.debug) log.debug('submitted job', data.requestID); // we already know what we're searching for so we can compare results
   }
   log.state('submitted:', { matchJobs: testOptions.maxJobs, poolSize: data.workers.length, activeWorkers: data.workers.filter((worker) => !!worker).length });
 }

@@ -1,4 +1,3 @@
-// @ts-nocheck
 // based on: https://github.com/munrocket/gl-bench

 const UICSS = `

@@ -43,9 +42,7 @@ class GLBench {
   constructor(gl, settings = {}) {
     this.css = UICSS;
     this.svg = UISVG;
-    // eslint-disable-next-line @typescript-eslint/no-empty-function
     this.paramLogger = () => {};
-    // eslint-disable-next-line @typescript-eslint/no-empty-function
     this.chartLogger = () => {};
     this.chartLen = 20;
     this.chartHz = 20;

@@ -92,7 +89,6 @@ class GLBench {

   const addProfiler = (fn, self, target) => {
     const t = self.now();
-    // eslint-disable-next-line prefer-rest-params
     fn.apply(target, arguments);
     if (self.trackGPU) self.finished.push(glFinish(t, self.activeAccums.slice(0)));
   };

@@ -107,13 +103,11 @@ class GLBench {
   if (gl[fn]) {
     gl[fn] = addProfiler(gl[fn], this, gl);
   } else {
-    // eslint-disable-next-line no-console
     console.log('bench: cannot attach to webgl function');
   }

   /*
   gl.getExtension = ((fn, self) => {
-    // eslint-disable-next-line prefer-rest-params
     const ext = fn.apply(gl, arguments);
     if (ext) {
       ['drawElementsInstancedANGLE', 'drawBuffersWEBGL'].forEach((fn2) => {

@@ -148,7 +142,6 @@ class GLBench {
   return (i, cpu, gpu, mem, fps, totalTime, frameId) => {
     nodes['gl-cpu'][i].style.strokeDasharray = (cpu * 0.27).toFixed(0) + ' 100';
     nodes['gl-gpu'][i].style.strokeDasharray = (gpu * 0.27).toFixed(0) + ' 100';
-    // eslint-disable-next-line no-nested-ternary
     nodes['gl-mem'][i].innerHTML = names[i] ? names[i] : (mem ? 'mem: ' + mem.toFixed(0) + 'mb' : '');
     nodes['gl-fps'][i].innerHTML = 'FPS: ' + fps.toFixed(1);
     logger(names[i], cpu, gpu, mem, fps, totalTime, frameId);

@@ -64,9 +64,7 @@ function createNode() {
   hideChildren() {
     if (Array.isArray(this.children)) {
       this.children.forEach((item) => {
-        // @ts-ignore
         item['elem']['classList'].add('hide');
-        // @ts-ignore
         if (item['expanded']) item.hideChildren();
       });
     }

@@ -74,9 +72,7 @@ function createNode() {
   showChildren() {
     if (Array.isArray(this.children)) {
       this.children.forEach((item) => {
-        // @ts-ignore
         item['elem']['classList'].remove('hide');
-        // @ts-ignore
         if (item['expanded']) item.showChildren();
       });
     }

@@ -119,7 +119,6 @@ class Menu {

     this.menu.appendChild(this.container);
     if (typeof parent === 'object') parent.appendChild(this.menu);
-    // @ts-ignore undefined
     else document.getElementById(parent).appendChild(this.menu);
   }

@@ -184,7 +183,6 @@ class Menu {
     this.hidden = !this.hidden;
     const all = document.getElementsByClassName('menu');
     for (const item of all) {
-      // @ts-ignore
       item.style.display = this.hidden ? 'none' : 'block';
     }
   });

@@ -241,7 +239,6 @@ class Menu {
   el.addEventListener('change', (evt) => {
     if (evt.target) {
       object[variable] = parseInt(evt.target['value']) === parseFloat(evt.target['value']) ? parseInt(evt.target['value']) : parseFloat(evt.target['value']);
-      // @ts-ignore
       evt.target.setAttribute('value', evt.target['value']);
       if (callback) callback(evt.target['value']);
     }

@@ -286,7 +283,6 @@ class Menu {
     return el;
   }

-  // eslint-disable-next-line class-methods-use-this
   updateValue(title, val, suffix = '') {
     const el = document.getElementById(`menu-val-${title}`);
     if (el) el.innerText = `${title}: ${val}${suffix}`;

@@ -303,11 +299,9 @@ class Menu {
     return el;
   }

-  // eslint-disable-next-line class-methods-use-this
   async updateChart(id, values) {
     if (!values || (values.length === 0)) return;
     /** @type {HTMLCanvasElement} */
-    // @ts-ignore undefined
     const canvas = document.getElementById(`menu-canvas-${id}`);
     if (!canvas) return;
     const ctx = canvas.getContext('2d');

@@ -4,8 +4,7 @@ async function log(...msg) {
   if (debug) {
     const dt = new Date();
     const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
-    // eslint-disable-next-line no-console
-    console.log(ts, 'webrtc', ...msg);
+    console.log(ts, 'webrtc', ...msg); // eslint-disable-line no-console
   }
 }

@@ -2,6 +2,7 @@
  * PWA Service Worker for Human main demo
  */

+/* eslint-disable no-restricted-globals */
 /// <reference lib="webworker" />

 const skipCaching = false;

@@ -19,8 +20,7 @@ const stats = { hit: 0, miss: 0 };
 const log = (...msg) => {
   const dt = new Date();
   const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
-  // eslint-disable-next-line no-console
-  console.log(ts, 'pwa', ...msg);
+  console.log(ts, 'pwa', ...msg); // eslint-disable-line no-console
 };

 async function updateCached(req) {

@@ -31,7 +31,7 @@ async function updateCached(req) {
     caches
       .open(cacheName)
       .then((cache) => cache.put(req, update))
-      .catch((err) => log('cache update error', err));
+      .catch((err) => log('cache update error', err)); // eslint-disable-line promise/no-nesting
   }
   return true;
 })

@@ -75,14 +75,13 @@ async function getCached(evt) {
 }

 function cacheInit() {
-  // eslint-disable-next-line promise/catch-or-return
   caches.open(cacheName)
-    // eslint-disable-next-line promise/no-nesting
     .then((cache) => cache.addAll(cacheFiles)
-      .then(
+      .then( // eslint-disable-line promise/no-nesting
        () => log('cache refresh:', cacheFiles.length, 'files'),
        (err) => log('cache error', err),
-      ));
+      ))
+    .catch(() => log('cache error'));
 }

 if (!listening) {

@@ -99,14 +98,12 @@ if (!listening) {

 self.addEventListener('install', (evt) => {
   log('install');
-  // @ts-ignore scope for self is ServiceWorkerGlobalScope not Window
   self.skipWaiting();
   evt.waitUntil(cacheInit);
 });

 self.addEventListener('activate', (evt) => {
   log('activate');
-  // @ts-ignore scope for self is ServiceWorkerGlobalScope not Window
   evt.waitUntil(self.clients.claim());
 });

@@ -114,7 +111,7 @@ if (!listening) {
   const uri = new URL(evt.request.url);
   // if (uri.pathname === '/') { log('cache skip /', evt.request); return; } // skip root access requests
   if (evt.request.cache === 'only-if-cached' && evt.request.mode !== 'same-origin') return; // required due to chrome bug
-  if (uri.origin !== location.origin) return; // skip non-local requests
+  if (uri.origin !== self.location.origin) return; // skip non-local requests
   if (evt.request.method !== 'GET') return; // only cache get requests
   if (evt.request.url.includes('/api/')) return; // don't cache api requests, failures are handled at the time of call
|
|
||||||
|
@ -129,7 +126,7 @@ if (!listening) {
|
||||||
log(`PWA: ${evt.type}`);
|
log(`PWA: ${evt.type}`);
|
||||||
if (refreshed) return;
|
if (refreshed) return;
|
||||||
refreshed = true;
|
refreshed = true;
|
||||||
location.reload();
|
self.location.reload();
|
||||||
});
|
});
|
||||||
|
|
||||||
listening = true;
|
listening = true;
|
||||||
|
|
|
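The service-worker changes above converge on two patterns: each promise chain ends in a single trailing `.catch` instead of scattered eslint-disable comments, and bare globals such as `location` are reached through `self`, the correct global scope inside a worker. A minimal sketch of the caching chain under those conventions; the cache name and file list here are illustrative, not from the repo:

```js
// Sketch only: cacheName and cacheFiles are placeholders.
const cacheName = 'human-demo';
const cacheFiles = ['/index.html'];
caches.open(cacheName)
  .then((cache) => cache.addAll(cacheFiles))
  .then(() => console.log('cache refresh:', cacheFiles.length, 'files'))
  .catch((err) => console.log('cache error', err));
```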
@@ -6,10 +6,9 @@
 /// <reference lib="webworker"/>

 // load Human using IIFE script as Chome Mobile does not support Modules as Workers
-self.importScripts('../dist/human.js');
+self.importScripts('../dist/human.js'); // eslint-disable-line no-restricted-globals

 let busy = false;
-// @ts-ignore
 // eslint-disable-next-line new-cap, no-undef
 const human = new Human.default();
@@ -35,7 +35,7 @@
 .video { display: none; }
 .canvas { margin: 0 auto; }
 .bench { position: absolute; right: 0; bottom: 0; }
-.compare-image { width: 256px; position: absolute; top: 150px; left: 30px; box-shadow: 0 0 2px 2px black; background: black; display: none; }
+.compare-image { width: 200px; position: absolute; top: 150px; left: 30px; box-shadow: 0 0 2px 2px black; background: black; display: none; }
 .loader { width: 300px; height: 300px; border: 3px solid transparent; border-radius: 50%; border-top: 4px solid #f15e41; animation: spin 4s linear infinite; position: absolute; bottom: 15%; left: 50%; margin-left: -150px; z-index: 15; }
 .loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: 4px solid transparent; }
 .loader::before { border-top-color: #bad375; animation: 3s spin linear infinite; }

@@ -89,9 +89,9 @@
 <body>
 <div id="play" class="play icon-play"></div>
 <div id="background">
-<div class='wave one'></div>
-<div class='wave two'></div>
-<div class='wave three'></div>
+<div class="wave one"></div>
+<div class="wave two"></div>
+<div class="wave three"></div>
 </div>
 <div id="loader" class="loader"></div>
 <div id="status" class="status"></div>

@@ -107,13 +107,9 @@
 <video id="video" playsinline class="video"></video>
 </div>
 <div id="compare-container" class="compare-image">
-<canvas id="compare-canvas" width="256" height="256"></canvas>
+<canvas id="compare-canvas" width="200" height="200"></canvas>
 <div id="similarity"></div>
 </div>
-<div id="segmentation-container" class="compare-image">
-<canvas id="segmentation-mask" width="256" height="256" style="width: 256px; height: 256px;"></canvas>
-<canvas id="segmentation-canvas" width="256" height="256" style="width: 256px; height: 256px;"></canvas>
-</div>
 <div id="samples-container" class="samples-container"></div>
 <div id="hint" class="hint"></div>
 <div id="log" class="log"></div>
demo/index.js (169 changed lines)
@@ -18,11 +18,12 @@
 * ui={}: contains all variables exposed in the UI
 */

-// test url <https://human.local/?worker=false&async=false&bench=false&draw=true&warmup=full&backend=humangl>
+// WARNING!!!
+// This demo is written using older code style and a lot of manual setup
+// Newer versions of Human have richer functionality allowing for much cleaner & easier usage
+// It is recommended to use other demos such as `demo/typescript` for usage examples

-// @ts-nocheck // typescript checks disabled as this is pure javascript
+import { Human } from '../dist/human.esm.js'; // equivalent of @vladmandic/human

-import Human from '../dist/human.esm.js'; // equivalent of @vladmandic/human
 import Menu from './helpers/menu.js';
 import GLBench from './helpers/gl-bench.js';
 import webRTC from './helpers/webrtc.js';
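The import change reflects the ESM bundle now exposing `Human` as a named export rather than only a default export; both forms construct the same class. A sketch, assuming the bundle path used by the demo:

```js
// old style: default export
// import Human from '../dist/human.esm.js';
// new style: named export
import { Human } from '../dist/human.esm.js';

const human = new Human(); // default configuration
```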
@@ -36,7 +37,7 @@ let userConfig = {
 // hand: { enabled: false },
 /*
 warmup: 'none',
-backend: 'humangl',
+backend: 'webgl',
 debug: true,
 wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
 async: false,

@@ -56,6 +57,12 @@ let userConfig = {
 // body: { enabled: true, modelPath: 'movenet-multipose.json' },
 segmentation: { enabled: false },
 */
+/*
+face: { iris: { enabled: false }, emotion: { enabled: false } },
+hand: { enabled: false },
+body: { enabled: false },
+gesture: { enabled: false },
+*/
 };

 const drawOptions = {
@@ -79,7 +86,7 @@ const ui = {
 facing: true, // camera facing front or back
 baseBackground: 'rgba(50, 50, 50, 1)', // 'grey'
 columns: 2, // when processing sample images create this many columns
-useWorker: true, // use web workers for processing
+useWorker: false, // use web workers for processing
 worker: 'index-worker.js',
 maxFPSframes: 10, // keep fps history for how many frames
 modelsPreload: false, // preload human models on startup

@@ -107,7 +114,6 @@ const ui = {
 results: false, // show results tree
 lastFrame: 0, // time of last frame processing
 viewportSet: false, // internal, has custom viewport been set
-background: null, // holds instance of segmentation background image
 transferCanvas: null, // canvas used to transfer data to and from worker

 // webrtc
@@ -147,7 +153,7 @@ let bench;
 let lastDetectedResult = {};

 // helper function: async pause
-// eslint-disable-next-line @typescript-eslint/no-unused-vars, no-unused-vars
+// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
 const delay = (ms) => new Promise((resolve) => { setTimeout(resolve, ms); });

 // helper function: translates json to human readable string

@@ -165,8 +171,7 @@ function str(...msg) {
 function log(...msg) {
 const dt = new Date();
 const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
-// eslint-disable-next-line no-console
-if (ui.console) console.log(ts, ...msg);
+if (ui.console) console.log(ts, ...msg); // eslint-disable-line no-console
 }

 let prevStatus = '';
@@ -180,17 +185,16 @@ function status(msg) {
 prevStatus = msg;
 } else {
 const video = document.getElementById('video');
-const playing = (video.srcObject !== null) && !video.paused;
+const playing = isLive(video) && !video.paused; // eslint-disable-line no-use-before-define
 document.getElementById('play').style.display = playing ? 'none' : 'block';
 document.getElementById('loader').style.display = 'none';
 div.innerText = '';
 }
 }

-async function videoPlay() {
+async function videoPlay(videoElement = document.getElementById('video')) {
 document.getElementById('btnStartText').innerHTML = 'pause video';
-await document.getElementById('video').play();
-// status();
+await videoElement.play();
 }

 async function videoPause() {
@@ -203,29 +207,28 @@ async function videoPause() {

 const compare = { enabled: false, original: null };
 async function calcSimmilarity(result) {
+document.getElementById('compare-container').onclick = () => {
+log('resetting face compare baseline:');
+compare.original = null;
+};
 document.getElementById('compare-container').style.display = compare.enabled ? 'block' : 'none';
-if (!compare.enabled) return;
+if (!compare.enabled) {
+compare.original = null;
+return;
+}
 if (!result || !result.face || !result.face[0] || !result.face[0].embedding) return;
 if (!(result.face.length > 0) || (result.face[0].embedding.length <= 64)) return;
 if (!compare.original) {
 compare.original = result;
 log('setting face compare baseline:', result.face[0]);
 if (result.face[0].tensor) {
-const enhanced = human.enhance(result.face[0]);
-if (enhanced) {
 const c = document.getElementById('orig');
-const squeeze = human.tf.squeeze(enhanced);
-const norm = human.tf.div(squeeze, 255);
-human.tf.browser.toPixels(norm, c);
-human.tf.dispose(enhanced);
-human.tf.dispose(squeeze);
-human.tf.dispose(norm);
-}
+human.draw.tensor(result.face[0].tensor, c);
 } else {
 document.getElementById('compare-canvas').getContext('2d').drawImage(compare.original.canvas, 0, 0, 200, 200);
 }
 }
-const similarity = human.similarity(compare.original.face[0].embedding, result.face[0].embedding);
+const similarity = human.match.similarity(compare.original.face[0].embedding, result.face[0].embedding);
 document.getElementById('similarity').innerText = `similarity: ${Math.trunc(1000 * similarity) / 10}%`;
 }
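The face-compare rewrite replaces the manual tensor pipeline (`enhance`/`squeeze`/`div`/`toPixels` plus four `dispose` calls) with the single `human.draw.tensor` helper, and `similarity` moves under the `human.match` namespace. A sketch of the comparison flow, assuming an existing `human` instance with face description enabled; `imageA` and `imageB` are placeholders for any inputs `detect()` accepts:

```js
// Sketch: compares face descriptors from two detection results.
async function compareFaces(imageA, imageB) {
  const first = await human.detect(imageA);
  const second = await human.detect(imageB);
  const a = first.face[0] && first.face[0].embedding;
  const b = second.face[0] && second.face[0].embedding;
  if (!a || !b) return 0; // no usable descriptor in one of the inputs
  return human.match.similarity(a, b); // 0..1, higher is more similar
}
```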
@@ -253,21 +256,7 @@ async function drawResults(input) {
 // draw fps chart
 await menu.process.updateChart('FPS', ui.detectFPS);

-document.getElementById('segmentation-container').style.display = userConfig.segmentation.enabled ? 'block' : 'none';
-if (userConfig.segmentation.enabled && ui.buffered) { // refresh segmentation if using buffered output
-const seg = await human.segmentation(input, ui.background);
-if (seg.alpha) {
-const canvasSegMask = document.getElementById('segmentation-mask');
-const ctxSegMask = canvasSegMask.getContext('2d');
-ctxSegMask.clearRect(0, 0, canvasSegMask.width, canvasSegMask.height); // need to clear as seg.alpha is alpha based canvas so it adds
-ctxSegMask.drawImage(seg.alpha, 0, 0, seg.alpha.width, seg.alpha.height, 0, 0, canvasSegMask.width, canvasSegMask.height);
-const canvasSegCanvas = document.getElementById('segmentation-canvas');
-const ctxSegCanvas = canvasSegCanvas.getContext('2d');
-ctxSegCanvas.clearRect(0, 0, canvasSegCanvas.width, canvasSegCanvas.height); // need to clear as seg.alpha is alpha based canvas so it adds
-ctxSegCanvas.drawImage(seg.canvas, 0, 0, seg.alpha.width, seg.alpha.height, 0, 0, canvasSegCanvas.width, canvasSegCanvas.height);
-}
-// result.canvas = seg.alpha;
-} else if (!result.canvas || ui.buffered) { // refresh with input if using buffered output or if missing canvas
+if (!result.canvas || ui.buffered) { // refresh with input if using buffered output or if missing canvas
 const image = await human.image(input, false);
 result.canvas = image.canvas;
 human.tf.dispose(image.tensor);
@@ -336,14 +325,12 @@ async function drawResults(input) {
 videoPause();
 ui.drawThread = null;
 }
-} else {
-if (ui.drawThread) {
+} else if (ui.drawThread) {
 log('stopping buffered refresh');
 cancelAnimationFrame(ui.drawThread);
 ui.drawThread = null;
 }
 }
-}

 // setup webcam
 let initialCameraAccess = true;
@@ -432,8 +419,7 @@ async function setupCamera() {
 ui.menuWidth.input.setAttribute('value', video.videoWidth);
 ui.menuHeight.input.setAttribute('value', video.videoHeight);
 if (live || ui.autoPlay) await videoPlay();
-// eslint-disable-next-line no-use-before-define
-if ((live || ui.autoPlay) && !ui.detectThread) runHumanDetect(video, canvas);
+if ((live || ui.autoPlay) && !ui.detectThread) runHumanDetect(video, canvas); // eslint-disable-line no-use-before-define
 return 'camera stream ready';
 }

@@ -487,8 +473,7 @@ function webWorker(input, image, canvas, timestamp) {
 ui.framesDetect++;
 if (!ui.drawThread) drawResults(input);
 if (isLive(input)) {
-// eslint-disable-next-line no-use-before-define
-ui.detectThread = requestAnimationFrame((now) => runHumanDetect(input, canvas, now));
+ui.detectThread = requestAnimationFrame((now) => runHumanDetect(input, canvas, now)); // eslint-disable-line no-use-before-define
 }
 });
 }
@@ -525,19 +510,9 @@ function runHumanDetect(input, canvas, timestamp) {
 // perform detection in worker
 webWorker(input, data, canvas, timestamp);
 } else {
-human.detect(input, userConfig).then((result) => {
+human.detect(input, userConfig)
+.then((result) => {
 status();
-/*
-setTimeout(async () => { // simulate gl context lost 2sec after initial detection
-const ext = human.gl && human.gl.gl ? human.gl.gl.getExtension('WEBGL_lose_context') : {};
-if (ext && ext.loseContext) {
-log('simulate context lost:', human.env.webgl, human.gl, ext);
-human.gl.gl.getExtension('WEBGL_lose_context').loseContext();
-await videoPause();
-status('Exception: WebGL');
-}
-}, 2000);
-*/
 if (result.performance && result.performance.total) ui.detectFPS.push(1000 / result.performance.total);
 if (ui.detectFPS.length > ui.maxFPSframes) ui.detectFPS.shift();
 if (ui.bench) {

@@ -554,7 +529,9 @@ function runHumanDetect(input, canvas, timestamp) {
 ui.framesDetect++;
 ui.detectThread = requestAnimationFrame((now) => runHumanDetect(input, canvas, now));
 }
-});
+return result;
+})
+.catch(() => log('human detect error'));
 }
 }
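The detection loop is restructured from a bare `.then` into an explicit chain with a terminal `.catch`, so a failed `detect()` logs instead of surfacing as an unhandled rejection; the added `return result` also satisfies eslint's promise/always-return. A condensed sketch of the resulting shape, relying on the demo's `human`, `userConfig`, `ui`, and `log` (drawing and worker wiring omitted):

```js
// Sketch of the refactored loop shape.
function detectLoop(input, canvas) {
  human.detect(input, userConfig)
    .then((result) => {
      ui.detectThread = requestAnimationFrame(() => detectLoop(input, canvas));
      return result;
    })
    .catch(() => log('human detect error'));
}
```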
@@ -601,8 +578,7 @@ async function processImage(input, title) {
 // copy to clipboard on click
 if (typeof ClipboardItem !== 'undefined' && navigator.clipboard) {
 evt.target.toBlob((blob) => {
-// eslint-disable-next-line no-undef
-const item = new ClipboardItem({ 'image/png': blob });
+const item = new ClipboardItem({ 'image/png': blob }); // eslint-disable-line no-undef
 navigator.clipboard.write([item]);
 log('copied image to clipboard');
 });
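The clipboard hunk only relocates the eslint comment; the underlying pattern stands: feature-detect `ClipboardItem`, render the canvas to a PNG blob, wrap it, and write. A sketch where `canvasElement` is a placeholder for whatever canvas was clicked:

```js
// Sketch: ClipboardItem requires a secure context and browser support.
if (typeof ClipboardItem !== 'undefined' && navigator.clipboard) {
  canvasElement.toBlob((blob) => {
    const item = new ClipboardItem({ 'image/png': blob });
    navigator.clipboard.write([item]);
  });
}
```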
@@ -630,20 +606,17 @@ async function processImage(input, title) {

 async function processVideo(input, title) {
 status(`processing video: ${title}`);
-const video = document.createElement('video');
+const video = document.getElementById('video');
 const canvas = document.getElementById('canvas');
-video.id = 'video-file';
-video.controls = true;
-video.loop = true;
-// video.onerror = async () => status(`video loading error: ${video.error.message}`);
 video.addEventListener('error', () => status(`video loading error: ${video.error.message}`));
 video.addEventListener('canplay', async () => {
 for (const m of Object.values(menu)) m.hide();
 document.getElementById('samples-container').style.display = 'none';
 canvas.style.display = 'block';
 await videoPlay();
-if (!ui.detectThread) runHumanDetect(video, canvas);
+runHumanDetect(video, canvas);
 });
+video.srcObject = null;
 video.src = input;
 }
@@ -654,9 +627,8 @@ async function detectVideo() {
 const canvas = document.getElementById('canvas');
 canvas.style.display = 'block';
 cancelAnimationFrame(ui.detectThread);
-if ((video.srcObject !== null) && !video.paused) {
+if (isLive(video) && !video.paused) {
 await videoPause();
-// if (ui.drawThread) cancelAnimationFrame(ui.drawThread);
 } else {
 const cameraError = await setupCamera();
 if (!cameraError) {
@@ -732,7 +704,6 @@ function setupMenu() {
 menu.image.addBool('technicolor', userConfig.filter, 'technicolor', (val) => userConfig.filter.technicolor = val);
 menu.image.addBool('polaroid', userConfig.filter, 'polaroid', (val) => userConfig.filter.polaroid = val);
 menu.image.addHTML('<input type="file" id="file-input" class="input-file"></input>   input');
-menu.image.addHTML('<input type="file" id="file-background" class="input-file"></input>   background');

 menu.process = new Menu(document.body, '', { top, left: x[2] });
 menu.process.addList('backend', ['cpu', 'webgl', 'wasm', 'humangl'], userConfig.backend, (val) => userConfig.backend = val);

@@ -780,8 +751,6 @@ function setupMenu() {
 menu.models.addHTML('<hr style="border-style: inset; border-color: dimgray">');
 menu.models.addBool('gestures', userConfig.gesture, 'enabled', (val) => userConfig.gesture.enabled = val);
 menu.models.addHTML('<hr style="border-style: inset; border-color: dimgray">');
-menu.models.addBool('body segmentation', userConfig.segmentation, 'enabled', (val) => userConfig.segmentation.enabled = val);
-menu.models.addHTML('<hr style="border-style: inset; border-color: dimgray">');
 menu.models.addBool('object detection', userConfig.object, 'enabled', (val) => userConfig.object.enabled = val);
 menu.models.addHTML('<hr style="border-style: inset; border-color: dimgray">');
 menu.models.addBool('face compare', compare, 'enabled', (val) => {

@@ -801,6 +770,7 @@ function setupMenu() {

 async function resize() {
 window.onresize = null;
+log('resize');
 // best setting for mobile, ignored for desktop
 // can set dynamic value such as Math.min(1, Math.round(100 * window.innerWidth / 960) / 100);
 const viewportScale = 0.7;
@@ -849,42 +819,12 @@ async function processDataURL(f, action) {
 if (e.target.result.startsWith('data:video')) await processVideo(e.target.result, f.name);
 document.getElementById('canvas').style.display = 'none';
 }
-if (action === 'background') {
-const image = new Image();
-image.onerror = async () => status('image loading error');
-image.onload = async () => {
-ui.background = image;
-if (document.getElementById('canvas').style.display === 'block') { // replace canvas used for video
-const canvas = document.getElementById('canvas');
-const ctx = canvas.getContext('2d');
-const seg = await human.segmentation(canvas, ui.background, userConfig);
-if (seg.canvas) ctx.drawImage(seg.canvas, 0, 0);
-} else {
-const canvases = document.getElementById('samples-container').children; // replace loaded images
-for (const canvas of canvases) {
-const ctx = canvas.getContext('2d');
-const seg = await human.segmentation(canvas, ui.background, userConfig);
-if (seg.canvas) ctx.drawImage(seg.canvas, 0, 0);
-}
-}
-};
-image.src = e.target.result;
-}
 resolve(true);
 };
 reader.readAsDataURL(f);
 });
 }

-async function runSegmentation() {
-document.getElementById('file-background').onchange = async (evt) => {
-userConfig.segmentation.enabled = true;
-evt.preventDefault();
-if (evt.target.files.length < 2) ui.columns = 1;
-for (const f of evt.target.files) await processDataURL(f, 'background');
-};
-}

 async function dragAndDrop() {
 document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
 document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
@@ -922,10 +862,10 @@ async function pwaRegister() {
 const regs = await navigator.serviceWorker.getRegistrations();
 for (const reg of regs) {
 log('pwa found:', reg.scope);
-if (reg.scope.startsWith(location.origin)) found = reg;
+if (reg.scope.startsWith(window.location.origin)) found = reg;
 }
 if (!found) {
-const reg = await navigator.serviceWorker.register(pwa.scriptFile, { scope: location.pathname });
+const reg = await navigator.serviceWorker.register(pwa.scriptFile, { scope: window.location.pathname });
 found = reg;
 log('pwa registered:', reg.scope);
 }
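Both bare `location` references are qualified as `window.location`; the registration logic itself is unchanged. The registration shape, with the script file name as a placeholder (the demo reads it from `pwa.scriptFile`):

```js
// Sketch: 'pwa-serviceworker.js' is illustrative.
if ('serviceWorker' in navigator) {
  navigator.serviceWorker.register('pwa-serviceworker.js', { scope: window.location.pathname })
    .then((reg) => console.log('pwa registered:', reg.scope))
    .catch((err) => console.error('pwa registration failed', err));
}
```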
@@ -957,8 +897,7 @@ async function main() {
 if (ui.detectThread) cancelAnimationFrame(ui.detectThread);
 if (ui.drawThread) cancelAnimationFrame(ui.drawThread);
 const msg = evt.reason.message || evt.reason || evt;
-// eslint-disable-next-line no-console
-console.error(msg);
+console.error(msg); // eslint-disable-line no-console
 document.getElementById('log').innerHTML = msg;
 status(`exception: ${msg}`);
 evt.preventDefault();

@@ -981,7 +920,7 @@ async function main() {
 await pwaRegister();

 // parse url search params
-const params = new URLSearchParams(location.search);
+const params = new URLSearchParams(window.location.search);
 log('url options:', params.toString());
 if (params.has('worker')) {
 ui.useWorker = JSON.parse(params.get('worker'));
@@ -1024,10 +963,8 @@ async function main() {
 // we've merged human defaults with user config and now lets store it back so it can be accessed by methods such as menu
 userConfig = human.config;
 if (typeof tf !== 'undefined') {
-// eslint-disable-next-line no-undef
-log('TensorFlow external version:', tf.version);
-// eslint-disable-next-line no-undef
-human.tf = tf; // use externally loaded version of tfjs
+log('TensorFlow external version:', tf.version); // eslint-disable-line no-undef
+human.tf = tf; // eslint-disable-line no-undef
 }
 log('tfjs version:', human.tf.version.tfjs);

@@ -1040,8 +977,7 @@ async function main() {
 if (ui.modelsPreload && !ui.useWorker) {
 status('loading');
 await human.load(userConfig); // this is not required, just pre-loads all models
-const loaded = Object.keys(human.models).filter((a) => human.models[a]);
-log('demo loaded models:', loaded);
+log('demo loaded models:', human.models.loaded());
 } else {
 await human.init();
 }
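Enumerating loaded models by filtering `Object.keys(human.models)` gives way to the `human.models.loaded()` accessor available in newer releases. A usage sketch, inside an async context:

```js
// Pre-loading is optional; detect() loads required models on demand.
await human.load(userConfig);
log('demo loaded models:', human.models.loaded()); // list of loaded models
```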
@@ -1063,9 +999,6 @@ async function main() {
 // init drag & drop
 await dragAndDrop();

-// init segmentation
-await runSegmentation();
-
 if (params.has('image')) {
 try {
 const image = JSON.parse(params.get('image'));

@@ -1086,7 +1019,7 @@ async function main() {
 }

 if (human.config.debug) log('environment:', human.env);
-if (human.config.backend === 'humangl' && human.config.debug) log('backend:', human.gl);
+if (human.config.backend === 'webgl' && human.config.debug) log('backend:', human.gl);
 }

 window.onload = main;
@@ -16,10 +16,11 @@
 node demo/nodejs/node-multiprocess.js
 ```

+<!-- eslint-skip -->
 ```json
 2021-06-01 08:54:19 INFO: @vladmandic/human version 2.0.0
 2021-06-01 08:54:19 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0
-2021-06-01 08:54:19 INFO: FaceAPI multi-process test
+2021-06-01 08:54:19 INFO: Human multi-process test
 2021-06-01 08:54:19 STATE: Enumerated images: ./assets 15
 2021-06-01 08:54:19 STATE: Main: started worker: 130362
 2021-06-01 08:54:19 STATE: Main: started worker: 130363
@@ -9,10 +9,10 @@
 <meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
 <meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
 <meta name="theme-color" content="#000000">
-<link rel="manifest" href="../manifest.webmanifest">
+<link rel="manifest" href="../../manifest.webmanifest">
 <link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
 <link rel="apple-touch-icon" href="../../assets/icon.png">
-<script src="./index.js" type="module"></script>
+<script src="../multithread/index.js" type="module"></script>
 <style>
 @font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
 html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
@@ -5,15 +5,15 @@
 *
 */

-import Human from '../../dist/human.esm.js'; // equivalent of @vladmandic/human
+import { Human } from '../../dist/human.esm.js'; // equivalent of @vladmandic/human
 import GLBench from '../helpers/gl-bench.js';

-const workerJS = './worker.js';
+const workerJS = '../multithread/worker.js';

 const config = {
 main: { // processes input and runs gesture analysis
 warmup: 'none',
-backend: 'humangl',
+backend: 'webgl',
 modelBasePath: '../../models/',
 async: false,
 filter: { enabled: true },

@@ -26,7 +26,7 @@ const config = {
 },
 face: { // runs all face models
 warmup: 'none',
-backend: 'humangl',
+backend: 'webgl',
 modelBasePath: '../../models/',
 async: false,
 filter: { enabled: false },

@@ -39,7 +39,7 @@ const config = {
 },
 body: { // runs body model
 warmup: 'none',
-backend: 'humangl',
+backend: 'webgl',
 modelBasePath: '../../models/',
 async: false,
 filter: { enabled: false },

@@ -52,7 +52,7 @@ const config = {
 },
 hand: { // runs hands model
 warmup: 'none',
-backend: 'humangl',
+backend: 'webgl',
 modelBasePath: '../../models/',
 async: false,
 filter: { enabled: false },

@@ -65,7 +65,7 @@ const config = {
 },
 object: { // runs object model
 warmup: 'none',
-backend: 'humangl',
+backend: 'webgl',
 modelBasePath: '../../models/',
 async: false,
 filter: { enabled: false },

@@ -130,8 +130,7 @@ const result = { // initialize empty result object which will be partially fille
 function log(...msg) {
 const dt = new Date();
 const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
-// eslint-disable-next-line no-console
-console.log(ts, ...msg);
+console.log(ts, ...msg); // eslint-disable-line no-console
 }

 async function drawResults() {

@@ -203,7 +202,9 @@ async function setupCamera() {
 },
 };
 // enumerate devices for diag purposes
-navigator.mediaDevices.enumerateDevices().then((devices) => log('enumerated devices:', devices));
+navigator.mediaDevices.enumerateDevices()
+.then((devices) => log('enumerated devices:', devices))
+.catch(() => log('mediaDevices error'));
 log('camera constraints', constraints);
 try {
 stream = await navigator.mediaDevices.getUserMedia(constraints);

@@ -230,7 +231,7 @@ async function setupCamera() {
 };
 });
 // attach input to video element
-if (stream && video) video['srcObject'] = stream;
+if (stream && video) video.srcObject = stream;
 return promise;
 }
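The camera hunks add an explicit `.catch` to device enumeration and drop the bracket-notation `srcObject` access. The overall attachment flow those hunks describe, as a sketch with an illustrative constraints object:

```js
// Sketch: constraints are illustrative; the demo builds them dynamically.
const constraints = { audio: false, video: { facingMode: 'user' } };
navigator.mediaDevices.enumerateDevices()
  .then((devices) => console.log('enumerated devices:', devices))
  .catch(() => console.log('mediaDevices error'));
navigator.mediaDevices.getUserMedia(constraints)
  .then((stream) => { document.getElementById('video').srcObject = stream; })
  .catch((err) => console.log('camera error', err));
```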
@@ -6,11 +6,10 @@
 */

 const fs = require('fs');
-const log = require('@vladmandic/pilogger');
+const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require

-// workers actual import tfjs and faceapi modules
-// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
-const tf = require('@tensorflow/tfjs-node');
+// workers actual import tfjs and human modules
+const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
 const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;

 let human = null;

@@ -36,7 +35,7 @@ const myConfig = {
 object: { enabled: true },
 };

-// read image from a file and create tensor to be used by faceapi
+// read image from a file and create tensor to be used by human
 // this way we don't need any monkey patches
 // you can add any pre-proocessing here such as resizing, etc.
 async function image(img) {

@@ -45,7 +44,7 @@ async function image(img) {
 return tensor;
 }

-// actual faceapi detection
+// actual human detection
 async function detect(img) {
 const tensor = await image(img);
 const result = await human.detect(tensor);

@@ -64,11 +63,9 @@ async function main() {

 // on worker start first initialize message handler so we don't miss any messages
 process.on('message', (msg) => {
-// @ts-ignore
-if (msg.exit && process.exit) process.exit(); // if main told worker to exit
-// @ts-ignore
+// if main told worker to exit
+if (msg.exit && process.exit) process.exit(); // eslint-disable-line no-process-exit
 if (msg.test && process.send) process.send({ test: true });
-// @ts-ignore
 if (msg.image) detect(msg.image); // if main told worker to process image
 log.data('Worker received message:', process.pid, msg); // generic log
 });
@@ -8,13 +8,12 @@

 const fs = require('fs');
 const path = require('path');
-// eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require
-const log = require('@vladmandic/pilogger'); // this is my simple logger with few extra features
-const child_process = require('child_process');
+const childProcess = require('child_process'); // eslint-disable-line camelcase
+const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
 // note that main process does not import human or tfjs at all, it's all done from worker process

-const workerFile = 'demo/nodejs/node-multiprocess-worker.js';
-const imgPathRoot = './assets'; // modify to include your sample images
+const workerFile = 'demo/multithread/node-multiprocess-worker.js';
+const imgPathRoot = './samples/in'; // modify to include your sample images
 const numWorkers = 4; // how many workers will be started
 const workers = []; // this holds worker processes
 const images = []; // this holds queue of enumerated images

@@ -23,7 +22,7 @@ let numImages;

 // trigered by main when worker sends ready message
 // if image pool is empty, signal worker to exit otherwise dispatch image to worker and remove image from queue
-async function detect(worker) {
+async function submitDetect(worker) {
 if (!t[2]) t[2] = process.hrtime.bigint(); // first time do a timestamp so we can measure initial latency
 if (images.length === numImages) worker.send({ test: true }); // for first image in queue just measure latency
 if (images.length === 0) worker.send({ exit: true }); // nothing left in queue

@@ -58,7 +57,7 @@ async function main() {
 });

 log.header();
-log.info('FaceAPI multi-process test');
+log.info('Human multi-process test');

 // enumerate all images into queue
 const dir = fs.readdirSync(imgPathRoot);

@@ -74,13 +73,13 @@ async function main() {
 // manage worker processes
 for (let i = 0; i < numWorkers; i++) {
 // create worker process
-workers[i] = await child_process.fork(workerFile, ['special']);
+workers[i] = await childProcess.fork(workerFile, ['special']);
 // parse message that worker process sends back to main
 // if message is ready, dispatch next image in queue
 // if message is processing result, just print how many faces were detected
 // otherwise it's an unknown message
 workers[i].on('message', (msg) => {
-if (msg.ready) detect(workers[i]);
+if (msg.ready) submitDetect(workers[i]);
 else if (msg.image) log.data('Main: worker finished:', workers[i].pid, 'detected faces:', msg.detected.face?.length, 'bodies:', msg.detected.body?.length, 'hands:', msg.detected.hand?.length, 'objects:', msg.detected.object?.length);
 else if (msg.test) measureLatency();
 else log.data('Main: worker message:', workers[i].pid, msg);
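The multi-process demo renames `detect` to `submitDetect` (avoiding confusion with the worker-side detect) and `child_process` to camelCase `childProcess`, but the fork-and-message protocol is unchanged. A minimal round trip under that protocol, with paths and payload fields taken from the demo above:

```js
const childProcess = require('child_process');

// spawn one worker and drive it through a single image
const worker = childProcess.fork('demo/multithread/node-multiprocess-worker.js', ['special']);
worker.on('message', (msg) => {
  if (msg.ready) worker.send({ image: 'samples/in/ai-body.jpg' }); // dispatch one image
  else if (msg.image) worker.send({ exit: true }); // result received, tell worker to exit
});
```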
@@ -1,7 +1,7 @@
 /// <reference lib="webworker" />

 // load Human using IIFE script as Chome Mobile does not support Modules as Workers
-self.importScripts('../../dist/human.js');
+self.importScripts('../../dist/human.js'); // eslint-disable-line no-restricted-globals

 let human;

@@ -9,9 +9,8 @@ onmessage = async (msg) => {
 // received from index.js using:
 // worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);

-// @ts-ignore // Human is registered as global namespace using IIFE script
-// eslint-disable-next-line no-undef, new-cap
-if (!human) human = new Human.default(msg.data.config);
+// Human is registered as global namespace using IIFE script
+if (!human) human = new Human.default(msg.data.config); // eslint-disable-line no-undef, new-cap
 const image = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
 let result = {};
 result = await human.detect(image, msg.data.config);
@@ -28,7 +28,8 @@ or you can pass a path to image to analyze, either on local filesystem or using
 node demo/nodejs/node.js
 ```

-```json
+<!-- eslint-skip -->
+```js
 2021-06-01 08:52:15 INFO: @vladmandic/human version 2.0.0
 2021-06-01 08:52:15 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0
 2021-06-01 08:52:15 INFO: Current folder: /home/vlado/dev/human

@@ -82,7 +83,7 @@ node demo/nodejs/node.js
 detector: { modelPath: 'handdetect.json' },
 skeleton: { modelPath: 'handskeleton.json' }
 },
-object: { enabled: true, modelPath: 'mb3-centernet.json', minConfidence: 0.2, iouThreshold: 0.4, maxDetected: 10, skipFrames: 19 }
+object: { enabled: true, modelPath: 'centernet.json', minConfidence: 0.2, iouThreshold: 0.4, maxDetected: 10, skipFrames: 19 }
 }
 08:52:15.673 Human: version: 2.0.0
 08:52:15.674 Human: tfjs version: 3.6.0

@@ -96,7 +97,7 @@ node demo/nodejs/node.js
 08:52:15.847 Human: load model: file://models/handdetect.json
 08:52:15.847 Human: load model: file://models/handskeleton.json
 08:52:15.914 Human: load model: file://models/movenet-lightning.json
-08:52:15.957 Human: load model: file://models/mb3-centernet.json
+08:52:15.957 Human: load model: file://models/centernet.json
 08:52:16.015 Human: load model: file://models/faceres.json
 08:52:16.015 Human: tf engine state: 50796152 bytes 1318 tensors
 2021-06-01 08:52:16 INFO: Loaded: [ 'face', 'movenet', 'handpose', 'emotion', 'centernet', 'faceres', [length]: 6 ]
@@ -0,0 +1,66 @@
+/**
+ * Human simple demo for NodeJS
+ */
+
+const childProcess = require('child_process'); // eslint-disable-line camelcase
+const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
+const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
+
+const config = {
+  cacheSensitivity: 0.01,
+  wasmPlatformFetch: true,
+  modelBasePath: 'https://vladmandic.github.io/human-models/models/',
+};
+const count = 10;
+
+async function loadImage(input) {
+  const inputImage = await canvas.loadImage(input);
+  const inputCanvas = new canvas.Canvas(inputImage.width, inputImage.height);
+  const inputCtx = inputCanvas.getContext('2d');
+  inputCtx.drawImage(inputImage, 0, 0);
+  const imageData = inputCtx.getImageData(0, 0, inputCanvas.width, inputCanvas.height);
+  process.send({ input, resolution: [inputImage.width, inputImage.height] });
+  return imageData;
+}
+
+async function runHuman(module, backend) {
+  if (backend === 'wasm') require('@tensorflow/tfjs-backend-wasm'); // eslint-disable-line node/no-unpublished-require, global-require
+  const Human = require('../../dist/' + module); // eslint-disable-line global-require, import/no-dynamic-require
+  config.backend = backend;
+  const human = new Human.Human(config);
+  human.env.Canvas = canvas.Canvas;
+  human.env.Image = canvas.Image;
+  human.env.ImageData = canvas.ImageData;
+  process.send({ human: human.version, module });
+  await human.init();
+  process.send({ desired: human.config.backend, wasm: human.env.wasm, tfjs: human.tf.version.tfjs, tensorflow: human.env.tensorflow });
+  const imageData = await loadImage('samples/in/ai-body.jpg');
+  const t0 = human.now();
+  await human.load();
+  const t1 = human.now();
+  await human.warmup();
+  const t2 = human.now();
+  for (let i = 0; i < count; i++) await human.detect(imageData);
+  const t3 = human.now();
+  process.send({ backend: human.tf.getBackend(), load: Math.round(t1 - t0), warmup: Math.round(t2 - t1), detect: Math.round(t3 - t2), count, memory: human.tf.memory().numBytes });
+}
+
+async function executeWorker(args) {
+  return new Promise((resolve) => {
+    const worker = childProcess.fork(process.argv[1], args);
+    worker.on('message', (msg) => log.data(msg));
+    worker.on('exit', () => resolve(true));
+  });
+}
+
+async function main() {
+  if (process.argv[2]) {
+    await runHuman(process.argv[2], process.argv[3]);
+  } else {
+    await executeWorker(['human.node.js', 'tensorflow']);
+    await executeWorker(['human.node-gpu.js', 'tensorflow']);
+    await executeWorker(['human.node-wasm.js', 'wasm']);
+  }
+}
+
+main();
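Note: because each benchmark runs in a forked child, results arrive in the parent as IPC messages rather than return values. As a minimal sketch (not part of this diff; the collector function and message filter are illustrative only), the final summary each worker emits could be gathered like this:

// hypothetical collector, shown for illustration: gathers the summary message
// (the one carrying a `backend` field) that each forked worker sends before exiting
const results = [];
function executeWorkerAndCollect(args) {
  return new Promise((resolve) => {
    const worker = childProcess.fork(process.argv[1], args);
    worker.on('message', (msg) => { if (msg.backend) results.push({ module: args[0], ...msg }); });
    worker.on('exit', () => resolve(results));
  });
}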
@@ -1,18 +1,22 @@
 /**
  * Human demo for NodeJS using Canvas library
+ *
+ * Requires [canvas](https://www.npmjs.com/package/canvas) to provide Canvas functionality in NodeJS environment
  */

 const fs = require('fs');
 const process = require('process');
-const log = require('@vladmandic/pilogger');
-const canvas = require('canvas');
-require('@tensorflow/tfjs-node'); // for nodejs, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
-const Human = require('../../dist/human.node.js'); // this is 'const Human = require('../dist/human.node-gpu.js').default;'
+const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
+// in nodejs environments tfjs-node is required to be loaded before human
+const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
+const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
+// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
+const Human = require('../../dist/human.node.js'); // use this when using human in dev mode

 const config = { // just enable all and leave default settings
   debug: false,
-  face: { enabled: true }, // includes mesh, iris, emotion, descriptor
-  hand: { enabled: true, maxDetected: 2, minConfidence: 0.5, detector: { modelPath: 'handtrack.json' } }, // use alternative hand model
+  face: { enabled: true, detector: { maxDetected: 10 } }, // includes mesh, iris, emotion, descriptor
+  hand: { enabled: true, maxDetected: 20, minConfidence: 0.5, detector: { modelPath: 'handtrack.json' } }, // use alternative hand model
   body: { enabled: true },
   object: { enabled: true },
   gestures: { enabled: true },
@@ -28,15 +32,16 @@ async function main() {

   // init
   const human = new Human.Human(config); // create instance of human
-  log.info('Human:', human.version);
+  log.info('Human:', human.version, 'TF:', tf.version_core);

   await human.load(); // pre-load models
-  log.info('Loaded models:', Object.keys(human.models).filter((a) => human.models[a]));
+  log.info('Loaded models:', human.models.loaded());
   log.info('Memory state:', human.tf.engine().memory());

   // parse cmdline
   const input = process.argv[2];
-  const output = process.argv[3];
+  let output = process.argv[3];
+  if (!output.toLowerCase().endsWith('.jpg')) output += '.jpg';
   if (process.argv.length !== 4) log.error('Parameters: <input-image> <output-image> missing');
   else if (!fs.existsSync(input) && !input.startsWith('http')) log.error(`File not found: ${process.argv[2]}`);
   else {
@@ -50,9 +55,6 @@ async function main() {

     // run detection
     const result = await human.detect(imageData);
-    // run segmentation
-    // const seg = await human.segmentation(inputCanvas);
-    // log.data('Segmentation:', { data: seg.data.length, alpha: typeof seg.alpha, canvas: typeof seg.canvas });

     // print results summary
     const persons = result.persons; // invoke persons getter, only used to print summary on console
@@ -60,7 +62,7 @@ async function main() {
       const face = persons[i].face;
       const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.iris}` : null;
       const body = persons[i].body;
-      const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints?.length}` : null;
+      const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints.length}` : null;
       log.data(`Detected: #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`);
     }
@@ -2,23 +2,18 @@
  * Human demo for NodeJS
  */

-const log = require('@vladmandic/pilogger');
 const fs = require('fs');
 const process = require('process');

-let fetch; // fetch is dynamically imported later
-
-// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
-// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
-const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
-
-// load specific version of Human library that matches TensorFlow mode
-const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
+const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
+// in nodejs environments tfjs-node is required to be loaded before human
+const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
+// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
+const Human = require('../../dist/human.node.js'); // use this when using human in dev mode

 let human = null;

 const myConfig = {
-  backend: 'tensorflow',
   modelBasePath: 'file://models/',
   debug: false,
   async: true,
@@ -41,13 +36,13 @@ async function detect(input) {
   let buffer;
   log.info('Loading image:', input);
   if (input.startsWith('http:') || input.startsWith('https:')) {
-    fetch = (await import('node-fetch')).default;
     const res = await fetch(input);
-    if (res && res.ok) buffer = await res.buffer();
+    if (res && res.ok) buffer = Buffer.from(await res.arrayBuffer());
     else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type'));
   } else {
     buffer = fs.readFileSync(input);
   }
+  log.data('Image bytes:', buffer?.length, 'buffer:', buffer?.slice(0, 32));

   // decode image using tfjs-node so we don't need external depenencies
   if (!buffer) return;
@@ -61,15 +56,16 @@ async function detect(input) {
 async function main() {
   log.header();

-  human = new Human(myConfig);
+  human = new Human.Human(myConfig);
+  log.info('Human:', human.version, 'TF:', tf.version_core);

+  if (human.events) {
     human.events.addEventListener('warmup', () => {
       log.info('Event Warmup');
     });

     human.events.addEventListener('load', () => {
-      const loaded = Object.keys(human.models).filter((a) => human.models[a]);
-      log.info('Event Loaded:', loaded, human.tf.engine().memory());
+      log.info('Event Loaded:', human.models.loaded(), human.tf.engine().memory());
     });

     human.events.addEventListener('image', () => {
@@ -81,12 +77,13 @@ async function main() {
       const persons = human.result.persons;
       for (let i = 0; i < persons.length; i++) {
         const face = persons[i].face;
-        const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.iris}` : null;
+        const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.distance}` : null;
         const body = persons[i].body;
         const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints?.length}` : null;
         log.data(`  #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`);
       }
     });
+  }

   await human.tf.ready(); // wait until tf is ready
@@ -0,0 +1,30 @@
+/**
+ * Human demo for NodeJS using http fetch to get image file
+ *
+ * Requires [node-fetch](https://www.npmjs.com/package/node-fetch) to provide `fetch` functionality in NodeJS environment
+ */
+
+const fs = require('fs');
+const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
+
+// in nodejs environments tfjs-node is required to be loaded before human
+const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
+// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
+const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
+
+const humanConfig = {
+  modelBasePath: 'https://vladmandic.github.io/human/models/',
+};
+
+async function main(inputFile) {
+  global.fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-unpublished-import, import/no-unresolved, node/no-missing-import, node/no-extraneous-import
+  const human = new Human.Human(humanConfig); // create instance of human using default configuration
+  log.info('Human:', human.version, 'TF:', tf.version_core);
+  await human.load(); // optional as models would be loaded on-demand first time they are required
+  await human.warmup(); // optional as model warmup is performed on-demand first time its executed
+  const buffer = fs.readFileSync(inputFile); // read file data into buffer
+  const tensor = human.tf.node.decodeImage(buffer); // decode jpg data
+  const result = await human.detect(tensor); // run detection; will initialize backend and on-demand load models
+  log.data(result.gesture);
+}
+
+main('samples/in/ai-body.jpg');
@@ -2,17 +2,18 @@
  * Human Person Similarity test for NodeJS
  */

-const log = require('@vladmandic/pilogger');
 const fs = require('fs');
 const process = require('process');
-// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
-const tf = require('@tensorflow/tfjs-node');
-const Human = require('../../dist/human.node.js').default;
+const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
+// in nodejs environments tfjs-node is required to be loaded before human
+const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
+// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
+const Human = require('../../dist/human.node.js'); // use this when using human in dev mode

 let human = null;

 const myConfig = {
-  backend: 'tensorflow',
   modelBasePath: 'file://models/',
   debug: true,
   face: { emotion: { enabled: false } },
@@ -22,23 +23,21 @@ const myConfig = {
 };

 async function init() {
-  human = new Human(myConfig);
+  human = new Human.Human(myConfig);
   await human.tf.ready();
-  log.info('Human:', human.version);
+  log.info('Human:', human.version, 'TF:', tf.version_core);
   await human.load();
-  const loaded = Object.keys(human.models).filter((a) => human.models[a]);
-  log.info('Loaded:', loaded);
+  log.info('Loaded:', human.models.loaded());
   log.info('Memory state:', human.tf.engine().memory());
 }

 async function detect(input) {
   if (!fs.existsSync(input)) {
-    log.error('Cannot load image:', input);
-    process.exit(1);
+    throw new Error('Cannot load image:', input);
   }
   const buffer = fs.readFileSync(input);
   const tensor = human.tf.node.decodeImage(buffer, 3);
-  log.state('Loaded image:', input, tensor['shape']);
+  log.state('Loaded image:', input, tensor.shape);
   const result = await human.detect(tensor, myConfig);
   human.tf.dispose(tensor);
   log.state('Detected faces:', result.face.length);
@@ -50,16 +49,15 @@ async function main() {
   log.header();
   if (process.argv.length !== 4) {
     log.error('Parameters: <first image> <second image> missing');
-    process.exit(1);
+    return;
   }
   await init();
   const res1 = await detect(process.argv[2]);
   const res2 = await detect(process.argv[3]);
   if (!res1 || !res1.face || res1.face.length === 0 || !res2 || !res2.face || res2.face.length === 0) {
-    log.error('Could not detect face descriptors');
-    process.exit(1);
+    throw new Error('Could not detect face descriptors');
   }
-  const similarity = human.similarity(res1.face[0].embedding, res2.face[0].embedding, { order: 2 });
+  const similarity = human.match.similarity(res1.face[0].embedding, res2.face[0].embedding, { order: 2 });
   log.data('Similarity: ', similarity);
 }
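Two API moves in this diff are worth calling out: the bundle now exports the class as `Human.Human` (no `.default`), and similarity scoring lives under the `match` namespace. A minimal sketch of the updated call pattern, assuming `res1`/`res2` are detection results with face embeddings as in the demo above:

// sketch of the new-style API surface shown in this diff (not additional demo code)
const human = new Human.Human(myConfig); // class is a named export on the required module
// compare two face descriptors; embeddings come from result.face[n].embedding
const similarity = human.match.similarity(res1.face[0].embedding, res2.face[0].embedding, { order: 2 });
log.data('Similarity:', similarity); // normalized score where higher means more similar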
@@ -1,13 +1,32 @@
-const fs = require('fs');
-const Human = require('../../dist/human.node.js').default; // this is same as `@vladmandic/human` but using relative paths
-
-async function main(inputFile) {
-  const human = new Human(); // create instance of human using default configuration
+/**
+ * Human simple demo for NodeJS
+ */
+
+const fs = require('fs');
+const process = require('process');
+
+// in nodejs environments tfjs-node is required to be loaded before human
+const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
+// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
+const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
+
+const humanConfig = {
+  // add any custom config here
+  debug: true,
+  body: { enabled: false },
+};
+
+async function detect(inputFile) {
+  const human = new Human.Human(humanConfig); // create instance of human using default configuration
+  console.log('Human:', human.version, 'TF:', tf.version_core); // eslint-disable-line no-console
+  await human.load(); // optional as models would be loaded on-demand first time they are required
+  await human.warmup(); // optional as model warmup is performed on-demand first time its executed
   const buffer = fs.readFileSync(inputFile); // read file data into buffer
   const tensor = human.tf.node.decodeImage(buffer); // decode jpg data
+  console.log('loaded input file:', inputFile, 'resolution:', tensor.shape); // eslint-disable-line no-console
   const result = await human.detect(tensor); // run detection; will initialize backend and on-demand load models
-  // eslint-disable-next-line no-console
-  console.log(result);
+  console.log(result); // eslint-disable-line no-console
 }

-main('samples/in/ai-body.jpg');
+if (process.argv.length === 3) detect(process.argv[2]); // if input file is provided as cmdline parameter use it
+else detect('samples/in/ai-body.jpg'); // else use built-in test inputfile
@@ -7,27 +7,26 @@
  * If you want process at specific intervals, set output fps to some value
  * If you want to process an input stream, set real-time flag and set input as required
  *
- * Note that pipe2jpeg is not part of Human dependencies and should be installed manually
- * Working version of ffmpeg must be present on the system
+ * Note that [pipe2jpeg](https://www.npmjs.com/package/pipe2jpeg) is not part of Human dependencies and should be installed manually
+ * Working version of `ffmpeg` must be present on the system
  */

+const process = require('process');
 const spawn = require('child_process').spawn;
-const log = require('@vladmandic/pilogger');
-// @ts-ignore pipe2jpeg is not installed by default
-// eslint-disable-next-line node/no-missing-require
-const Pipe2Jpeg = require('pipe2jpeg');
-// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
-// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
-const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
-// load specific version of Human library that matches TensorFlow mode
-const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
+const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
+// in nodejs environments tfjs-node is required to be loaded before human
+// const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
+const Pipe2Jpeg = require('pipe2jpeg'); // eslint-disable-line node/no-missing-require, import/no-unresolved
+// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
+const Human = require('../../dist/human.node.js'); // use this when using human in dev mode

 let count = 0; // counter
 let busy = false; // busy flag
-const inputFile = './test.mp4';
+let inputFile = './test.mp4';
+if (process.argv.length === 3) inputFile = process.argv[2];

 const humanConfig = {
-  backend: 'tensorflow',
   modelBasePath: 'file://models/',
   debug: false,
   async: true,
@@ -45,7 +44,7 @@ const humanConfig = {
   object: { enabled: false },
 };

-const human = new Human(humanConfig);
+const human = new Human.Human(humanConfig);
 const pipe2jpeg = new Pipe2Jpeg();

 const ffmpegParams = [
@@ -62,18 +61,16 @@ const ffmpegParams = [
   'pipe:1', // output to unix pipe that is then captured by pipe2jpeg
 ];

-async function process(jpegBuffer) {
+async function detect(jpegBuffer) {
   if (busy) return; // skip processing if busy
   busy = true;
-  const decoded = tf.node.decodeJpeg(jpegBuffer, 3); // decode jpeg buffer to raw tensor
-  const tensor = tf.expandDims(decoded, 0); // almost all tf models use first dimension as batch number so we add it
-  tf.dispose(decoded);

-  log.state('input frame:', ++count, 'size:', jpegBuffer.length, 'decoded shape:', tensor.shape);
+  const tensor = human.tf.node.decodeJpeg(jpegBuffer, 3); // decode jpeg buffer to raw tensor
   const res = await human.detect(tensor);
-  log.data('gesture', JSON.stringify(res.gesture));
-  // do processing here
-  tf.dispose(tensor); // must dispose tensor
+  human.tf.dispose(tensor); // must dispose tensor
+  // start custom processing here
+  log.data('frame', { frame: ++count, size: jpegBuffer.length, shape: tensor.shape, face: res?.face?.length, body: res?.body?.length, hand: res?.hand?.length, gesture: res?.gesture?.length });
+  if (res?.face?.[0]) log.data('person', { score: [res.face[0].boxScore, res.face[0].faceScore], age: res.face[0].age || 0, gender: [res.face[0].genderScore || 0, res.face[0].gender], emotion: res.face[0].emotion?.[0] });
+  // at the of processing mark loop as not busy so it can process next frame
   busy = false;
 }

@@ -81,8 +78,9 @@ async function main() {
   log.header();
   await human.tf.ready();
   // pre-load models
-  log.info('human:', human.version);
-  pipe2jpeg.on('jpeg', (jpegBuffer) => process(jpegBuffer));
+  log.info({ human: human.version, tf: human.tf.version_core });
+  log.info({ input: inputFile });
+  pipe2jpeg.on('data', (jpegBuffer) => detect(jpegBuffer));

   const ffmpeg = spawn('ffmpeg', ffmpegParams, { stdio: ['ignore', 'pipe', 'ignore'] });
   ffmpeg.on('error', (error) => log.error('ffmpeg error:', error));
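The renamed `detect` handler drops frames with a module-level busy flag so detection never queues behind itself while ffmpeg keeps producing jpegs. As a standalone sketch of that guard (the try/finally wrapper is an illustrative addition, not in the diff):

// frame-dropping guard as used above; ffmpeg emits jpegs regardless of consumer speed
let busy = false;
async function onFrame(jpegBuffer) {
  if (busy) return; // drop this frame while a previous detection is still running
  busy = true;
  try {
    const tensor = human.tf.node.decodeJpeg(jpegBuffer, 3); // decode jpeg buffer to raw tensor
    await human.detect(tensor);
    human.tf.dispose(tensor); // must dispose tensor
  } finally {
    busy = false; // always release the flag so the next frame can be processed
  }
}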
@@ -2,20 +2,18 @@
  * Human demo for NodeJS
  * Unsupported sample of using external utility fswebcam to capture screenshot from attached webcam in regular intervals and process it using Human
  *
- * Note that node-webcam is not part of Human dependencies and should be installed manually
- * Working version of fswebcam must be present on the system
+ * Note that [node-webcam](https://www.npmjs.com/package/node-webcam) is not part of Human dependencies and should be installed manually
+ * Working version of `fswebcam` must be present on the system
  */

 let initial = true; // remember if this is the first run to print additional details
-const log = require('@vladmandic/pilogger');
-// @ts-ignore node-webcam is not installed by default
-// eslint-disable-next-line node/no-missing-require
-const nodeWebCam = require('node-webcam');
-// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
-// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
-const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
-// load specific version of Human library that matches TensorFlow mode
-const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
+const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
+const nodeWebCam = require('node-webcam'); // eslint-disable-line import/no-unresolved, node/no-missing-require
+
+// in nodejs environments tfjs-node is required to be loaded before human
+const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
+// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
+const Human = require('../../dist/human.node.js'); // use this when using human in dev mode

 // options for node-webcam
 const tempFile = 'webcam-snap'; // node-webcam requires writting snapshot to a file, recommended to use tmpfs to avoid excessive disk writes
@@ -27,10 +25,10 @@ const camera = nodeWebCam.create(optionsCamera);

 // options for human
 const optionsHuman = {
-  backend: 'tensorflow',
   modelBasePath: 'file://models/',
 };
-const human = new Human(optionsHuman);
+
+const human = new Human.Human(optionsHuman);

 function buffer2tensor(buffer) {
   return human.tf.tidy(() => {
@@ -62,18 +60,20 @@ async function detect() {
   } else {
     const tensor = buffer2tensor(data); // create tensor from image buffer
     if (initial) log.data('input tensor:', tensor.shape);
-    // eslint-disable-next-line promise/no-promise-in-callback
-    human.detect(tensor).then((result) => {
+    human.detect(tensor) // eslint-disable-line promise/no-promise-in-callback
+      .then((result) => {
         if (result && result.face && result.face.length > 0) {
           for (let i = 0; i < result.face.length; i++) {
             const face = result.face[i];
-            const emotion = face.emotion.reduce((prev, curr) => (prev.score > curr.score ? prev : curr));
-            log.data(`detected face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} iris:${face.iris}`);
+            const emotion = face.emotion?.reduce((prev, curr) => (prev.score > curr.score ? prev : curr));
+            log.data(`detected face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion?.score} emotion:${emotion?.emotion} iris:${face.iris}`);
           }
         } else {
           log.data(' Face: N/A');
         }
-      });
+        return result;
+      })
+      .catch(() => log.error('human detect error'));
   }
   initial = false;
 });
@@ -82,6 +82,7 @@ async function detect() {
 }

 async function main() {
+  log.info('human:', human.version, 'tf:', tf.version_core);
   camera.list((list) => {
     log.data('detected camera:', list);
   });
@@ -2,19 +2,15 @@
  * Human demo for NodeJS
  */

-const log = require('@vladmandic/pilogger');
 const fs = require('fs');
 const path = require('path');
 const process = require('process');
+const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require

-let fetch; // fetch is dynamically imported later
-
-// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
-// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
-const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
-
-// load specific version of Human library that matches TensorFlow mode
-const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
+// in nodejs environments tfjs-node is required to be loaded before human
+const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
+// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
+const Human = require('../../dist/human.node.js'); // use this when using human in dev mode

 let human = null;

@@ -45,17 +41,17 @@ const myConfig = {

 async function init() {
   // create instance of human
-  human = new Human(myConfig);
+  human = new Human.Human(myConfig);
   // wait until tf is ready
   await human.tf.ready();
+  log.info('human:', human.version, 'tf:', tf.version_core);
   // pre-load models
   log.info('Human:', human.version);
   // log.info('Active Configuration', human.config);
   await human.load();
-  const loaded = Object.keys(human.models).filter((a) => human.models[a]);
-  log.info('Loaded:', loaded);
+  log.info('Loaded:', human.models.loaded());
   // log.info('Memory state:', human.tf.engine().memory());
-  log.data(tf.backend()['binding'] ? tf.backend()['binding']['TF_Version'] : null);
+  log.data(tf.backend().binding ? tf.backend().binding.TF_Version : null);
 }

 async function detect(input) {
@@ -64,11 +60,12 @@ async function detect(input) {
   log.info('Loading image:', input);
   if (input.startsWith('http:') || input.startsWith('https:')) {
     const res = await fetch(input);
-    if (res && res.ok) buffer = await res.buffer();
+    if (res && res.ok) buffer = Buffer.from(await res.arrayBuffer());
     else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type'));
   } else {
     buffer = fs.readFileSync(input);
   }
+  log.data('Image bytes:', buffer?.length, 'buffer:', buffer?.slice(0, 32));

   // decode image using tfjs-node so we don't need external depenencies
   // can also be done using canvas.js or some other 3rd party image library
@@ -88,14 +85,14 @@ async function detect(input) {
   });

   // image shape contains image dimensions and depth
-  log.state('Processing:', tensor['shape']);
+  log.state('Processing:', tensor.shape);

   // run actual detection
   let result;
   try {
     result = await human.detect(tensor, myConfig);
   } catch (err) {
-    log.error('caught');
+    log.error('caught', err);
   }

   // dispose image tensor as we no longer need it
@@ -107,7 +104,7 @@ async function detect(input) {
     for (let i = 0; i < result.face.length; i++) {
       const face = result.face[i];
       const emotion = face.emotion.reduce((prev, curr) => (prev.score > curr.score ? prev : curr));
-      log.data(`  Face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} iris:${face.iris}`);
+      log.data(`  Face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} distance:${face.distance}`);
     }
   } else {
     log.data(' Face: N/A');
@@ -191,7 +188,6 @@ async function main() {
   log.configure({ inspect: { breakLength: 265 } });
   log.header();
   log.info('Current folder:', process.env.PWD);
-  fetch = (await import('node-fetch')).default;
   await init();
   const f = process.argv[2];
   if (process.argv.length !== 3) {
@@ -199,8 +195,7 @@ async function main() {
     await test();
   } else if (!fs.existsSync(f) && !f.startsWith('http')) {
     log.error(`File not found: ${process.argv[2]}`);
-  } else {
-    if (fs.existsSync(f)) {
+  } else if (fs.existsSync(f)) {
     const stat = fs.statSync(f);
     if (stat.isDirectory()) {
       const dir = fs.readdirSync(f);
@@ -214,6 +209,5 @@ async function main() {
       await detect(f);
     }
   }
-  }
 }

 main();
@@ -1,76 +1,119 @@
+/**
+ * Human demo for NodeJS
+ *
+ * Takes input and output folder names parameters and processes all images
+ * found in input folder and creates annotated images in output folder
+ *
+ * Requires [canvas](https://www.npmjs.com/package/canvas) to provide Canvas functionality in NodeJS environment
+ */
+
 const fs = require('fs');
 const path = require('path');
 const process = require('process');
-const log = require('@vladmandic/pilogger');
-const canvas = require('canvas');
-const tf = require('@tensorflow/tfjs-node'); // for nodejs, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
-const Human = require('../../dist/human.node.js'); // this is 'const Human = require('../dist/human.node-gpu.js').default;'
+const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
+const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
+// for nodejs, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
+const tf = require('@tensorflow/tfjs-node-gpu'); // eslint-disable-line node/no-unpublished-require
+const Human = require('../../dist/human.node-gpu.js'); // this is 'const Human = require('../dist/human.node-gpu.js').default;'

 const config = { // just enable all and leave default settings
+  modelBasePath: 'file://models',
   debug: true,
-  async: false,
-  cacheSensitivity: 0,
-  face: { enabled: true, detector: { maxDetected: 20 } },
-  object: { enabled: true },
+  softwareKernels: true, // slower but enhanced precision since face rotation can work in software mode in nodejs environments
+  cacheSensitivity: 0.01,
+  face: { enabled: true, detector: { maxDetected: 100, minConfidence: 0.1 } },
+  object: { enabled: true, maxDetected: 100, minConfidence: 0.1 },
   gesture: { enabled: true },
-  hand: { enabled: true },
-  body: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/movenet-multipose.json' },
+  hand: { enabled: true, maxDetected: 100, minConfidence: 0.2 },
+  body: { enabled: true, maxDetected: 100, minConfidence: 0.1, modelPath: 'https://vladmandic.github.io/human-models/models/movenet-multipose.json' },
 };

+const poolSize = 4;
+
+const human = new Human.Human(config); // create instance of human
+
+async function saveFile(shape, buffer, result, outFile) {
+  return new Promise(async (resolve, reject) => { // eslint-disable-line no-async-promise-executor
+    const outputCanvas = new canvas.Canvas(shape[2], shape[1]); // create canvas
+    const outputCtx = outputCanvas.getContext('2d');
+    const inputImage = await canvas.loadImage(buffer); // load image using canvas library
+    outputCtx.drawImage(inputImage, 0, 0); // draw input image onto canvas
+    human.draw.all(outputCanvas, result); // use human build-in method to draw results as overlays on canvas
+    const outStream = fs.createWriteStream(outFile); // write canvas to new image file
+    outStream.on('finish', () => {
+      log.data('Output image:', outFile, outputCanvas.width, outputCanvas.height);
+      resolve();
+    });
+    outStream.on('error', (err) => {
+      log.error('Output error:', outFile, err);
+      reject();
+    });
+    const stream = outputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
+    stream.pipe(outStream);
+  });
+}
+
+async function processFile(image, inFile, outFile) {
+  const buffer = fs.readFileSync(inFile);
+  const tensor = tf.tidy(() => {
+    const decode = tf.node.decodeImage(buffer, 3);
+    const expand = tf.expandDims(decode, 0);
+    const cast = tf.cast(expand, 'float32');
+    return cast;
+  });
+  log.state('Loaded image:', inFile, tensor.shape);
+
+  const result = await human.detect(tensor);
+  human.tf.dispose(tensor);
+  log.data(`Detected: ${image}:`, 'Face:', result.face.length, 'Body:', result.body.length, 'Hand:', result.hand.length, 'Objects:', result.object.length, 'Gestures:', result.gesture.length);
+
+  if (outFile) await saveFile(tensor.shape, buffer, result, outFile);
+}
+
 async function main() {
   log.header();

   globalThis.Canvas = canvas.Canvas; // patch global namespace with canvas library
   globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library

-  const human = new Human.Human(config); // create instance of human
-  log.info('Human:', human.version);
+  log.info('Human:', human.version, 'TF:', tf.version_core);
   const configErrors = await human.validate();
   if (configErrors.length > 0) log.error('Configuration errors:', configErrors);
   await human.load(); // pre-load models
-  log.info('Loaded models:', Object.keys(human.models).filter((a) => human.models[a]));
+  log.info('Loaded models:', human.models.loaded());

   const inDir = process.argv[2];
   const outDir = process.argv[3];
-  if (process.argv.length !== 4) {
-    log.error('Parameters: <input-directory> <output-directory> missing');
+  if (!inDir) {
+    log.error('Parameters: <input-directory> missing');
     return;
   }
-  if (!fs.existsSync(inDir) || !fs.statSync(inDir).isDirectory() || !fs.existsSync(outDir) || !fs.statSync(outDir).isDirectory()) {
-    log.error('Invalid directory specified:', 'input:', fs.existsSync(inDir) ?? fs.statSync(inDir).isDirectory(), 'output:', fs.existsSync(outDir) ?? fs.statSync(outDir).isDirectory());
+  if (inDir && (!fs.existsSync(inDir) || !fs.statSync(inDir).isDirectory())) {
+    log.error('Invalid input directory:', fs.existsSync(inDir) ?? fs.statSync(inDir).isDirectory());
+    return;
+  }
+  if (!outDir) {
+    log.info('Parameters: <output-directory> missing, images will not be saved');
+  }
+  if (outDir && (!fs.existsSync(outDir) || !fs.statSync(outDir).isDirectory())) {
+    log.error('Invalid output directory:', fs.existsSync(outDir) ?? fs.statSync(outDir).isDirectory());
     return;
   }

   const dir = fs.readdirSync(inDir);
   const images = dir.filter((f) => fs.statSync(path.join(inDir, f)).isFile() && (f.toLocaleLowerCase().endsWith('.jpg') || f.toLocaleLowerCase().endsWith('.jpeg')));
   log.info(`Processing folder: ${inDir} entries:`, dir.length, 'images', images.length);
-  for (const image of images) {
-    const inFile = path.join(inDir, image);
-    const buffer = fs.readFileSync(inFile);
-    const tensor = human.tf.tidy(() => {
-      const decode = human.tf.node.decodeImage(buffer, 3);
-      const expand = human.tf.expandDims(decode, 0);
-      const cast = human.tf.cast(expand, 'float32');
-      return cast;
-    });
-    log.state('Loaded image:', inFile, tensor.shape);
-
-    const result = await human.detect(tensor);
-    tf.dispose(tensor);
-    log.data(`Detected: ${image}:`, 'Face:', result.face.length, 'Body:', result.body.length, 'Hand:', result.hand.length, 'Objects:', result.object.length, 'Gestures:', result.gesture.length);
-
-    const outputCanvas = new canvas.Canvas(tensor.shape[2], tensor.shape[1]); // create canvas
-    const outputCtx = outputCanvas.getContext('2d');
-    const inputImage = await canvas.loadImage(buffer); // load image using canvas library
-    outputCtx.drawImage(inputImage, 0, 0); // draw input image onto canvas
-    human.draw.all(outputCanvas, result); // use human build-in method to draw results as overlays on canvas
-    const outFile = path.join(outDir, image);
-    const outStream = fs.createWriteStream(outFile); // write canvas to new image file
-    outStream.on('finish', () => log.state('Output image:', outFile, outputCanvas.width, outputCanvas.height));
-    outStream.on('error', (err) => log.error('Output error:', outFile, err));
-    const stream = outputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
-    stream.pipe(outStream);
+  const t0 = performance.now();
+  const promises = [];
+  for (let i = 0; i < images.length; i++) {
+    const inFile = path.join(inDir, images[i]);
+    const outFile = outDir ? path.join(outDir, images[i]) : null;
+    promises.push(processFile(images[i], inFile, outFile));
+    if (i % poolSize === 0) await Promise.all(promises);
   }
+  await Promise.all(promises);
+  const t1 = performance.now();
+  log.info(`Processed ${images.length} images in ${Math.round(t1 - t0)} ms`);
 }

 main();
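The rewritten loop above throttles work by awaiting all queued promises on every `poolSize`-th file. Extracted as a standalone pattern (a sketch only; `work` is a hypothetical stand-in for `processFile`):

// batch-await throttle used by the rewritten demo: queue work, drain periodically
const poolSize = 4;
async function runBatched(items, work) {
  const pending = [];
  for (let i = 0; i < items.length; i++) {
    pending.push(work(items[i]));
    if (i % poolSize === 0) await Promise.all(pending); // cap in-flight work; already-settled promises resolve instantly
  }
  await Promise.all(pending); // wait for the remainder queued since the last drain
}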
@@ -0,0 +1,61 @@
+<!DOCTYPE html>
+<html lang="en">
+  <head>
+    <meta charset="utf-8">
+    <meta http-equiv="content-type" content="text/html; charset=utf-8">
+    <title>Human Demo</title>
+    <meta name="viewport" content="width=device-width, shrink-to-fit=yes">
+    <meta name="mobile-web-app-capable" content="yes">
+    <meta name="application-name" content="Human Demo">
+    <meta name="keywords" content="Human Demo">
+    <meta name="description" content="Human Demo; Author: Vladimir Mandic <mandic00@live.com>">
+    <link rel="manifest" href="../manifest.webmanifest">
+    <link rel="shortcut icon" href="../favicon.ico" type="image/x-icon">
+    <link rel="icon" sizes="256x256" href="../assets/icons/dash-256.png">
+    <link rel="apple-touch-icon" href="../assets/icons/dash-256.png">
+    <link rel="apple-touch-startup-image" href="../assets/icons/dash-256.png">
+    <style>
+      @font-face { font-family: 'CenturyGothic'; font-display: swap; font-style: normal; font-weight: 400; src: local('CenturyGothic'), url('../assets/century-gothic.ttf') format('truetype'); }
+      html { font-size: 18px; }
+      body { font-size: 1rem; font-family: "CenturyGothic", "Segoe UI", sans-serif; font-variant: small-caps; width: -webkit-fill-available; height: 100%; background: black; color: white; overflow: hidden; margin: 0; }
+      select { font-size: 1rem; font-family: "CenturyGothic", "Segoe UI", sans-serif; font-variant: small-caps; background: gray; color: white; border: none; }
+    </style>
+    <script src="../segmentation/index.js" type="module"></script>
+  </head>
+  <body>
+    <noscript><h1>javascript is required</h1></noscript>
+    <nav>
+      <div id="nav" class="nav"></div>
+    </nav>
+    <header>
+      <div id="header" class="header" style="position: fixed; top: 0; right: 0; padding: 4px; margin: 16px; background: rgba(0, 0, 0, 0.5); z-index: 10; line-height: 2rem;">
+        <label for="mode">mode</label>
+        <select id="mode" name="mode">
+          <option value="default">remove background</option>
+          <option value="alpha">draw alpha channel</option>
+          <option value="foreground">full foreground</option>
+          <option value="state">recurrent state</option>
+        </select><br>
+        <label for="composite">composite</label>
+        <select id="composite" name="composite"></select><br>
+        <label for="ratio">downsample ratio</label>
+        <input type="range" name="ratio" id="ratio" min="0.1" max="1" value="0.5" step="0.05">
+        <div id="fps" style="margin-top: 8px"></div>
+      </div>
+    </header>
+    <main>
+      <div id="main" class="main">
+        <video id="webcam" style="position: fixed; top: 0; left: 0; width: 50vw; height: 50vh"></video>
+        <img id="background" alt="background" style="position: fixed; top: 0; right: 0; width: 50vw; height: 50vh" controls></img>
+        <canvas id="output" style="position: fixed; bottom: 0; left: 0; height: 50vh"></canvas>
+        <canvas id="merge" style="position: fixed; bottom: 0; right: 0; height: 50vh"></canvas>
+      </div>
+    </main>
+    <footer>
+      <div id="footer" class="footer"></div>
+    </footer>
+    <aside>
+      <div id="aside" class="aside"></div>
+    </aside>
+  </body>
+</html>
@@ -0,0 +1,99 @@
+/**
+ * Human demo for browsers
+ * @default Human Library
+ * @summary <https://github.com/vladmandic/human>
+ * @author <https://github.com/vladmandic>
+ * @copyright <https://github.com/vladmandic>
+ * @license MIT
+ */
+
+import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
+
+const humanConfig = { // user configuration for human, used to fine-tune behavior
+  modelBasePath: 'https://vladmandic.github.io/human-models/models/',
+  filter: { enabled: true, equalization: false, flip: false },
+  face: { enabled: false },
+  body: { enabled: false },
+  hand: { enabled: false },
+  object: { enabled: false },
+  gesture: { enabled: false },
+  segmentation: {
+    enabled: true,
+    modelPath: 'rvm.json', // can use rvm, selfie or meet
+    ratio: 0.5,
+    mode: 'default',
+  },
+};
+
+const backgroundImage = '../../samples/in/background.jpg';
+
+const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
+
+const log = (...msg) => console.log(...msg); // eslint-disable-line no-console
+
+async function main() {
+  // gather dom elements
+  const dom = {
+    background: document.getElementById('background'),
+    webcam: document.getElementById('webcam'),
+    output: document.getElementById('output'),
+    merge: document.getElementById('merge'),
+    mode: document.getElementById('mode'),
+    composite: document.getElementById('composite'),
+    ratio: document.getElementById('ratio'),
+    fps: document.getElementById('fps'),
+  };
+  // set defaults
+  dom.fps.innerText = 'initializing';
+  dom.ratio.valueAsNumber = human.config.segmentation.ratio;
+  dom.background.src = backgroundImage;
+  dom.composite.innerHTML = ['source-atop', 'color', 'color-burn', 'color-dodge', 'copy', 'darken', 'destination-atop', 'destination-in', 'destination-out', 'destination-over', 'difference', 'exclusion', 'hard-light', 'hue', 'lighten', 'lighter', 'luminosity', 'multiply', 'overlay', 'saturation', 'screen', 'soft-light', 'source-in', 'source-out', 'source-over', 'xor'].map((gco) => `<option value="${gco}">${gco}</option>`).join(''); // eslint-disable-line max-len
+  const ctxMerge = dom.merge.getContext('2d');
+
+  log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
+  log('platform:', human.env.platform, '| agent:', human.env.agent);
+  await human.load(); // preload all models
+  log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
+  log('models stats:', human.models.stats());
+  log('models loaded:', human.models.loaded());
+  await human.warmup(); // warmup function to initialize backend for future faster detection
+  const numTensors = human.tf.engine().state.numTensors;
+
+  // initialize webcam
+  dom.webcam.onplay = () => { // start processing on video play
+    log('start processing');
+    dom.output.width = human.webcam.width;
+    dom.output.height = human.webcam.height;
+    dom.merge.width = human.webcam.width;
+    dom.merge.height = human.webcam.height;
+    loop(); // eslint-disable-line no-use-before-define
+  };
+
+  await human.webcam.start({ element: dom.webcam, crop: true, width: window.innerWidth / 2, height: window.innerHeight / 2 }); // use human webcam helper methods and associate webcam stream with a dom element
+  if (!human.webcam.track) dom.fps.innerText = 'webcam error';
+
+  // processing loop
+  async function loop() {
+    if (!human.webcam.element || human.webcam.paused) return; // check if webcam is valid and playing
+    human.config.segmentation.mode = dom.mode.value; // get segmentation mode from ui
+    human.config.segmentation.ratio = dom.ratio.valueAsNumber; // get segmentation downsample ratio from ui
+    const t0 = Date.now();
+    const rgba = await human.segmentation(human.webcam.element, human.config); // run model and process results
+    const t1 = Date.now();
+    if (!rgba) {
+      dom.fps.innerText = 'error';
+      return;
+    }
+    dom.fps.innerText = `fps: ${Math.round(10000 / (t1 - t0)) / 10}`; // mark performance
+    human.draw.tensor(rgba, dom.output); // draw raw output
+    human.tf.dispose(rgba); // dispose tensors
+    ctxMerge.globalCompositeOperation = 'source-over';
+    ctxMerge.drawImage(dom.background, 0, 0); // draw original video to first stacked canvas
+    ctxMerge.globalCompositeOperation = dom.composite.value;
+    ctxMerge.drawImage(dom.output, 0, 0); // draw processed output to second stacked canvas
+    if (numTensors !== human.tf.engine().state.numTensors) log({ leak: human.tf.engine().state.numTensors - numTensors }); // check for memory leaks
+    requestAnimationFrame(loop);
+  }
+}
+
+window.onload = main;
@@ -0,0 +1,28 @@
## Tracker

### Based on

<https://github.com/opendatacam/node-moving-things-tracker>

### Build

- remove reference to `lodash`:
  > `isEqual` in <tracker.js> (a minimal stand-in sketch follows this list)
- replace external lib:
  > curl https://raw.githubusercontent.com/ubilabs/kd-tree-javascript/master/kdTree.js -o lib/kdTree-min.js
- build with `esbuild`:
  > node_modules/.bin/esbuild --bundle tracker.js --format=esm --platform=browser --target=esnext --keep-names --tree-shaking=false --analyze --outfile=/home/vlado/dev/human/demo/tracker/tracker.js --banner:js="/* eslint-disable */"
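One way to cover the removed `lodash` reference is a small local deep-equality helper. This is a hypothetical sketch, assuming the tracker only compares plain, acyclic objects; it is not the code shipped in the bundle:

```js
// hypothetical drop-in for lodash isEqual, sufficient for plain objects,
// arrays and primitives; no handling of cycles, Dates, Maps or NaN
function isEqual(a, b) {
  if (a === b) return true;
  if (typeof a !== 'object' || a === null || typeof b !== 'object' || b === null) return false;
  const keysA = Object.keys(a);
  const keysB = Object.keys(b);
  if (keysA.length !== keysB.length) return false;
  return keysA.every((key) => isEqual(a[key], b[key])); // recurse into nested values
}
```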

### Usage

Available methods (a combined usage sketch follows the list):

computeDistance(item1, item2)
disableKeepInMemory()
enableKeepInMemory()
getAllTrackedItems()
getJSONDebugOfTrackedItems(roundInt = true)
getJSONOfAllTrackedItems()
getJSONOfTrackedItems(roundInt = true)
getTrackedItemsInMOTFormat(frameNb)
reset()
setParams(newParams)
updateTrackedItemsWithNewFrame(detectionsOfThisFrame, frameNb)
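A minimal usage sketch, based on how the tracker demo below drives these calls; the item fields (`x`/`y` box center, `w`/`h`, `confidence`, `name`) follow the shape that demo passes in:

```js
import tracker from './tracker.js'; // bundle produced by the esbuild step above

tracker.setParams({ unMatchedFramesTolerance: 100, iouLimit: 0.05, distanceLimit: 1e4, matchingAlgorithm: 'kdTree' });
tracker.enableKeepInMemory(); // keep unmatched items around as zombies instead of deleting them

// one frame worth of detections: box center, box size, score and class name
const detections = [{ x: 320, y: 240, w: 100, h: 120, confidence: 0.9, name: 'face' }];
tracker.updateTrackedItemsWithNewFrame(detections, 0); // frame number 0

const tracked = tracker.getJSONOfTrackedItems(true); // rounded values; items also carry id, isZombie and bearing
console.log(tracked);
tracker.reset(); // clear tracker state, e.g. when a new video is loaded
```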
@@ -0,0 +1,65 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="utf-8">
  <title>Human</title>
  <meta name="viewport" content="width=device-width" id="viewport">
  <meta name="keywords" content="Human">
  <meta name="application-name" content="Human">
  <meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
  <meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
  <meta name="theme-color" content="#000000">
  <link rel="manifest" href="../manifest.webmanifest">
  <link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
  <link rel="apple-touch-icon" href="../../assets/icon.png">
  <script src="./index.js" type="module"></script>
  <style>
    html { font-family: 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
    body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
    body::-webkit-scrollbar { display: none; }
    input[type="file"] { font-family: 'Segoe UI'; font-size: 14px; font-variant: small-caps; }
    ::-webkit-file-upload-button { background: #333333; color: white; border: 0; border-radius: 0; padding: 6px 16px; box-shadow: 4px 4px 4px #222222; font-family: 'Segoe UI'; font-size: 14px; font-variant: small-caps; }
  </style>
</head>
<body>
  <div style="display: flex">
    <video id="video" playsinline style="width: 25vw" controls controlslist="nofullscreen nodownload noremoteplayback" disablepictureinpicture loop></video>
    <canvas id="canvas" style="width: 75vw"></canvas>
  </div>
  <div class="uploader" style="padding: 8px">
    <input type="file" name="inputvideo" id="inputvideo" accept="video/*">
    <input type="checkbox" id="interpolation" name="interpolation">
    <label for="interpolation">interpolation</label>
  </div>
  <form id="config" style="padding: 8px; line-height: 1.6rem;">
    tracker |
    <input type="checkbox" id="tracker" name="tracker" checked>
    <label for="tracker">enabled</label> |
    <input type="checkbox" id="keepInMemory" name="keepInMemory">
    <label for="keepInMemory">keepInMemory</label> |
    <br>
    tracker source |
    <input type="radio" id="box-face" name="box" value="face" checked>
    <label for="box-face">face</label> |
    <input type="radio" id="box-body" name="box" value="body">
    <label for="box-body">body</label> |
    <input type="radio" id="box-object" name="box" value="object">
    <label for="box-object">object</label> |
    <br>
    tracker config |
    <input type="range" id="unMatchedFramesTolerance" name="unMatchedFramesTolerance" min="0" max="300" step="1" value="60">
    <label for="unMatchedFramesTolerance">unMatchedFramesTolerance</label> |
    <input type="range" id="iouLimit" name="iouLimit" min="0" max="1" step="0.01" value="0.1">
    <label for="iouLimit">iouLimit</label> |
    <input type="range" id="distanceLimit" name="distanceLimit" min="0" max="1" step="0.01" value="0.1">
    <label for="distanceLimit">distanceLimit</label> |
    <input type="radio" id="matchingAlgorithm-kdTree" name="matchingAlgorithm" value="kdTree" checked>
    <label for="matchingAlgorithm-kdTree">kdTree</label> |
    <input type="radio" id="matchingAlgorithm-munkres" name="matchingAlgorithm" value="munkres">
    <label for="matchingAlgorithm-munkres">munkres</label> |
  </form>
  <pre id="status" style="position: absolute; top: 12px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
  <pre id="log" style="padding: 8px"></pre>
  <div id="performance" style="position: absolute; bottom: 0; width: 100%; padding: 8px; font-size: 0.8rem;"></div>
</body>
</html>
@@ -0,0 +1,208 @@
/**
 * Human demo for browsers
 * @default Human Library
 * @summary <https://github.com/vladmandic/human>
 * @author <https://github.com/vladmandic>
 * @copyright <https://github.com/vladmandic>
 * @license MIT
 */

import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
import tracker from './tracker.js';

const humanConfig: Partial<H.Config> = { // user configuration for human, used to fine-tune behavior
  debug: true,
  backend: 'webgl',
  // cacheSensitivity: 0,
  // cacheModels: false,
  // warmup: 'none',
  modelBasePath: 'https://vladmandic.github.io/human-models/models',
  filter: { enabled: true, equalization: false, flip: false },
  face: {
    enabled: true,
    detector: { rotation: false, maxDetected: 10, minConfidence: 0.3 },
    mesh: { enabled: true },
    attention: { enabled: false },
    iris: { enabled: false },
    description: { enabled: false },
    emotion: { enabled: false },
    antispoof: { enabled: false },
    liveness: { enabled: false },
  },
  body: { enabled: false, maxDetected: 6, modelPath: 'movenet-multipose.json' },
  hand: { enabled: false },
  object: { enabled: false, maxDetected: 10 },
  segmentation: { enabled: false },
  gesture: { enabled: false },
};

interface TrackerConfig {
  unMatchedFramesTolerance: number, // number of frames an object can go unmatched before it is considered gone; ignored if fastDelete is set
  iouLimit: number, // exclude objects from matching if their IOU is less than this; 1 means total overlap; 0 means no overlap
  fastDelete: boolean, // remove new objects immediately if they could not be matched in the next frames; if set, ignores unMatchedFramesTolerance
  distanceLimit: number, // distance limit for matching; if values need to be excluded from matching set their distance to something greater than the distance limit
  matchingAlgorithm: 'kdTree' | 'munkres', // algorithm used to match tracks with new detections
}

interface TrackerResult {
  id: number,
  confidence: number,
  bearing: number,
  isZombie: boolean,
  name: string,
  x: number,
  y: number,
  w: number,
  h: number,
}

const trackerConfig: TrackerConfig = {
  unMatchedFramesTolerance: 100,
  iouLimit: 0.05,
  fastDelete: false,
  distanceLimit: 1e4,
  matchingAlgorithm: 'kdTree',
};

const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration

const dom = { // grab instances of dom objects so we don't have to look them up later
  video: document.getElementById('video') as HTMLVideoElement,
  canvas: document.getElementById('canvas') as HTMLCanvasElement,
  log: document.getElementById('log') as HTMLPreElement,
  fps: document.getElementById('status') as HTMLPreElement,
  tracker: document.getElementById('tracker') as HTMLInputElement,
  interpolation: document.getElementById('interpolation') as HTMLInputElement,
  config: document.getElementById('config') as HTMLFormElement,
  ctx: (document.getElementById('canvas') as HTMLCanvasElement).getContext('2d') as CanvasRenderingContext2D,
};
const timestamp = { detect: 0, draw: 0, tensors: 0, start: 0 }; // holds information used to calculate performance and possible memory leaks
const fps = { detectFPS: 0, drawFPS: 0, frames: 0, averageMs: 0 }; // holds calculated fps information for both detect and screen refresh

const log = (...msg) => { // helper method to output messages
  dom.log.innerText += msg.join(' ') + '\n';
  console.log(...msg); // eslint-disable-line no-console
};
const status = (msg) => dom.fps.innerText = msg; // print status element

async function detectionLoop() { // main detection loop
  if (!dom.video.paused && dom.video.readyState >= 2) {
    if (timestamp.start === 0) timestamp.start = human.now();
    // log('profiling data:', await human.profile(dom.video));
    await human.detect(dom.video, humanConfig); // actual detection; we're not capturing output in a local variable as it can also be reached via human.result
    const tensors = human.tf.memory().numTensors; // check current tensor usage for memory leaks
    if (tensors - timestamp.tensors !== 0) log('allocated tensors:', tensors - timestamp.tensors); // printed on start and each time there is a tensor leak
    timestamp.tensors = tensors;
    fps.detectFPS = Math.round(1000 * 1000 / (human.now() - timestamp.detect)) / 1000;
    fps.frames++;
    fps.averageMs = Math.round(1000 * (human.now() - timestamp.start) / fps.frames) / 1000;
  }
  timestamp.detect = human.now();
  requestAnimationFrame(detectionLoop); // start new frame immediately
}

function drawLoop() { // main screen refresh loop
  if (!dom.video.paused && dom.video.readyState >= 2) {
    const res: H.Result = dom.interpolation.checked ? human.next(human.result) : human.result; // interpolate results if enabled
    let tracking: H.FaceResult[] | H.BodyResult[] | H.ObjectResult[] = [];
    if (human.config.face.enabled) tracking = res.face;
    else if (human.config.body.enabled) tracking = res.body;
    else if (human.config.object.enabled) tracking = res.object;
    else log('unknown object type');
    let data: TrackerResult[] = [];
    if (dom.tracker.checked) {
      const items = tracking.map((obj) => ({
        x: obj.box[0] + obj.box[2] / 2,
        y: obj.box[1] + obj.box[3] / 2,
        w: obj.box[2],
        h: obj.box[3],
        name: obj.label || (human.config.face.enabled ? 'face' : 'body'),
        confidence: obj.score,
      }));
      tracker.updateTrackedItemsWithNewFrame(items, fps.frames);
      data = tracker.getJSONOfTrackedItems(true) as TrackerResult[];
    }
    human.draw.canvas(dom.video, dom.canvas); // copy input video frame to output canvas
    for (let i = 0; i < tracking.length; i++) {
      // @ts-ignore
      const name = tracking[i].label || (human.config.face.enabled ? 'face' : 'body');
      dom.ctx.strokeRect(tracking[i].box[0], tracking[i].box[1], tracking[i].box[2], tracking[i].box[3]);
      dom.ctx.fillText(`id: ${tracking[i].id} ${Math.round(100 * tracking[i].score)}% ${name}`, tracking[i].box[0] + 4, tracking[i].box[1] + 16);
      if (data[i]) {
        dom.ctx.fillText(`t: ${data[i].id} ${Math.round(100 * data[i].confidence)}% ${data[i].name} ${data[i].isZombie ? 'zombie' : ''}`, tracking[i].box[0] + 4, tracking[i].box[1] + 34);
      }
    }
  }
  const now = human.now();
  fps.drawFPS = Math.round(1000 * 1000 / (now - timestamp.draw)) / 1000;
  timestamp.draw = now;
  status(dom.video.paused ? 'paused' : `fps: ${fps.detectFPS.toFixed(1).padStart(5, ' ')} detect | ${fps.drawFPS.toFixed(1).padStart(5, ' ')} draw`); // write status
  setTimeout(drawLoop, 30); // use to slow down refresh from max refresh rate to target of 30 fps
}

async function handleVideo(file: File) {
  const url = URL.createObjectURL(file);
  dom.video.src = url;
  await dom.video.play();
  log('loaded video:', file.name, 'resolution:', [dom.video.videoWidth, dom.video.videoHeight], 'duration:', dom.video.duration);
  dom.canvas.width = dom.video.videoWidth;
  dom.canvas.height = dom.video.videoHeight;
  dom.ctx.strokeStyle = 'white';
  dom.ctx.fillStyle = 'white';
  dom.ctx.font = '16px Segoe UI';
  dom.video.playbackRate = 0.25;
}

function initInput() {
  document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
  document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
  document.body.addEventListener('dragover', (evt) => evt.preventDefault());
  document.body.addEventListener('drop', async (evt) => {
    evt.preventDefault();
    if (evt.dataTransfer) evt.dataTransfer.dropEffect = 'copy';
    const file = evt.dataTransfer?.files?.[0];
    if (file) await handleVideo(file);
    log(dom.video.readyState);
  });
  (document.getElementById('inputvideo') as HTMLInputElement).onchange = async (evt) => {
    evt.preventDefault();
    const file = evt.target?.['files']?.[0];
    if (file) await handleVideo(file);
  };
  dom.config.onchange = () => {
    trackerConfig.distanceLimit = (document.getElementById('distanceLimit') as HTMLInputElement).valueAsNumber;
    trackerConfig.iouLimit = (document.getElementById('iouLimit') as HTMLInputElement).valueAsNumber;
    trackerConfig.unMatchedFramesTolerance = (document.getElementById('unMatchedFramesTolerance') as HTMLInputElement).valueAsNumber;
    trackerConfig.matchingAlgorithm = (document.getElementById('matchingAlgorithm-kdTree') as HTMLInputElement).checked ? 'kdTree' : 'munkres';
    tracker.setParams(trackerConfig);
    if ((document.getElementById('keepInMemory') as HTMLInputElement).checked) tracker.enableKeepInMemory();
    else tracker.disableKeepInMemory();
    tracker.reset();
    log('tracker config change', JSON.stringify(trackerConfig));
    humanConfig.face!.enabled = (document.getElementById('box-face') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
    humanConfig.body!.enabled = (document.getElementById('box-body') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
    humanConfig.object!.enabled = (document.getElementById('box-object') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
  };
  dom.tracker.onchange = (evt) => {
    log('tracker', (evt.target as HTMLInputElement).checked ? 'enabled' : 'disabled');
    tracker.setParams(trackerConfig);
    tracker.reset();
  };
}

async function main() { // main entry point
  log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
  log('platform:', human.env.platform, '| agent:', human.env.agent);
  status('loading...');
  await human.load(); // preload all models
  log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
  log('models loaded:', human.models.loaded());
  status('initializing...');
  await human.warmup(); // warmup function to initialize backend for future faster detection
  initInput(); // initialize input
  await detectionLoop(); // start detection loop
  drawLoop(); // start draw loop
}

window.onload = main;
@@ -21,7 +21,7 @@
  </style>
</head>
<body>
  <canvas id="canvas" style="margin: 0 auto; width: 100%"></canvas>
  <canvas id="canvas" style="margin: 0 auto; width: 100vw"></canvas>
  <video id="video" playsinline style="display: none"></video>
  <pre id="status" style="position: absolute; top: 12px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
  <pre id="log" style="padding: 8px"></pre>
@@ -4,106 +4,6 @@
  author: <https://github.com/vladmandic>'
*/

import { Human } from "../../dist/human.esm.js";
var humanConfig = {
  modelBasePath: "../../models",
  filter: { enabled: true, equalization: false },
  face: { enabled: true, detector: { rotation: false }, mesh: { enabled: true }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true } },
  body: { enabled: true },
  hand: { enabled: true },
  object: { enabled: false },
  gesture: { enabled: true }
};
var human = new Human(humanConfig);
human.env["perfadd"] = false;
human.draw.options.font = 'small-caps 18px "Lato"';
human.draw.options.lineHeight = 20;
var dom = {
  video: document.getElementById("video"),
  canvas: document.getElementById("canvas"),
  log: document.getElementById("log"),
  fps: document.getElementById("status"),
  perf: document.getElementById("performance")
};
var timestamp = { detect: 0, draw: 0, tensors: 0 };
var fps = { detect: 0, draw: 0 };
var log = (...msg) => {
  dom.log.innerText += msg.join(" ") + "\n";
  console.log(...msg);
};
var status = (msg) => dom.fps.innerText = msg;
var perf = (msg) => dom.perf.innerText = "tensors:" + human.tf.memory().numTensors + " | performance: " + JSON.stringify(msg).replace(/"|{|}/g, "").replace(/,/g, " | ");
async function webCam() {
  status("starting webcam...");
  const options = { audio: false, video: { facingMode: "user", resizeMode: "none", width: { ideal: document.body.clientWidth } } };
  const stream = await navigator.mediaDevices.getUserMedia(options);
  const ready = new Promise((resolve) => {
    dom.video.onloadeddata = () => resolve(true);
  });
  dom.video.srcObject = stream;
  dom.video.play();
  await ready;
  dom.canvas.width = dom.video.videoWidth;
  dom.canvas.height = dom.video.videoHeight;
  const track = stream.getVideoTracks()[0];
  const capabilities = track.getCapabilities ? track.getCapabilities() : "";
  const settings = track.getSettings ? track.getSettings() : "";
  const constraints = track.getConstraints ? track.getConstraints() : "";
  log("video:", dom.video.videoWidth, dom.video.videoHeight, track.label, { stream, track, settings, constraints, capabilities });
  dom.canvas.onclick = () => {
    if (dom.video.paused)
      dom.video.play();
    else
      dom.video.pause();
  };
}
async function detectionLoop() {
  if (!dom.video.paused) {
    await human.detect(dom.video);
    const tensors = human.tf.memory().numTensors;
    if (tensors - timestamp.tensors !== 0)
      log("allocated tensors:", tensors - timestamp.tensors);
    timestamp.tensors = tensors;
  }
  const now = human.now();
  fps.detect = 1e3 / (now - timestamp.detect);
  timestamp.detect = now;
  requestAnimationFrame(detectionLoop);
}
async function drawLoop() {
  if (!dom.video.paused) {
    const interpolated = await human.next(human.result);
    await human.draw.canvas(dom.video, dom.canvas);
    await human.draw.all(dom.canvas, interpolated);
    perf(interpolated.performance);
  }
  const now = human.now();
  fps.draw = 1e3 / (now - timestamp.draw);
  timestamp.draw = now;
  status(dom.video.paused ? "paused" : `fps: ${fps.detect.toFixed(1).padStart(5, " ")} detect | ${fps.draw.toFixed(1).padStart(5, " ")} draw`);
  setTimeout(drawLoop, 30);
}
async function main() {
  log("human version:", human.version, "| tfjs version:", human.tf.version["tfjs-core"]);
  log("platform:", human.env.platform, "| agent:", human.env.agent);
  status("loading...");
  await human.load();
  log("backend:", human.tf.getBackend(), "| available:", human.env.backends);
  log("loaded models:" + Object.values(human.models).filter((model) => model !== null).length);
  status("initializing...");
  await human.warmup();
  await webCam();
  await detectionLoop();
  await drawLoop();
}
window.onload = main;
/**
 * Human demo for browsers
 * @default Human Library
 * @summary <https://github.com/vladmandic/human>
 * @author <https://github.com/vladmandic>
 * @copyright <https://github.com/vladmandic>
 * @license MIT
 */

// demo/typescript/index.ts
import*as m from"../../dist/human.esm.js";var v=1920,b={debug:!0,backend:"webgl",modelBasePath:"https://vladmandic.github.io/human-models/models/",filter:{enabled:!0,equalization:!1,flip:!1},face:{enabled:!0,detector:{rotation:!1},mesh:{enabled:!0},attention:{enabled:!1},iris:{enabled:!0},description:{enabled:!0},emotion:{enabled:!0},antispoof:{enabled:!0},liveness:{enabled:!0}},body:{enabled:!1},hand:{enabled:!1},object:{enabled:!1},segmentation:{enabled:!1},gesture:{enabled:!0}},e=new m.Human(b);e.env.perfadd=!1;e.draw.options.font='small-caps 18px "Lato"';e.draw.options.lineHeight=20;e.draw.options.drawPoints=!0;var a={video:document.getElementById("video"),canvas:document.getElementById("canvas"),log:document.getElementById("log"),fps:document.getElementById("status"),perf:document.getElementById("performance")},n={detect:0,draw:0,tensors:0,start:0},s={detectFPS:0,drawFPS:0,frames:0,averageMs:0},o=(...t)=>{a.log.innerText+=t.join(" ")+`
`,console.log(...t)},i=t=>a.fps.innerText=t,g=t=>a.perf.innerText="tensors:"+e.tf.memory().numTensors.toString()+" | performance: "+JSON.stringify(t).replace(/"|{|}/g,"").replace(/,/g," | ");async function f(){if(!a.video.paused){n.start===0&&(n.start=e.now()),await e.detect(a.video);let t=e.tf.memory().numTensors;t-n.tensors!==0&&o("allocated tensors:",t-n.tensors),n.tensors=t,s.detectFPS=Math.round(1e3*1e3/(e.now()-n.detect))/1e3,s.frames++,s.averageMs=Math.round(1e3*(e.now()-n.start)/s.frames)/1e3,s.frames%100===0&&!a.video.paused&&o("performance",{...s,tensors:n.tensors})}n.detect=e.now(),requestAnimationFrame(f)}async function u(){var d,r,c;if(!a.video.paused){let l=e.next(e.result),w=await e.image(a.video);e.draw.canvas(w.canvas,a.canvas);let p={bodyLabels:`person confidence [score] and ${(c=(r=(d=e.result)==null?void 0:d.body)==null?void 0:r[0])==null?void 0:c.keypoints.length} keypoints`};await e.draw.all(a.canvas,l,p),g(l.performance)}let t=e.now();s.drawFPS=Math.round(1e3*1e3/(t-n.draw))/1e3,n.draw=t,i(a.video.paused?"paused":`fps: ${s.detectFPS.toFixed(1).padStart(5," ")} detect | ${s.drawFPS.toFixed(1).padStart(5," ")} draw`),setTimeout(u,30)}async function h(){let d=(await e.webcam.enumerate())[0].deviceId,r=await e.webcam.start({element:a.video,crop:!1,width:v,id:d});o(r),a.canvas.width=e.webcam.width,a.canvas.height=e.webcam.height,a.canvas.onclick=async()=>{e.webcam.paused?await e.webcam.play():e.webcam.pause()}}async function y(){o("human version:",e.version,"| tfjs version:",e.tf.version["tfjs-core"]),o("platform:",e.env.platform,"| agent:",e.env.agent),i("loading..."),await e.load(),o("backend:",e.tf.getBackend(),"| available:",e.env.backends),o("models stats:",e.models.stats()),o("models loaded:",e.models.loaded()),o("environment",e.env),i("initializing..."),await e.warmup(),await h(),await f(),await u()}window.onload=y;
//# sourceMappingURL=index.js.map
@@ -7,25 +7,34 @@
 * @license MIT
 */

import { Human, Config } from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human

const humanConfig: Partial<Config> = { // user configuration for human, used to fine-tune behavior
  // backend: 'webgpu' as const,
  // async: true,
  modelBasePath: '../../models',
  filter: { enabled: true, equalization: false },
  face: { enabled: true, detector: { rotation: false }, mesh: { enabled: true }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true } },
  body: { enabled: true },
  hand: { enabled: true },
const width = 1920; // used by webcam config as well as human maximum resolution // can be anything, but resolutions higher than 4k will disable internal optimizations

const humanConfig: Partial<H.Config> = { // user configuration for human, used to fine-tune behavior
  debug: true,
  backend: 'webgl',
  // cacheSensitivity: 0,
  // cacheModels: false,
  // warmup: 'none',
  // modelBasePath: '../../models',
  modelBasePath: 'https://vladmandic.github.io/human-models/models/',
  filter: { enabled: true, equalization: false, flip: false },
  face: { enabled: true, detector: { rotation: false }, mesh: { enabled: true }, attention: { enabled: false }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true }, antispoof: { enabled: true }, liveness: { enabled: true } },
  body: { enabled: false },
  hand: { enabled: false },
  object: { enabled: false },
  segmentation: { enabled: false },
  gesture: { enabled: true },
};

const human = new Human(humanConfig); // create instance of human with overrides from user configuration
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration

human.env['perfadd'] = false; // is performance data showing instant or total values
human.env.perfadd = false; // is performance data showing instant or total values
human.draw.options.font = 'small-caps 18px "Lato"'; // set font used to draw labels when using draw methods
human.draw.options.lineHeight = 20;
human.draw.options.drawPoints = true; // draw points on face mesh
// human.draw.options.fillPolygons = true;

const dom = { // grab instances of dom objects so we don't have to look them up later
  video: document.getElementById('video') as HTMLVideoElement,
@@ -34,75 +43,72 @@ const dom = { // grab instances of dom objects so we dont have to look them up l
  fps: document.getElementById('status') as HTMLPreElement,
  perf: document.getElementById('performance') as HTMLDivElement,
};
const timestamp = { detect: 0, draw: 0, tensors: 0 }; // holds information used to calculate performance and possible memory leaks
const timestamp = { detect: 0, draw: 0, tensors: 0, start: 0 }; // holds information used to calculate performance and possible memory leaks
const fps = { detect: 0, draw: 0 }; // holds calculated fps information for both detect and screen refresh
const fps = { detectFPS: 0, drawFPS: 0, frames: 0, averageMs: 0 }; // holds calculated fps information for both detect and screen refresh

const log = (...msg) => { // helper method to output messages
  dom.log.innerText += msg.join(' ') + '\n';
  // eslint-disable-next-line no-console
  console.log(...msg);
  console.log(...msg); // eslint-disable-line no-console
};
const status = (msg) => dom.fps.innerText = msg; // print status element
const perf = (msg) => dom.perf.innerText = 'tensors:' + human.tf.memory().numTensors + ' | performance: ' + JSON.stringify(msg).replace(/"|{|}/g, '').replace(/,/g, ' | '); // print performance element
const perf = (msg) => dom.perf.innerText = 'tensors:' + human.tf.memory().numTensors.toString() + ' | performance: ' + JSON.stringify(msg).replace(/"|{|}/g, '').replace(/,/g, ' | '); // print performance element

async function webCam() { // initialize webcam
  status('starting webcam...');
  // @ts-ignore resizeMode is not yet defined in tslib
  const options: MediaStreamConstraints = { audio: false, video: { facingMode: 'user', resizeMode: 'none', width: { ideal: document.body.clientWidth } } };
  const stream: MediaStream = await navigator.mediaDevices.getUserMedia(options);
  const ready = new Promise((resolve) => { dom.video.onloadeddata = () => resolve(true); });
  dom.video.srcObject = stream;
  dom.video.play();
  await ready;
  dom.canvas.width = dom.video.videoWidth;
  dom.canvas.height = dom.video.videoHeight;
  const track: MediaStreamTrack = stream.getVideoTracks()[0];
  const capabilities: MediaTrackCapabilities | string = track.getCapabilities ? track.getCapabilities() : '';
  const settings: MediaTrackSettings | string = track.getSettings ? track.getSettings() : '';
  const constraints: MediaTrackConstraints | string = track.getConstraints ? track.getConstraints() : '';
  log('video:', dom.video.videoWidth, dom.video.videoHeight, track.label, { stream, track, settings, constraints, capabilities });
  dom.canvas.onclick = () => { // pause when clicked on screen and resume on next click
    if (dom.video.paused) dom.video.play();
    else dom.video.pause();
  };
}

async function detectionLoop() { // main detection loop
  if (!dom.video.paused) {
    // console.log('profiling data:', await human.profile(dom.video));
    if (timestamp.start === 0) timestamp.start = human.now();
    // log('profiling data:', await human.profile(dom.video));
    await human.detect(dom.video); // actual detection; we're not capturing output in a local variable as it can also be reached via human.result
    const tensors = human.tf.memory().numTensors; // check current tensor usage for memory leaks
    if (tensors - timestamp.tensors !== 0) log('allocated tensors:', tensors - timestamp.tensors); // printed on start and each time there is a tensor leak
    timestamp.tensors = tensors;
    fps.detectFPS = Math.round(1000 * 1000 / (human.now() - timestamp.detect)) / 1000;
    fps.frames++;
    fps.averageMs = Math.round(1000 * (human.now() - timestamp.start) / fps.frames) / 1000;
    if (fps.frames % 100 === 0 && !dom.video.paused) log('performance', { ...fps, tensors: timestamp.tensors });
  }
  const now = human.now();
  fps.detect = 1000 / (now - timestamp.detect);
  timestamp.detect = now;
  timestamp.detect = human.now();
  requestAnimationFrame(detectionLoop); // start new frame immediately
}

async function drawLoop() { // main screen refresh loop
  if (!dom.video.paused) {
    const interpolated = await human.next(human.result); // smoothen result using last-known results
    await human.draw.canvas(dom.video, dom.canvas); // draw canvas to screen
    await human.draw.all(dom.canvas, interpolated); // draw labels, boxes, lines, etc.
    const interpolated = human.next(human.result); // smoothen result using last-known results
    const processed = await human.image(dom.video); // get current video frame, but enhanced with human.filters
    human.draw.canvas(processed.canvas as HTMLCanvasElement, dom.canvas);

    const opt: Partial<H.DrawOptions> = { bodyLabels: `person confidence [score] and ${human.result?.body?.[0]?.keypoints.length} keypoints` };
    await human.draw.all(dom.canvas, interpolated, opt); // draw labels, boxes, lines, etc.
    perf(interpolated.performance); // write performance data
  }
  const now = human.now();
  fps.draw = 1000 / (now - timestamp.draw);
  fps.drawFPS = Math.round(1000 * 1000 / (now - timestamp.draw)) / 1000;
  timestamp.draw = now;
  status(dom.video.paused ? 'paused' : `fps: ${fps.detect.toFixed(1).padStart(5, ' ')} detect | ${fps.draw.toFixed(1).padStart(5, ' ')} draw`); // write status
  status(dom.video.paused ? 'paused' : `fps: ${fps.detectFPS.toFixed(1).padStart(5, ' ')} detect | ${fps.drawFPS.toFixed(1).padStart(5, ' ')} draw`); // write status
  // requestAnimationFrame(drawLoop); // refresh at screen refresh rate
  setTimeout(drawLoop, 30); // use to slow down refresh from max refresh rate to target of 30 fps
}

async function webCam() {
  const devices = await human.webcam.enumerate();
  const id = devices[0].deviceId; // use first available video source
  const webcamStatus = await human.webcam.start({ element: dom.video, crop: false, width, id }); // use human webcam helper methods and associate webcam stream with a dom element
  log(webcamStatus);
  dom.canvas.width = human.webcam.width;
  dom.canvas.height = human.webcam.height;
  dom.canvas.onclick = async () => { // pause when clicked on screen and resume on next click
    if (human.webcam.paused) await human.webcam.play();
    else human.webcam.pause();
  };
}

async function main() { // main entry point
  log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
  log('platform:', human.env.platform, '| agent:', human.env.agent);
  status('loading...');
  await human.load(); // preload all models
  log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
  log('loaded models:' + Object.values(human.models).filter((model) => model !== null).length);
  log('models stats:', human.models.stats());
  log('models loaded:', human.models.loaded());
  log('environment', human.env);
  status('initializing...');
  await human.warmup(); // warmup function to initialize backend for future faster detection
  await webCam(); // start webcam
@@ -0,0 +1,58 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="utf-8">
  <title>Human</title>
  <meta name="viewport" content="width=device-width" id="viewport">
  <meta name="keywords" content="Human">
  <meta name="description" content="Human: Demo; Author: Vladimir Mandic <https://github.com/vladmandic>">
  <link rel="manifest" href="../manifest.webmanifest">
  <link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
  <style>
    @font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
    body { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; margin: 0; background: black; color: white; overflow: hidden; width: 100vw; height: 100vh; }
  </style>
</head>
<body>
  <canvas id="canvas" style="margin: 0 auto; width: 100%"></canvas>
  <pre id="log" style="padding: 8px; position: fixed; bottom: 0"></pre>
  <script type="module">
    import * as H from '../../dist/human.esm.js'; // equivalent of import @vladmandic/Human

    const humanConfig = { // user configuration for human, used to fine-tune behavior
      modelBasePath: '../../models', // models can be loaded directly from cdn as well
      filter: { enabled: true, equalization: true, flip: false },
      face: { enabled: true, detector: { rotation: false }, mesh: { enabled: true }, attention: { enabled: false }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true } },
      body: { enabled: true },
      hand: { enabled: true },
      gesture: { enabled: true },
      object: { enabled: false },
      segmentation: { enabled: false },
    };
    const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
    const canvas = document.getElementById('canvas'); // output canvas to draw both webcam and detection results

    async function drawLoop() { // main screen refresh loop
      const interpolated = human.next(); // get smoothened result using last-known results which are continuously updated based on input webcam video
      human.draw.canvas(human.webcam.element, canvas); // draw webcam video to screen canvas // better than using processed image as this loop happens faster than processing loop
      await human.draw.all(canvas, interpolated); // draw labels, boxes, lines, etc.
      setTimeout(drawLoop, 30); // use to slow down refresh from max refresh rate to target of 1000/30 ~ 30 fps
    }

    async function main() { // main entry point
      document.getElementById('log').innerHTML = `human version: ${human.version} | tfjs version: ${human.tf.version['tfjs-core']}<br>platform: ${human.env.platform} | agent ${human.env.agent}`;
      await human.webcam.start({ crop: true }); // find webcam and start it
      human.video(human.webcam.element); // instruct human to continuously detect video frames
      canvas.width = human.webcam.width; // set canvas resolution to input webcam native resolution
      canvas.height = human.webcam.height;
      canvas.onclick = async () => { // pause when clicked on screen and resume on next click
        if (human.webcam.paused) await human.webcam.play();
        else human.webcam.pause();
      };
      await drawLoop(); // start draw loop
    }

    window.onload = main;
  </script>
</body>
</html>
@@ -0,0 +1,25 @@
/* eslint-disable import/no-unresolved */
/* eslint-disable import/no-extraneous-dependencies */

export * from 'types/tfjs.esm';

export declare const version: {
  'tfjs-core': string;
  'tfjs-backend-cpu': string;
  'tfjs-backend-webgl': string;
  'tfjs-data': string;
  'tfjs-layers': string;
  'tfjs-converter': string;
  tfjs: string;
};

export * from '@tensorflow/tfjs-core';
export * from '@tensorflow/tfjs-converter';
export * from '@tensorflow/tfjs-data';
export * from '@tensorflow/tfjs-layers';
export * from '@tensorflow/tfjs-backend-cpu';
export * from '@tensorflow/tfjs-backend-wasm';
export * from '@tensorflow/tfjs-backend-webgl';
export * from '@tensorflow/tfjs-backend-webgpu';
export * from '@tensorflow/tfjs-node';
export * from '@tensorflow/tfjs-node-gpu';
@@ -4,41 +4,4 @@
  author: <https://github.com/vladmandic>'
*/

// node_modules/.pnpm/@tensorflow+tfjs@3.12.0_seedrandom@3.0.5/node_modules/@tensorflow/tfjs/package.json
var version = "3.12.0";

// node_modules/.pnpm/@tensorflow+tfjs-core@3.12.0/node_modules/@tensorflow/tfjs-core/package.json
var version2 = "3.12.0";

// node_modules/.pnpm/@tensorflow+tfjs-data@3.12.0_23101d5e73420f02a83504fb6376a2a9/node_modules/@tensorflow/tfjs-data/package.json
var version3 = "3.12.0";

// node_modules/.pnpm/@tensorflow+tfjs-layers@3.12.0_@tensorflow+tfjs-core@3.12.0/node_modules/@tensorflow/tfjs-layers/package.json
var version4 = "3.12.0";

// node_modules/.pnpm/@tensorflow+tfjs-converter@3.12.0_@tensorflow+tfjs-core@3.12.0/node_modules/@tensorflow/tfjs-converter/package.json
var version5 = "3.12.0";

// node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.12.0_@tensorflow+tfjs-core@3.12.0/node_modules/@tensorflow/tfjs-backend-cpu/package.json
var version6 = "3.12.0";

// node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.12.0_@tensorflow+tfjs-core@3.12.0/node_modules/@tensorflow/tfjs-backend-webgl/package.json
var version7 = "3.12.0";

// node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.12.0_@tensorflow+tfjs-core@3.12.0/node_modules/@tensorflow/tfjs-backend-wasm/package.json
var version8 = "3.12.0";

// tfjs/tf-version.ts
var version9 = {
  tfjs: version,
  "tfjs-core": version2,
  "tfjs-data": version3,
  "tfjs-layers": version4,
  "tfjs-converter": version5,
  "tfjs-backend-cpu": version6,
  "tfjs-backend-webgl": version7,
  "tfjs-backend-wasm": version8
};
export {
  version9 as version
};

var e="4.22.0";var s="4.22.0";var t="4.22.0";var n="4.22.0";var r="4.22.0";var i="4.22.0";var h={tfjs:e,"tfjs-core":e,"tfjs-converter":s,"tfjs-backend-cpu":t,"tfjs-backend-webgl":n,"tfjs-backend-wasm":r,"tfjs-backend-webgpu":i};export{h as version};
@@ -5,7 +5,7 @@ After=network.target network-online.target
[Service]
Type=simple
Environment="NODE_ENV=production"
ExecStart=<path-to-node> <your-project-folder>/node_modules/@vladmandic/build/src/build.js --profile development
ExecStart=<path-to-node> <your-project-folder>/node_modules/@vladmandic/build/src/build.js --profile serve
WorkingDirectory=<your-project-folder>
StandardOutput=inherit
StandardError=inherit
305
models/README.md
@@ -3,308 +3,3 @@
For details see Wiki:

- [**List of Models & Credits**](https://github.com/vladmandic/human/wiki/Models)

## Model signatures:

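The listing below is output from a model analyzer tool. As a rough sketch, comparable input/output signature information can be read with plain tfjs; this assumes `@tensorflow/tfjs-node` is installed and the models are on local disk, and it is not the analyzer that produced the dump:

```js
// hedged sketch: print graph-model input/output signatures with plain tfjs
const tf = require('@tensorflow/tfjs-node'); // enables the file:// model loader

async function printSignature(path) {
  const model = await tf.loadGraphModel(`file://${path}`);
  // GraphModel exposes TensorInfo arrays carrying name, dtype and shape
  console.log('inputs:', model.inputs.map((t) => ({ name: t.name, dtype: t.dtype, shape: t.shape })));
  console.log('outputs:', model.outputs.map((t) => ({ name: t.name, dtype: t.dtype, shape: t.shape })));
}

printSignature('models/iris.json'); // shapes comparable to the INFO lines below
```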
```js
|
|
||||||
INFO: graph model: /home/vlado/dev/human/models/iris.json
|
|
||||||
INFO: created on: 2020-10-12T18:46:47.060Z
|
|
||||||
INFO: metadata: { generatedBy: 'https://github.com/google/mediapipe', convertedBy: 'https://github.com/vladmandic', version: undefined }
|
|
||||||
INFO: model inputs based on signature
|
|
||||||
{ name: 'input_1:0', dtype: 'DT_FLOAT', shape: [ -1, 64, 64, 3 ] }
|
|
||||||
INFO: model outputs based on signature
|
|
||||||
{ id: 0, name: 'Identity:0', dytpe: 'DT_FLOAT', shape: [ -1, 1, 1, 228 ] }
|
|
||||||
INFO: tensors: 191
|
|
||||||
DATA: weights: {
|
|
||||||
files: [ 'iris.bin' ],
|
|
||||||
size: { disk: 2599092, memory: 2599092 },
|
|
||||||
count: { total: 191, float32: 189, int32: 2 },
|
|
||||||
quantized: { none: 191 },
|
|
||||||
values: { total: 649773, float32: 649764, int32: 9 }
|
|
||||||
}
|
|
||||||
DATA: kernel ops: {
|
|
||||||
graph: [ 'Const', 'Placeholder', 'Identity' ],
|
|
||||||
convolution: [ '_FusedConv2D', 'DepthwiseConv2dNative', 'MaxPool' ],
|
|
||||||
arithmetic: [ 'AddV2' ],
|
|
||||||
basic_math: [ 'Prelu' ],
|
|
||||||
transformation: [ 'Pad' ],
|
|
||||||
slice_join: [ 'ConcatV2' ]
|
|
||||||
}
|
|
||||||
INFO: graph model: /home/vlado/dev/human/models/facemesh.json
|
|
||||||
INFO: created on: 2020-10-12T18:46:46.944Z
|
|
||||||
INFO: metadata: { generatedBy: 'https://github.com/google/mediapipe', convertedBy: 'https://github.com/vladmandic', version: undefined }
|
|
||||||
INFO: model inputs based on signature
|
|
||||||
{ name: 'input_1:0', dtype: 'DT_FLOAT', shape: [ 1, 192, 192, 3 ] }
|
|
||||||
INFO: model outputs based on signature
|
|
||||||
{ id: 0, name: 'Identity_1:0', dytpe: 'DT_FLOAT', shape: [ 1, 266 ] }
|
|
||||||
{ id: 1, name: 'Identity_2:0', dytpe: 'DT_FLOAT', shape: [ 1, 1 ] }
|
|
||||||
{ id: 2, name: 'Identity:0', dytpe: 'DT_FLOAT', shape: [ 1, 1404 ] }
|
|
||||||
INFO: tensors: 118
|
|
||||||
DATA: weights: {
|
|
||||||
files: [ 'facemesh.bin' ],
|
|
||||||
size: { disk: 2955780, memory: 2955780 },
|
|
||||||
count: { total: 118, float32: 114, int32: 4 },
|
|
||||||
quantized: { none: 118 },
|
|
||||||
values: { total: 738945, float32: 738919, int32: 26 }
|
|
||||||
}
|
|
||||||
DATA: kernel ops: {
|
|
||||||
graph: [ 'Placeholder', 'Const', 'NoOp', 'Identity' ],
|
|
||||||
convolution: [ '_FusedConv2D', 'DepthwiseConv2dNative', 'MaxPool' ],
|
|
||||||
arithmetic: [ 'AddV2' ],
|
|
||||||
basic_math: [ 'Prelu', 'Sigmoid' ],
|
|
||||||
transformation: [ 'Pad', 'Reshape' ]
|
|
||||||
}
|
|
||||||
INFO: graph model: /home/vlado/dev/human/models/emotion.json
|
|
||||||
INFO: created on: 2020-11-05T20:11:29.740Z
|
|
||||||
INFO: metadata: { generatedBy: 'https://github.com/oarriaga/face_classification', convertedBy: 'https://github.com/vladmandic', version: undefined }
|
|
||||||
INFO: model inputs based on signature
|
|
||||||
{ name: 'input_1:0', dtype: 'DT_FLOAT', shape: [ -1, 64, 64, 1 ] }
|
|
||||||
INFO: model outputs based on signature
|
|
||||||
{ id: 0, name: 'Identity:0', dytpe: 'DT_FLOAT', shape: [ -1, 7 ] }
|
|
||||||
INFO: tensors: 23
|
|
||||||
DATA: weights: {
|
|
||||||
files: [ 'emotion.bin' ],
|
|
||||||
size: { disk: 820516, memory: 820516 },
|
|
||||||
count: { total: 23, float32: 22, int32: 1 },
|
|
||||||
quantized: { none: 23 },
|
|
||||||
values: { total: 205129, float32: 205127, int32: 2 }
|
|
||||||
}
|
|
||||||
DATA: kernel ops: {
|
|
||||||
graph: [ 'Const', 'Placeholder', 'Identity' ],
|
|
||||||
convolution: [ '_FusedConv2D', 'DepthwiseConv2dNative', 'MaxPool' ],
|
|
||||||
arithmetic: [ 'AddV2' ],
|
|
||||||
basic_math: [ 'Relu' ],
|
|
||||||
reduction: [ 'Mean' ],
|
|
||||||
normalization: [ 'Softmax' ]
|
|
||||||
}
|
|
||||||
INFO: graph model: /home/vlado/dev/human/models/faceres.json
|
|
||||||
INFO: created on: 2021-03-21T14:12:59.863Z
|
|
||||||
INFO: metadata: { generatedBy: 'https://github.com/HSE-asavchenko/HSE_FaceRec_tf', convertedBy: 'https://github.com/vladmandic', version: undefined }
|
|
||||||
INFO: model inputs based on signature
|
|
||||||
{ name: 'input_1', dtype: 'DT_FLOAT', shape: [ -1, 224, 224, 3 ] }
|
|
||||||
INFO: model outputs based on signature
|
|
||||||
{ id: 0, name: 'gender_pred/Sigmoid:0', dytpe: 'DT_FLOAT', shape: [ 1, 1 ] }
|
|
||||||
{ id: 1, name: 'global_pooling/Mean', dytpe: 'DT_FLOAT', shape: [ 1, 1024 ] }
|
|
||||||
{ id: 2, name: 'age_pred/Softmax:0', dytpe: 'DT_FLOAT', shape: [ 1, 100 ] }
|
|
||||||
INFO: tensors: 128
|
|
||||||
DATA: weights: {
|
|
||||||
files: [ 'faceres.bin' ],
|
|
||||||
size: { disk: 6978814, memory: 13957620 },
|
|
||||||
count: { total: 128, float32: 127, int32: 1 },
|
|
||||||
quantized: { float16: 127, none: 1 },
|
|
||||||
values: { total: 3489405, float32: 3489403, int32: 2 }
|
|
||||||
}
|
|
||||||
DATA: kernel ops: {
|
|
||||||
graph: [ 'Const', 'Placeholder' ],
|
|
||||||
convolution: [ 'Conv2D', 'DepthwiseConv2dNative' ],
|
|
||||||
arithmetic: [ 'Add', 'Minimum', 'Maximum', 'Mul' ],
|
|
||||||
basic_math: [ 'Relu', 'Sigmoid' ],
|
|
||||||
reduction: [ 'Mean' ],
|
|
||||||
matrices: [ '_FusedMatMul' ],
|
|
||||||
normalization: [ 'Softmax' ]
|
|
||||||
}
|
|
||||||
INFO: graph model: /home/vlado/dev/human/models/blazeface.json
|
|
||||||
INFO: created on: 2020-10-15T19:57:26.419Z
|
|
||||||
INFO: metadata: { generatedBy: 'https://github.com/google/mediapipe', convertedBy: 'https://github.com/vladmandic', version: undefined }
|
|
||||||
INFO: model inputs based on signature
|
|
||||||
{ name: 'input:0', dtype: 'DT_FLOAT', shape: [ 1, 256, 256, 3 ] }
|
|
||||||
INFO: model outputs based on signature
|
|
||||||
{ id: 0, name: 'Identity_3:0', dytpe: 'DT_FLOAT', shape: [ 1, 384, 16 ] }
|
|
||||||
{ id: 1, name: 'Identity:0', dytpe: 'DT_FLOAT', shape: [ 1, 512, 1 ] }
|
|
||||||
{ id: 2, name: 'Identity_1:0', dytpe: 'DT_FLOAT', shape: [ 1, 384, 1 ] }
|
|
||||||
{ id: 3, name: 'Identity_2:0', dytpe: 'DT_FLOAT', shape: [ 1, 512, 16 ] }
|
|
||||||
INFO: tensors: 112
|
|
||||||
DATA: weights: {
|
|
||||||
files: [ 'blazeface.bin' ],
|
|
||||||
size: { disk: 538928, memory: 538928 },
|
|
||||||
count: { total: 112, float32: 106, int32: 6 },
|
|
||||||
quantized: { none: 112 },
|
|
||||||
values: { total: 134732, float32: 134704, int32: 28 }
|
|
||||||
}
|
|
||||||
DATA: kernel ops: {
|
|
||||||
graph: [ 'Const', 'Placeholder', 'Identity' ],
|
|
||||||
convolution: [ '_FusedConv2D', 'DepthwiseConv2dNative', 'MaxPool' ],
|
|
||||||
arithmetic: [ 'AddV2' ],
|
|
||||||
basic_math: [ 'Relu' ],
|
|
||||||
transformation: [ 'Pad', 'Reshape' ]
|
|
||||||
}
|
|
||||||
INFO: graph model: /home/vlado/dev/human/models/mb3-centernet.json
INFO: created on: 2021-05-19T11:50:13.013Z
INFO: metadata: { generatedBy: 'https://github.com/610265158/mobilenetv3_centernet', convertedBy: 'https://github.com/vladmandic', version: undefined }
INFO: model inputs based on signature
{ name: 'tower_0/images', dtype: 'DT_FLOAT', shape: [ 1, 512, 512, 3 ] }
INFO: model outputs based on signature
{ id: 0, name: 'tower_0/wh', dtype: 'DT_FLOAT', shape: [ 1, 128, 128, 4 ] }
{ id: 1, name: 'tower_0/keypoints', dtype: 'DT_FLOAT', shape: [ 1, 128, 128, 80 ] }
{ id: 2, name: 'tower_0/detections', dtype: 'DT_FLOAT', shape: [ 1, 100, 6 ] }
INFO: tensors: 267
DATA: weights: {
  files: [ 'mb3-centernet.bin' ],
  size: { disk: 4030290, memory: 8060260 },
  count: { total: 267, float32: 227, int32: 40 },
  quantized: { float16: 227, none: 40 },
  values: { total: 2015065, float32: 2014985, int32: 80 }
}
DATA: kernel ops: {
  graph: [ 'Const', 'Placeholder', 'Identity' ],
  convolution: [ '_FusedConv2D', 'FusedDepthwiseConv2dNative', 'DepthwiseConv2dNative', 'Conv2D', 'MaxPool' ],
  arithmetic: [ 'Mul', 'Add', 'FloorDiv', 'FloorMod', 'Sub' ],
  basic_math: [ 'Relu6', 'Relu', 'Sigmoid' ],
  reduction: [ 'Mean' ],
  image: [ 'ResizeBilinear' ],
  slice_join: [ 'ConcatV2', 'GatherV2', 'StridedSlice' ],
  transformation: [ 'Reshape', 'Cast', 'ExpandDims' ],
  logical: [ 'Equal' ],
  evaluation: [ 'TopKV2' ]
}
INFO: graph model: /home/vlado/dev/human/models/movenet-lightning.json
INFO: created on: 2021-05-29T12:26:32.994Z
INFO: metadata: { generatedBy: 'https://tfhub.dev/google/movenet/singlepose/lightning/4', convertedBy: 'https://github.com/vladmandic', version: undefined }
INFO: model inputs based on signature
{ name: 'input:0', dtype: 'DT_INT32', shape: [ 1, 192, 192, 3 ] }
INFO: model outputs based on signature
{ id: 0, name: 'Identity:0', dtype: 'DT_FLOAT', shape: [ 1, 1, 17, 3 ] }
INFO: tensors: 180
DATA: weights: {
  files: [ 'movenet-lightning.bin' ],
  size: { disk: 4650216, memory: 9300008 },
  count: { total: 180, int32: 31, float32: 149 },
  quantized: { none: 31, float16: 149 },
  values: { total: 2325002, int32: 106, float32: 2324896 }
}
DATA: kernel ops: {
  graph: [ 'Const', 'Placeholder', 'Identity' ],
  transformation: [ 'Cast', 'ExpandDims', 'Squeeze', 'Reshape' ],
  slice_join: [ 'Unpack', 'Pack', 'GatherNd', 'ConcatV2' ],
  arithmetic: [ 'Sub', 'Mul', 'AddV2', 'FloorDiv', 'SquaredDifference', 'RealDiv' ],
  convolution: [ '_FusedConv2D', 'FusedDepthwiseConv2dNative', 'DepthwiseConv2dNative' ],
  image: [ 'ResizeBilinear' ],
  basic_math: [ 'Sigmoid', 'Sqrt' ],
  reduction: [ 'ArgMax' ]
}
INFO: graph model: /home/vlado/dev/human/models/selfie.json
INFO: created on: 2021-06-04T13:46:56.904Z
INFO: metadata: { generatedBy: 'https://github.com/PINTO0309/PINTO_model_zoo/tree/main/109_Selfie_Segmentation', convertedBy: 'https://github.com/vladmandic', version: '561.undefined' }
INFO: model inputs based on signature
{ name: 'input_1:0', dtype: 'DT_FLOAT', shape: [ 1, 256, 256, 3 ] }
INFO: model outputs based on signature
{ id: 0, name: 'activation_10:0', dtype: 'DT_FLOAT', shape: [ 1, 256, 256, 1 ] }
INFO: tensors: 136
DATA: weights: {
  files: [ 'selfie.bin' ],
  size: { disk: 212886, memory: 425732 },
  count: { total: 136, int32: 4, float32: 132 },
  quantized: { none: 4, float16: 132 },
  values: { total: 106433, int32: 10, float32: 106423 }
}
DATA: kernel ops: {
  graph: [ 'Const', 'Placeholder' ],
  convolution: [ 'Conv2D', 'DepthwiseConv2dNative', 'AvgPool', 'Conv2DBackpropInput' ],
  arithmetic: [ 'Add', 'Mul', 'AddV2', 'AddN' ],
  basic_math: [ 'Relu6', 'Relu', 'Sigmoid' ],
  image: [ 'ResizeBilinear' ]
}
INFO: graph model: /home/vlado/dev/human/models/handtrack.json
INFO: created on: 2021-09-21T12:09:47.583Z
INFO: metadata: { generatedBy: 'https://github.com/victordibia/handtracking', convertedBy: 'https://github.com/vladmandic', version: '561.undefined' }
INFO: model inputs based on signature
{ name: 'input_tensor:0', dtype: 'DT_UINT8', shape: [ 1, 320, 320, 3 ] }
INFO: model outputs based on signature
{ id: 0, name: 'Identity_2:0', dtype: 'DT_FLOAT', shape: [ 1, 100 ] }
{ id: 1, name: 'Identity_4:0', dtype: 'DT_FLOAT', shape: [ 1, 100 ] }
{ id: 2, name: 'Identity_6:0', dtype: 'DT_FLOAT', shape: [ 1, 12804, 4 ] }
{ id: 3, name: 'Identity_1:0', dtype: 'DT_FLOAT', shape: [ 1, 100, 4 ] }
{ id: 4, name: 'Identity_3:0', dtype: 'DT_FLOAT', shape: [ 1, 100, 8 ] }
{ id: 5, name: 'Identity_5:0', dtype: 'DT_FLOAT', shape: [ 1 ] }
{ id: 6, name: 'Identity:0', dtype: 'DT_FLOAT', shape: [ 1, 100 ] }
{ id: 7, name: 'Identity_7:0', dtype: 'DT_FLOAT', shape: [ 1, 12804, 8 ] }
INFO: tensors: 619
DATA: weights: {
  files: [ 'handtrack.bin' ],
  size: { disk: 2964837, memory: 11846016 },
  count: { total: 619, int32: 347, float32: 272 },
  quantized: { none: 347, uint8: 272 },
  values: { total: 2961504, int32: 1111, float32: 2960393 }
}
DATA: kernel ops: {
  graph: [ 'Const', 'Placeholder', 'Identity', 'Shape', 'NoOp' ],
  control: [ 'TensorListReserve', 'Enter', 'TensorListFromTensor', 'Merge', 'LoopCond', 'Switch', 'Exit', 'TensorListStack', 'NextIteration', 'TensorListSetItem', 'TensorListGetItem' ],
  logical: [ 'Less', 'LogicalAnd', 'Select', 'Greater', 'GreaterEqual' ],
  convolution: [ '_FusedConv2D', 'FusedDepthwiseConv2dNative', 'DepthwiseConv2dNative' ],
  arithmetic: [ 'AddV2', 'Mul', 'Sub', 'Minimum', 'Maximum' ],
  transformation: [ 'Cast', 'ExpandDims', 'Squeeze', 'Reshape', 'Pad' ],
  slice_join: [ 'Unpack', 'StridedSlice', 'Pack', 'ConcatV2', 'Slice', 'GatherV2', 'Split' ],
  image: [ 'ResizeBilinear' ],
  basic_math: [ 'Reciprocal', 'Sigmoid', 'Exp' ],
  matrices: [ 'Transpose' ],
  dynamic: [ 'NonMaxSuppressionV5', 'Where' ],
  creation: [ 'Fill', 'Range' ],
  evaluation: [ 'TopKV2' ],
  reduction: [ 'Sum' ]
}
INFO: graph model: /home/vlado/dev/human/models/antispoof.json
INFO: created on: 2021-10-13T14:20:27.100Z
INFO: metadata: { generatedBy: 'https://www.kaggle.com/anku420/fake-face-detection', convertedBy: 'https://github.com/vladmandic', version: '716.undefined' }
INFO: model inputs based on signature
{ name: 'conv2d_input', dtype: 'DT_FLOAT', shape: [ -1, 128, 128, 3 ] }
INFO: model outputs based on signature
{ id: 0, name: 'activation_4', dtype: 'DT_FLOAT', shape: [ -1, 1 ] }
INFO: tensors: 11
DATA: weights: {
  files: [ 'antispoof.bin' ],
  size: { disk: 853098, memory: 1706188 },
  count: { total: 11, float32: 10, int32: 1 },
  quantized: { float16: 10, none: 1 },
  values: { total: 426547, float32: 426545, int32: 2 }
}
DATA: kernel ops: { graph: [ 'Const', 'Placeholder', 'Identity' ], convolution: [ '_FusedConv2D', 'MaxPool' ], basic_math: [ 'Relu', 'Sigmoid' ], transformation: [ 'Reshape' ], matrices: [ '_FusedMatMul' ] }
INFO: graph model: /home/vlado/dev/human/models/handlandmark-full.json
INFO: created on: 2021-10-31T12:27:49.343Z
INFO: metadata: { generatedBy: 'https://github.com/google/mediapipe', convertedBy: 'https://github.com/vladmandic', version: '808.undefined' }
INFO: model inputs based on signature
{ name: 'input_1', dtype: 'DT_FLOAT', shape: [ 1, 224, 224, 3 ] }
INFO: model outputs based on signature
{ id: 0, name: 'Identity_3:0', dtype: 'DT_FLOAT', shape: [ 1, 63 ] }
{ id: 1, name: 'Identity:0', dtype: 'DT_FLOAT', shape: [ 1, 63 ] }
{ id: 2, name: 'Identity_1:0', dtype: 'DT_FLOAT', shape: [ 1, 1 ] }
{ id: 3, name: 'Identity_2:0', dtype: 'DT_FLOAT', shape: [ 1, 1 ] }
INFO: tensors: 103
DATA: weights: {
  files: [ 'handlandmark-full.bin' ],
  size: { disk: 5431368, memory: 10862728 },
  count: { total: 103, float32: 102, int32: 1 },
  quantized: { float16: 102, none: 1 },
  values: { total: 2715682, float32: 2715680, int32: 2 }
}
DATA: kernel ops: {
  graph: [ 'Const', 'Placeholder', 'Identity' ],
  convolution: [ 'Conv2D', 'DepthwiseConv2dNative' ],
  arithmetic: [ 'AddV2', 'AddN' ],
  basic_math: [ 'Relu6', 'Sigmoid' ],
  reduction: [ 'Mean' ],
  matrices: [ '_FusedMatMul' ]
}
INFO: graph model: /home/vlado/dev/human/models/liveness.json
INFO: created on: 2021-11-09T12:39:11.760Z
INFO: metadata: { generatedBy: 'https://github.com/leokwu/livenessnet', convertedBy: 'https://github.com/vladmandic', version: '808.undefined' }
INFO: model inputs based on signature
{ name: 'conv2d_1_input', dtype: 'DT_FLOAT', shape: [ -1, 32, 32, 3 ] }
INFO: model outputs based on signature
{ id: 0, name: 'activation_6', dtype: 'DT_FLOAT', shape: [ -1, 2 ] }
INFO: tensors: 23
DATA: weights: {
  files: [ 'liveness.bin' ],
  size: { disk: 592976, memory: 592976 },
  count: { total: 23, float32: 22, int32: 1 },
  quantized: { none: 23 },
  values: { total: 148244, float32: 148242, int32: 2 }
}
DATA: kernel ops: {
  graph: [ 'Const', 'Placeholder', 'Identity' ],
  convolution: [ '_FusedConv2D', 'MaxPool' ],
  arithmetic: [ 'Mul', 'Add', 'AddV2' ],
  transformation: [ 'Reshape' ],
  matrices: [ '_FusedMatMul' ],
  normalization: [ 'Softmax' ]
}
```
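The signature dump above can be reproduced for any of the listed models. A minimal sketch follows, assuming `@tensorflow/tfjs-node` is installed and the `models/` folder is available locally; the model path is just an example taken from the log:

```ts
// minimal sketch: load a graph model and print the signature fields
// reported above (inputs/outputs with name, dtype, shape)
import * as tf from '@tensorflow/tfjs-node';

async function inspect(modelPath: string): Promise<void> {
  // tfjs-node accepts file:// URLs for local graph models
  const model = await tf.loadGraphModel(`file://${modelPath}`);
  for (const t of model.inputs) console.log('input:', { name: t.name, dtype: t.dtype, shape: t.shape });
  for (const t of model.outputs) console.log('output:', { name: t.name, dtype: t.dtype, shape: t.shape });
}

inspect('models/faceres.json'); // example model from the log above
```

Note the `size: { disk, memory }` pairs in the log: for float16-quantized models, memory is roughly twice disk, since quantized weights are dequantized to float32 when loaded (e.g. faceres: 6978814 bytes on disk vs 13957620 in memory).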
@ -0,0 +1,60 @@
{
  "antispoof": 853098,
  "blazeface": 538928,
  "centernet": 4030290,
  "emotion": 820516,
  "facemesh": 1477958,
  "faceres": 6978814,
  "handlandmark-lite": 2023432,
  "handtrack": 2964837,
  "iris": 2599092,
  "liveness": 592976,
  "models": 0,
  "movenet-lightning": 4650216,
  "affectnet-mobilenet": 6920630,
  "age": 161240,
  "blazeface-back": 538928,
  "blazeface-front": 402048,
  "blazepose-detector": 5928856,
  "blazepose-full": 6339202,
  "blazepose-heavy": 27502466,
  "blazepose-lite": 2726402,
  "efficientpose": 5651240,
  "faceboxes": 2013002,
  "facemesh-attention-pinto": 2387598,
  "facemesh-attention": 2382414,
  "facemesh-detection-full": 1026192,
  "facemesh-detection-short": 201268,
  "faceres-deep": 13957620,
  "gear-e1": 112438,
  "gear-e2": 112438,
  "gear": 1498916,
  "gender-ssrnet-imdb": 161236,
  "gender": 201808,
  "handdetect": 3515612,
  "handlandmark-full": 5431368,
  "handlandmark-sparse": 5286322,
  "handskeleton": 5502280,
  "meet": 372228,
  "mobileface": 2183192,
  "mobilefacenet": 5171976,
  "movenet-multipose": 9448838,
  "movenet-thunder": 12477112,
  "nanodet": 7574558,
  "posenet": 5032780,
  "rvm": 3739355,
  "selfie": 212886,
  "anti-spoofing": 853098,
  "efficientpose-i-lite": 2269064,
  "efficientpose-ii-lite": 5651240,
  "efficientpose-iv": 25643252,
  "insightface-efficientnet-b0": 13013224,
  "insightface-ghostnet-strides1": 8093408,
  "insightface-ghostnet-strides2": 8049584,
  "insightface-mobilenet-emore": 6938536,
  "insightface-mobilenet-swish": 12168584,
  "nanodet-e": 12319156,
  "nanodet-g": 7574558,
  "nanodet-m": 1887474,
  "nanodet-t": 5294216
}
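A size map like the one above can be regenerated rather than maintained by hand. A short sketch, assuming the weight files live under `models/` as `<name>.bin` (the folder layout is an assumption, not something stated by the file itself):

```ts
// hypothetical helper: build a { name: bytes } map of weight-file sizes,
// mirroring the shape of the JSON above
import { readdirSync, statSync } from 'fs';
import { join } from 'path';

function modelSizes(dir: string): Record<string, number> {
  const sizes: Record<string, number> = {};
  for (const file of readdirSync(dir)) {
    // key each entry by the weight file name without its .bin extension
    if (file.endsWith('.bin')) sizes[file.replace(/\.bin$/, '')] = statSync(join(dir, file)).size;
  }
  return sizes;
}

console.log(JSON.stringify(modelSizes('models'), null, 2));
```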