Compare commits
495 Commits
@@ -0,0 +1,28 @@
{
  "$schema": "https://developer.microsoft.com/json-schemas/api-extractor/v7/api-extractor.schema.json",
  "mainEntryPointFilePath": "types/lib/src/human.d.ts",
  "bundledPackages": ["@types/offscreencanvas", "@tensorflow/tfjs-core", "@tensorflow/tfjs-converter", "@tensorflow/tfjs-data"],
  "compiler": {
    "skipLibCheck": false
  },
  "newlineKind": "lf",
  "dtsRollup": {
    "enabled": true,
    "untrimmedFilePath": "types/human.d.ts"
  },
  "docModel": { "enabled": false },
  "tsdocMetadata": { "enabled": false },
  "apiReport": { "enabled": false },
  "messages": {
    "compilerMessageReporting": {
      "default": { "logLevel": "warning" }
    },
    "extractorMessageReporting": {
      "default": { "logLevel": "warning" },
      "ae-missing-release-tag": { "logLevel": "none" }
    },
    "tsdocMessageReporting": {
      "default": { "logLevel": "warning" }
    }
  }
}
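The configuration above rolls all compiled declaration files into a single `types/human.d.ts`. As a hedged illustration of how such a configuration is consumed, the sketch below invokes API Extractor programmatically; it assumes `@microsoft/api-extractor` is installed and that the configuration is saved as `api-extractor.json` (the file name itself is not shown in this diff):

```ts
// Minimal sketch: run API Extractor against the configuration shown above.
// Assumes the config is saved as ./api-extractor.json (hypothetical path).
import { Extractor, ExtractorConfig } from '@microsoft/api-extractor';

const config = ExtractorConfig.loadFileAndPrepare('./api-extractor.json');
const result = Extractor.invoke(config, { localBuild: true, showVerboseMessages: true });

if (result.succeeded) {
  console.log('API Extractor succeeded: rollup written to types/human.d.ts');
} else {
  console.error(`API Extractor failed: ${result.errorCount} errors, ${result.warningCount} warnings`);
  process.exitCode = 1;
}
```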
@@ -0,0 +1,171 @@
{
  "log": {
    "enabled": true,
    "debug": false,
    "console": true,
    "output": "test/build.log"
  },
  "profiles": {
    "production": ["clean", "compile", "typings", "typedoc", "lint", "changelog"],
    "development": ["serve", "watch", "compile"],
    "serve": ["serve"],
    "clean": ["clean"]
  },
  "clean": {
    "locations": ["dist/*", "types/lib/*", "typedoc/*"]
  },
  "lint": {
    "locations": [ "*.json", "src/**/*.ts", "test/**/*.js", "demo/**/*.js" ],
    "rules": { }
  },
  "changelog": {
    "log": "CHANGELOG.md"
  },
  "serve": {
    "sslKey": "node_modules/@vladmandic/build/cert/https.key",
    "sslCrt": "node_modules/@vladmandic/build/cert/https.crt",
    "httpPort": 10030,
    "httpsPort": 10031,
    "documentRoot": ".",
    "defaultFolder": "demo",
    "defaultFile": "index.html"
  },
  "build": {
    "global": {
      "target": "es2018",
      "sourcemap": false,
      "treeShaking": true,
      "ignoreAnnotations": true,
      "banner": { "js": "/*\n Human\n homepage: <https://github.com/vladmandic/human>\n author: <https://github.com/vladmandic>'\n*/\n" }
    },
    "targets": [
      {
        "name": "tfjs/nodejs/cpu",
        "platform": "node",
        "format": "cjs",
        "input": "tfjs/tf-node.ts",
        "output": "dist/tfjs.esm.js",
        "external": ["@tensorflow"]
      },
      {
        "name": "human/nodejs/cpu",
        "platform": "node",
        "format": "cjs",
        "input": "src/human.ts",
        "output": "dist/human.node.js",
        "external": ["@tensorflow"]
      },
      {
        "name": "tfjs/nodejs/gpu",
        "platform": "node",
        "format": "cjs",
        "input": "tfjs/tf-node-gpu.ts",
        "output": "dist/tfjs.esm.js",
        "external": ["@tensorflow"]
      },
      {
        "name": "human/nodejs/gpu",
        "platform": "node",
        "format": "cjs",
        "input": "src/human.ts",
        "output": "dist/human.node-gpu.js",
        "external": ["@tensorflow"]
      },
      {
        "name": "tfjs/nodejs/wasm",
        "platform": "node",
        "format": "cjs",
        "input": "tfjs/tf-node-wasm.ts",
        "output": "dist/tfjs.esm.js",
        "external": ["@tensorflow"]
      },
      {
        "name": "human/nodejs/wasm",
        "platform": "node",
        "format": "cjs",
        "input": "src/human.ts",
        "output": "dist/human.node-wasm.js",
        "external": ["@tensorflow"]
      },
      {
        "name": "tfjs/browser/version",
        "platform": "browser",
        "format": "esm",
        "input": "tfjs/tf-version.ts",
        "output": "dist/tfjs.version.js"
      },
      {
        "name": "tfjs/browser/esm/nobundle",
        "platform": "browser",
        "format": "esm",
        "input": "tfjs/tf-browser.ts",
        "output": "dist/tfjs.esm.js",
        "external": ["@tensorflow"]
      },
      {
        "name": "human/browser/esm/nobundle",
        "platform": "browser",
        "format": "esm",
        "input": "src/human.ts",
        "output": "dist/human.esm-nobundle.js",
        "sourcemap": true,
        "external": ["@tensorflow"]
      },
      {
        "name": "tfjs/browser/esm/custom",
        "platform": "browser",
        "format": "esm",
        "input": "tfjs/tf-custom.ts",
        "output": "dist/tfjs.esm.js",
        "sourcemap": false,
        "minify": false
      },
      {
        "name": "human/browser/iife/bundle",
        "platform": "browser",
        "format": "iife",
        "input": "src/human.ts",
        "output": "dist/human.js",
        "minify": true,
        "globalName": "Human",
        "external": ["@tensorflow"]
      },
      {
        "name": "human/browser/esm/bundle",
        "platform": "browser",
        "format": "esm",
        "input": "src/human.ts",
        "output": "dist/human.esm.js",
        "sourcemap": true,
        "minify": false,
        "external": ["@tensorflow"],
        "typings": "types/lib",
        "typedoc": "typedoc"
      },
      {
        "name": "demo/typescript",
        "platform": "browser",
        "format": "esm",
        "input": "demo/typescript/index.ts",
        "output": "demo/typescript/index.js",
        "sourcemap": true,
        "external": ["*/human.esm.js"]
      },
      {
        "name": "demo/faceid",
        "platform": "browser",
        "format": "esm",
        "input": "demo/faceid/index.ts",
        "output": "demo/faceid/index.js",
        "sourcemap": true,
        "external": ["*/human.esm.js"]
      }
    ]
  },
  "watch": {
    "locations": [ "src/**/*", "tfjs/**/*", "demo/**/*.ts" ]
  },
  "typescript": {
    "allowJs": false
  }
}
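Each entry in `build.targets` above maps one TypeScript entry point to one bundled output; the build is orchestrated by the `@vladmandic/build` tool, which wraps esbuild. As a hedged illustration, the `human/browser/esm/bundle` target corresponds roughly to an esbuild call like the following (option names are esbuild's; the `"@tensorflow"` external entry is written here as a wildcard pattern):

```ts
// Rough esbuild equivalent of the "human/browser/esm/bundle" target above.
// Illustrative only; the actual build is orchestrated by @vladmandic/build.
import { build } from 'esbuild';

await build({
  entryPoints: ['src/human.ts'],   // target "input"
  outfile: 'dist/human.esm.js',    // target "output"
  platform: 'browser',
  format: 'esm',
  bundle: true,
  target: 'es2018',                // from build.global.target
  sourcemap: true,
  minify: false,
  treeShaking: true,
  external: ['@tensorflow/*'],     // keep tfjs packages external
  banner: { js: '/*\n Human\n homepage: <https://github.com/vladmandic/human>\n*/' },
});
```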
.eslintrc.json (232 changed lines)
|
@ -1,74 +1,164 @@
|
|||
{
|
||||
"globals": {},
|
||||
"env": {
|
||||
"browser": true,
|
||||
"commonjs": true,
|
||||
"es6": true,
|
||||
"node": true,
|
||||
"jquery": true,
|
||||
"es2020": true
|
||||
},
|
||||
"parser": "@typescript-eslint/parser",
|
||||
"parserOptions": { "ecmaVersion": 2020 },
|
||||
"plugins": [
|
||||
"@typescript-eslint"
|
||||
"overrides": [
|
||||
{
|
||||
"files": ["**/*.ts"],
|
||||
"env": {
|
||||
"browser": true,
|
||||
"commonjs": false,
|
||||
"node": false,
|
||||
"es2021": true
|
||||
},
|
||||
"parser": "@typescript-eslint/parser",
|
||||
"parserOptions": {
|
||||
"ecmaVersion": "latest",
|
||||
"project": ["./tsconfig.json"]
|
||||
},
|
||||
"plugins": [
|
||||
"@typescript-eslint"
|
||||
],
|
||||
"extends": [
|
||||
"airbnb-base",
|
||||
"eslint:recommended",
|
||||
"plugin:@typescript-eslint/eslint-recommended",
|
||||
"plugin:@typescript-eslint/recommended",
|
||||
"plugin:@typescript-eslint/recommended-requiring-type-checking",
|
||||
"plugin:@typescript-eslint/strict",
|
||||
"plugin:import/recommended",
|
||||
"plugin:promise/recommended"
|
||||
],
|
||||
"rules": {
|
||||
"@typescript-eslint/ban-ts-comment":"off",
|
||||
"@typescript-eslint/dot-notation":"off",
|
||||
"@typescript-eslint/no-empty-interface":"off",
|
||||
"@typescript-eslint/no-inferrable-types":"off",
|
||||
"@typescript-eslint/no-misused-promises":"off",
|
||||
"@typescript-eslint/no-unnecessary-condition":"off",
|
||||
"@typescript-eslint/no-unsafe-argument":"off",
|
||||
"@typescript-eslint/no-unsafe-assignment":"off",
|
||||
"@typescript-eslint/no-unsafe-call":"off",
|
||||
"@typescript-eslint/no-unsafe-member-access":"off",
|
||||
"@typescript-eslint/no-unsafe-return":"off",
|
||||
"@typescript-eslint/non-nullable-type-assertion-style":"off",
|
||||
"@typescript-eslint/prefer-for-of":"off",
|
||||
"@typescript-eslint/prefer-nullish-coalescing":"off",
|
||||
"@typescript-eslint/prefer-ts-expect-error":"off",
|
||||
"@typescript-eslint/restrict-plus-operands":"off",
|
||||
"@typescript-eslint/restrict-template-expressions":"off",
|
||||
"dot-notation":"off",
|
||||
"guard-for-in":"off",
|
||||
"import/extensions": ["off", "always"],
|
||||
"import/no-unresolved":"off",
|
||||
"import/prefer-default-export":"off",
|
||||
"lines-between-class-members":"off",
|
||||
"max-len": [1, 275, 3],
|
||||
"no-async-promise-executor":"off",
|
||||
"no-await-in-loop":"off",
|
||||
"no-bitwise":"off",
|
||||
"no-continue":"off",
|
||||
"no-lonely-if":"off",
|
||||
"no-mixed-operators":"off",
|
||||
"no-param-reassign":"off",
|
||||
"no-plusplus":"off",
|
||||
"no-regex-spaces":"off",
|
||||
"no-restricted-syntax":"off",
|
||||
"no-return-assign":"off",
|
||||
"no-void":"off",
|
||||
"object-curly-newline":"off",
|
||||
"prefer-destructuring":"off",
|
||||
"prefer-template":"off",
|
||||
"radix":"off"
|
||||
}
|
||||
},
|
||||
{
|
||||
"files": ["**/*.js"],
|
||||
"env": {
|
||||
"browser": true,
|
||||
"commonjs": true,
|
||||
"node": true,
|
||||
"es2021": true
|
||||
},
|
||||
"parserOptions": {
|
||||
"sourceType": "module",
|
||||
"ecmaVersion": "latest"
|
||||
},
|
||||
"plugins": [
|
||||
],
|
||||
"extends": [
|
||||
"airbnb-base",
|
||||
"eslint:recommended",
|
||||
"plugin:node/recommended",
|
||||
"plugin:promise/recommended"
|
||||
],
|
||||
"rules": {
|
||||
"dot-notation":"off",
|
||||
"import/extensions": ["error", "always"],
|
||||
"import/no-extraneous-dependencies":"off",
|
||||
"max-len": [1, 275, 3],
|
||||
"no-await-in-loop":"off",
|
||||
"no-bitwise":"off",
|
||||
"no-continue":"off",
|
||||
"no-mixed-operators":"off",
|
||||
"no-param-reassign":"off",
|
||||
"no-plusplus":"off",
|
||||
"no-regex-spaces":"off",
|
||||
"no-restricted-syntax":"off",
|
||||
"no-return-assign":"off",
|
||||
"node/no-unsupported-features/es-syntax":"off",
|
||||
"object-curly-newline":"off",
|
||||
"prefer-destructuring":"off",
|
||||
"prefer-template":"off",
|
||||
"radix":"off"
|
||||
}
|
||||
},
|
||||
{
|
||||
"files": ["**/*.json"],
|
||||
"env": {
|
||||
"browser": false,
|
||||
"commonjs": false,
|
||||
"node": false,
|
||||
"es2021": false
|
||||
},
|
||||
"parserOptions": {
|
||||
"ecmaVersion": "latest"
|
||||
},
|
||||
"plugins": [
|
||||
"json"
|
||||
],
|
||||
"extends": [
|
||||
"plugin:json/recommended"
|
||||
]
|
||||
},
|
||||
{
|
||||
"files": ["**/*.html"],
|
||||
"env": {
|
||||
"browser": true,
|
||||
"commonjs": false,
|
||||
"node": false,
|
||||
"es2021": false
|
||||
},
|
||||
"parserOptions": {
|
||||
"sourceType": "module",
|
||||
"ecmaVersion": "latest"
|
||||
},
|
||||
"parser": "@html-eslint/parser",
|
||||
"extends": ["plugin:@html-eslint/recommended"],
|
||||
"plugins": [
|
||||
"html", "@html-eslint"
|
||||
],
|
||||
"rules": {
|
||||
"@html-eslint/element-newline":"off",
|
||||
"@html-eslint/indent": ["error", 2]
|
||||
}
|
||||
}
|
||||
],
|
||||
"extends": [
|
||||
"eslint:recommended",
|
||||
"plugin:import/errors",
|
||||
"plugin:import/warnings",
|
||||
"plugin:node/recommended",
|
||||
"plugin:promise/recommended",
|
||||
"plugin:json/recommended-with-comments",
|
||||
"plugin:@typescript-eslint/eslint-recommended",
|
||||
"plugin:@typescript-eslint/recommended",
|
||||
"airbnb-base"
|
||||
],
|
||||
"ignorePatterns": [ "dist", "assets", "media", "models", "node_modules", "demo/helpers" ],
|
||||
"rules": {
|
||||
"@typescript-eslint/ban-ts-comment": "off",
|
||||
"@typescript-eslint/explicit-module-boundary-types": "off",
|
||||
"@typescript-eslint/ban-types": "off",
|
||||
"@typescript-eslint/no-explicit-any": "off",
|
||||
"@typescript-eslint/no-var-requires": "off",
|
||||
"camelcase": "off",
|
||||
"dot-notation": "off",
|
||||
"func-names": "off",
|
||||
"guard-for-in": "off",
|
||||
"import/extensions": "off",
|
||||
"import/no-extraneous-dependencies": "off",
|
||||
"import/no-named-as-default": "off",
|
||||
"import/no-unresolved": "off",
|
||||
"import/prefer-default-export": "off",
|
||||
"lines-between-class-members": "off",
|
||||
"max-len": [1, 275, 3],
|
||||
"newline-per-chained-call": "off",
|
||||
"no-async-promise-executor": "off",
|
||||
"no-await-in-loop": "off",
|
||||
"no-bitwise": "off",
|
||||
"no-case-declarations":"off",
|
||||
"no-continue": "off",
|
||||
"no-loop-func": "off",
|
||||
"no-mixed-operators": "off",
|
||||
"no-param-reassign":"off",
|
||||
"no-plusplus": "off",
|
||||
"no-regex-spaces": "off",
|
||||
"no-restricted-globals": "off",
|
||||
"no-restricted-syntax": "off",
|
||||
"no-return-assign": "off",
|
||||
"no-underscore-dangle": "off",
|
||||
"node/no-missing-import": ["error", { "tryExtensions": [".js", ".json", ".ts"] }],
|
||||
"node/no-unpublished-import": "off",
|
||||
"node/no-unpublished-require": "off",
|
||||
"node/no-unsupported-features/es-syntax": "off",
|
||||
"no-lonely-if": "off",
|
||||
"node/shebang": "off",
|
||||
"object-curly-newline": "off",
|
||||
"prefer-destructuring": "off",
|
||||
"prefer-template":"off",
|
||||
"promise/always-return": "off",
|
||||
"promise/catch-or-return": "off",
|
||||
"promise/no-nesting": "off",
|
||||
"radix": "off"
|
||||
}
|
||||
}
|
||||
"ignorePatterns": [
|
||||
"node_modules",
|
||||
"assets",
|
||||
"dist",
|
||||
"demo/helpers/*.js",
|
||||
"demo/typescript/*.js",
|
||||
"demo/faceid/*.js",
|
||||
"typedoc"
|
||||
]
|
||||
}
|
||||
|
|
|
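The ESLint configuration in this file splits linting into per-file-type overrides (`*.ts`, `*.js`, `*.json`, `*.html`). As a hedged illustration, the same lint pass can be run programmatically with the ESLint v8 Node API over the locations listed in the build configuration above:

```ts
// Minimal sketch using the ESLint Node API (eslint v8); the file globs follow
// the "lint.locations" list from the build configuration shown earlier.
import { ESLint } from 'eslint';

const eslint = new ESLint({ fix: false });
const results = await eslint.lintFiles(['*.json', 'src/**/*.ts', 'test/**/*.js', 'demo/**/*.js']);
const formatter = await eslint.loadFormatter('stylish');
console.log(await formatter.format(results));

const errorCount = results.reduce((sum, r) => sum + r.errorCount, 0);
if (errorCount > 0) process.exitCode = 1;
```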
@@ -13,16 +13,23 @@ assignees: vladmandic

**Expected Behavior**

**Environment
**Environment**

- Module version?
- Human library version?
- Built-in demo or custom code?
- Type of module used (e.g. `js`, `esm`, `esm-nobundle`)?
- Browser or NodeJS and version (e.g. NodeJS 14.15 or Chrome 89)?
- OS and Hardware platform (e.g. Windows 10, Ubuntu Linux on x64, Android 10)?
- Packager (if any) (e.g, webpack, rollup, parcel, esbuild, etc.)?
- TensorFlow/JS version (if not using bundled module)?
- Browser or NodeJS and version (e.g. *NodeJS 14.15* or *Chrome 89*)?
- OS and Hardware platform (e.g. *Windows 10*, *Ubuntu Linux on x64*, *Android 10*)?
- Packager (if any) (e.g, *webpack*, *rollup*, *parcel*, *esbuild*, etc.)?
- Framework (if any) (e.g. *React*, *NextJS*, etc.)?

**Diagnostics**

- Check out any applicable [diagnostic steps](https://github.com/vladmandic/human/wiki/Diag)

**Additional**

- For installation or startup issues include your `package.json`
- For usage issues, it is recommended to post your code as [gist](https://gist.github.com/)
- For general questions, create a [discussion topic](https://github.com/vladmandic/human/discussions)
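Most of the environment details requested above can be read from a running `Human` instance. A hedged sketch (property names such as `human.version`, `human.config` and `human.env` follow the public API, but exact fields vary between releases):

```ts
// Sketch: gather the version/backend details requested in the issue template.
// Assumes the ESM bundle; the import form differs for the NodeJS/IIFE bundles.
import { Human } from '@vladmandic/human';

const human = new Human();
console.log('human version:', human.version);           // library version
console.log('tfjs version:', human.tf.version_core);    // bundled TensorFlow/JS version
console.log('configured backend:', human.config.backend);
console.log('environment:', human.env);                 // platform & capability diagnostics
```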
@@ -1,3 +1,4 @@
node_modules
private
node_modules/
types/lib
pnpm-lock.yaml
*.swp
.hintrc (7 changed lines)
@@ -3,8 +3,11 @@
    "web-recommended"
  ],
  "browserslist": [
    "last 1 versions",
    "not ie < 20"
    "chrome >= 90",
    "edge >= 90",
    "firefox >= 100",
    "android >= 90",
    "safari >= 15"
  ],
  "hints": {
    "no-inline-styles": "off",
@@ -1,3 +1,7 @@
node_modules
private
pnpm-lock.yaml
samples
typedoc
test
wiki
types/lib
.npmrc (5 changed lines)
@@ -1 +1,4 @@
force = true
force=true
production=true
legacy-peer-deps=true
strict-peer-dependencies=false
@@ -0,0 +1,8 @@
{
  "search.exclude": {
    "dist/*": true,
    "node_modules/*": true
  },
  "search.useGlobalIgnoreFiles": true,
  "search.useParentIgnoreFiles": true
}
CHANGELOG.md (613 changed lines)
@@ -1,19 +1,450 @@
# @vladmandic/human

Version: **1.9.0**
Description: **Human: AI-powered 3D Face Detection & Rotation Tracking, Face Description & Recognition, Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction, Gesture Recognition**

Author: **Vladimir Mandic <mandic00@live.com>**
License: **MIT** </LICENSE>
Repository: **<git+https://github.com/vladmandic/human.git>**

Version: **2.10.1**
Description: **Human: AI-powered 3D Face Detection & Rotation Tracking, Face Description & Recognition, Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction, Gesture Recognition**

Author: **Vladimir Mandic <mandic00@live.com>**
License: **MIT**
Repository: **<https://github.com/vladmandic/human>**

## Changelog

### **human 1.9.0 beta with breaking changes regarding caching** 2021/05/18 mandic00@live.com

### **HEAD -> main** 2022/09/07 mandic00@live.com

### **origin/main** 2021/05/18 mandic00@live.com
### **origin/main** 2022/09/07 mandic00@live.com

- add config flags
|
||||
- test update
|
||||
- release preview
|
||||
- optimize startup sequence
|
||||
- reorder backend init code
|
||||
- test embedding
|
||||
- embedding test
|
||||
- add browser iife tests
|
||||
- minor bug fixes and increased test coverage
|
||||
- extend release tests
|
||||
- add model load exception handling
|
||||
- add softwarekernels config option
|
||||
- expand type safety
|
||||
- full eslint rule rewrite
|
||||
|
||||
### **2.9.4** 2022/08/20 mandic00@live.com
|
||||
|
||||
- add browser test
|
||||
- add tensorflow library detection
|
||||
- fix wasm detection
|
||||
- enumerate additional models
|
||||
- release refresh
|
||||
|
||||
### **2.9.3** 2022/08/10 mandic00@live.com
|
||||
|
||||
- rehault testing framework
|
||||
- release refresh
|
||||
- add insightface
|
||||
|
||||
### **2.9.2** 2022/08/08 mandic00@live.com
|
||||
|
||||
|
||||
### **release: 2.9.1** 2022/07/25 mandic00@live.com
|
||||
|
||||
|
||||
### **2.9.1** 2022/07/25 mandic00@live.com
|
||||
|
||||
- full rebuild
|
||||
- release cleanup
|
||||
- tflite experiments
|
||||
- add load monitor test
|
||||
- beta for upcoming major release
|
||||
- swtich to release version of tfjs
|
||||
- placeholder for face contours
|
||||
- improve face compare in main demo
|
||||
- add webview support
|
||||
- fix(gear): ensure gear.modelpath is used for loadmodel()
|
||||
- npm default install should be prod only
|
||||
- fix npm v7 compatibility
|
||||
- add getmodelstats method
|
||||
- rebuild
|
||||
- release build
|
||||
|
||||
### **2.8.1** 2022/06/08 mandic00@live.com
|
||||
|
||||
- webgpu and wasm optimizations
|
||||
- add faceboxes prototype
|
||||
- full rebuild
|
||||
|
||||
### **2.7.4** 2022/05/24 mandic00@live.com
|
||||
|
||||
|
||||
### **2.7.3** 2022/05/24 mandic00@live.com
|
||||
|
||||
- add face.mesh.keepinvalid config flag
|
||||
- initial work for new facemesh model
|
||||
|
||||
### **2.7.2** 2022/05/12 mandic00@live.com
|
||||
|
||||
- fix demo when used with video files
|
||||
- major release
|
||||
|
||||
### **2.7.1** 2022/05/09 mandic00@live.com
|
||||
|
||||
- support 4k input
|
||||
- add attention draw methods
|
||||
- fix coloring function
|
||||
- enable precompile as part of warmup
|
||||
- prepare release beta
|
||||
- change default face crop
|
||||
- face attention model is available in human-models
|
||||
- beta release 2.7
|
||||
- refactor draw methods
|
||||
- implement face attention model
|
||||
- add electronjs demo
|
||||
- rebuild
|
||||
|
||||
### **2.6.5** 2022/04/01 mandic00@live.com
|
||||
|
||||
- bundle offscreencanvas types
|
||||
- prototype precompile pass
|
||||
- fix changelog generation
|
||||
- fix indexdb config check
|
||||
|
||||
### **2.6.4** 2022/02/27 mandic00@live.com
|
||||
|
||||
- fix types typo
|
||||
- refresh
|
||||
- add config option wasmplatformfetch
|
||||
|
||||
### **2.6.3** 2022/02/10 mandic00@live.com
|
||||
|
||||
- rebuild
|
||||
|
||||
### **2.6.2** 2022/02/07 mandic00@live.com
|
||||
|
||||
- release rebuild
|
||||
|
||||
### **2.6.1** 2022/01/20 mandic00@live.com
|
||||
|
||||
- implement model caching using indexdb
|
||||
- prototype global fetch handler
|
||||
- fix face box and hand tracking when in front of face
|
||||
|
||||
### **2.5.8** 2022/01/14 mandic00@live.com
|
||||
|
||||
- fix samples
|
||||
- fix(src): typo
|
||||
- change on how face box is calculated
|
||||
|
||||
### **2.5.7** 2021/12/27 mandic00@live.com
|
||||
|
||||
- fix posenet
|
||||
- release refresh
|
||||
|
||||
### **2.5.6** 2021/12/15 mandic00@live.com
|
||||
|
||||
- strong type for string enums
|
||||
- rebuild
|
||||
- fix node detection in electron environment
|
||||
|
||||
### **2.5.5** 2021/12/01 mandic00@live.com
|
||||
|
||||
- added human-motion
|
||||
- add offscreencanvas typedefs
|
||||
- release preview
|
||||
- fix face box scaling on detection
|
||||
- cleanup
|
||||
|
||||
### **2.5.4** 2021/11/22 mandic00@live.com
|
||||
|
||||
- prototype blazepose detector
|
||||
- minor fixes
|
||||
- add body 3d interpolation
|
||||
- edit blazepose keypoints
|
||||
- new build process
|
||||
|
||||
### **2.5.3** 2021/11/18 mandic00@live.com
|
||||
|
||||
- create typedef rollup
|
||||
- optimize centernet
|
||||
- cache frequent tf constants
|
||||
- add extra face rotation prior to mesh
|
||||
- release 2.5.2
|
||||
- improve error handling
|
||||
|
||||
### **2.5.2** 2021/11/14 mandic00@live.com
|
||||
|
||||
- fix mobilefacenet module
|
||||
- fix gear and ssrnet modules
|
||||
- fix for face crop when mesh is disabled
|
||||
- implement optional face masking
|
||||
- add similarity score range normalization
|
||||
- add faceid demo
|
||||
- documentation overhaul
|
||||
- auto tensor shape and channels handling
|
||||
- disable use of path2d in node
|
||||
- add liveness module and facerecognition demo
|
||||
- initial version of facerecognition demo
|
||||
- rebuild
|
||||
- add type defs when working with relative path imports
|
||||
- disable humangl backend if webgl 1.0 is detected
|
||||
- add additional hand gestures
|
||||
|
||||
### **2.5.1** 2021/11/08 mandic00@live.com
|
||||
|
||||
- new human.compare api
|
||||
- added links to release notes
|
||||
- new frame change detection algorithm
|
||||
- add histogram equalization
|
||||
- implement wasm missing ops
|
||||
- performance and memory optimizations
|
||||
- fix react compatibility issues
|
||||
- improve box rescaling for all modules
|
||||
- improve precision using wasm backend
|
||||
- refactor predict with execute
|
||||
- patch tfjs type defs
|
||||
- start 2.5 major version
|
||||
- build and docs cleanup
|
||||
- fix firefox bug
|
||||
|
||||
### **2.4.3** 2021/10/28 mandic00@live.com
|
||||
|
||||
- additional human.performance counters
|
||||
|
||||
### **2.4.2** 2021/10/27 mandic00@live.com
|
||||
|
||||
- add ts demo
|
||||
- switch from es2018 to es2020 for main build
|
||||
- switch to custom tfjs for demos
|
||||
- release 2.4
|
||||
|
||||
### **2.4.1** 2021/10/25 mandic00@live.com
|
||||
|
||||
- refactoring plus jsdoc comments
|
||||
- increase face similarity match resolution
|
||||
- time based caching
|
||||
- turn on minification
|
||||
- initial work on skiptime
|
||||
- added generic types
|
||||
- enhanced typing exports
|
||||
- add optional autodetected custom wasm path
|
||||
|
||||
### **2.3.6** 2021/10/21 mandic00@live.com
|
||||
|
||||
- fix for human.draw labels and typedefs
|
||||
- refactor human.env to a class type
|
||||
- add human.custom.esm using custom tfjs build
|
||||
|
||||
### **2.3.5** 2021/10/19 mandic00@live.com
|
||||
|
||||
- removed direct usage of performance.now
|
||||
|
||||
### **2.3.4** 2021/10/19 mandic00@live.com
|
||||
|
||||
- minor blazepose optimizations
|
||||
- compress samples
|
||||
- remove posenet from default package
|
||||
- enhanced movenet postprocessing
|
||||
- use transferrable buffer for worker messages
|
||||
- add optional anti-spoofing module
|
||||
- add node-match advanced example using worker thread pool
|
||||
- package updates
|
||||
- optimize image preprocessing
|
||||
- set webgpu optimized flags
|
||||
- major precision improvements to movenet and handtrack
|
||||
- image processing fixes
|
||||
- redesign body and hand caching and interpolation
|
||||
- demo default config cleanup
|
||||
- improve gaze and face angle visualizations in draw
|
||||
- release 2.3.1
|
||||
|
||||
### **2.3.1** 2021/10/06 mandic00@live.com
|
||||
|
||||
- workaround for chrome offscreencanvas bug
|
||||
- fix backend conflict in webworker
|
||||
- add blazepose v2 and add annotations to body results
|
||||
- fix backend order initialization
|
||||
- added docker notes
|
||||
- breaking change: new similarity and match methods
|
||||
- tweaked default values
|
||||
- enable handtrack as default model
|
||||
- redesign face processing
|
||||
- refactoring
|
||||
- define app specific types
|
||||
- implement box caching for movenet
|
||||
- autodetect number of bodies and hands
|
||||
- upload new samples
|
||||
- new samples gallery and major code folder restructure
|
||||
- new release
|
||||
|
||||
### **2.2.3** 2021/09/24 mandic00@live.com
|
||||
|
||||
- optimize model loading
|
||||
- support segmentation for nodejs
|
||||
- redo segmentation and handtracking
|
||||
- prototype handtracking
|
||||
- automated browser tests
|
||||
- support for dynamic backend switching
|
||||
- initial automated browser tests
|
||||
- enhanced automated test coverage
|
||||
- more automated tests
|
||||
- added configuration validation
|
||||
- prevent validation failed on some model combinations
|
||||
- webgl exception handling
|
||||
|
||||
### **2.2.2** 2021/09/17 mandic00@live.com
|
||||
|
||||
- experimental webgl status monitoring
|
||||
- major release
|
||||
|
||||
### **2.2.1** 2021/09/16 mandic00@live.com
|
||||
|
||||
- add vr model demo
|
||||
- all tests passing
|
||||
- redefine draw helpers interface
|
||||
- add simple webcam and webrtc demo
|
||||
- added visual results browser to demo
|
||||
- reorganize tfjs bundle
|
||||
- experimental custom tfjs bundle - disabled
|
||||
- add platform and backend capabilities detection
|
||||
- enhanced automated tests
|
||||
- enable canvas patching for nodejs
|
||||
- full ts strict typechecks
|
||||
- fix multiple memory leaks
|
||||
- modularize human class and add model validation
|
||||
- add dynamic kernel op detection
|
||||
- added human.env diagnostic class
|
||||
- minor typos
|
||||
- release candidate
|
||||
- parametrize face config
|
||||
- mark all config items as optional
|
||||
- redefine config and result interfaces
|
||||
- fix usge of string enums
|
||||
- start using partial definitions
|
||||
- implement event emitters
|
||||
- fix iife loader
|
||||
- simplify dependencies
|
||||
- change build process
|
||||
- add benchmark info
|
||||
- simplify canvas handling in nodejs
|
||||
- full rebuild
|
||||
|
||||
### **2.1.5** 2021/08/31 mandic00@live.com
|
||||
|
||||
- added demo node-canvas
|
||||
- dynamically generate default wasm path
|
||||
- implement finger poses in hand detection and gestures
|
||||
- implemented movenet-multipose model
|
||||
|
||||
### **2.1.4** 2021/08/19 mandic00@live.com
|
||||
|
||||
- add static type definitions to main class
|
||||
- fix interpolation overflow
|
||||
- rebuild full
|
||||
- improve face box caching
|
||||
- strict type checks
|
||||
- add webgu checks
|
||||
- experimental webgpu support
|
||||
- add experimental webgu demo
|
||||
- add backend initialization checks
|
||||
- complete async work
|
||||
- list detect cameras
|
||||
- switch to async data reads
|
||||
|
||||
### **2.1.3** 2021/08/12 mandic00@live.com
|
||||
|
||||
- fix centernet & update blazeface
|
||||
- minor update
|
||||
- replace movenet with lightning-v4
|
||||
- enable webgl uniform support for faster warmup
|
||||
|
||||
### **2.1.2** 2021/07/29 mandic00@live.com
|
||||
|
||||
- fix unregistered ops in tfjs
|
||||
- fix typo
|
||||
- rebuild new release
|
||||
|
||||
### **2.1.1** 2021/07/29 mandic00@live.com
|
||||
|
||||
- add note on manually disping tensor
|
||||
- modularize model loading
|
||||
|
||||
### **2.0.3** 2021/06/18 mandic00@live.com
|
||||
|
||||
- fix demo paths
|
||||
- added multithreaded demo
|
||||
|
||||
### **2.0.2** 2021/06/14 mandic00@live.com
|
||||
|
||||
- reorganize demos
|
||||
- fix centernet box width & height
|
||||
- add body segmentation sample
|
||||
- add release notes
|
||||
- release 2.0
|
||||
|
||||
### **2.0.1** 2021/06/08 mandic00@live.com
|
||||
|
||||
- add video drag&drop capability
|
||||
- modularize build platform
|
||||
- custom build tfjs from sources
|
||||
- modularize build platform
|
||||
- enable body segmentation and background replacement in demo
|
||||
- minor git corruption
|
||||
- unified build
|
||||
- enable body segmentation and background replacement
|
||||
- work on body segmentation
|
||||
- added experimental body segmentation module
|
||||
- add meet and selfie models
|
||||
- add live hints to demo
|
||||
- switch worker from module to iife importscripts
|
||||
- release candidate
|
||||
- added samples to git
|
||||
- implemented drag & drop for image processing
|
||||
- release candidate
|
||||
- breaking changes to results.face output properties
|
||||
- breaking changes to results.object output properties
|
||||
- breaking changes to results.hand output properties
|
||||
- breaking changes to results.body output properties
|
||||
- implemented human.next global interpolation method
|
||||
- finished draw buffering and smoothing and enabled by default
|
||||
- implemented service worker
|
||||
- quantized centernet
|
||||
- release candidate
|
||||
- added usage restrictions
|
||||
- quantize handdetect model
|
||||
- added experimental movenet-lightning and removed blazepose from default dist
|
||||
- added experimental face.rotation.gaze
|
||||
- fix and optimize for mobile platform
|
||||
- lock typescript to 4.2 due to typedoc incompatibility with 4.3
|
||||
|
||||
### **1.9.4** 2021/05/27 mandic00@live.com
|
||||
|
||||
- fix demo facecompare
|
||||
- webhint and lighthouse optimizations
|
||||
- add camera startup diag messages
|
||||
- implemented unified result.persons that combines face, body and hands for each person
|
||||
- added experimental results interpolation for smooth draw operations
|
||||
|
||||
### **1.9.3** 2021/05/23 mandic00@live.com
|
||||
|
||||
- use green weighted for input diff calculation
|
||||
- implement experimental drawoptions.bufferedoutput and bufferedfactor
|
||||
- use explicit tensor interface
|
||||
- add tfjs types and remove all instances of any
|
||||
- enhance strong typing
|
||||
- rebuild all for release
|
||||
|
||||
### **1.9.2** 2021/05/22 mandic00@live.com
|
||||
|
||||
- add id and boxraw on missing objects
|
||||
- restructure results strong typing
|
||||
|
||||
### **1.9.1** 2021/05/21 mandic00@live.com
|
||||
|
||||
- caching improvements
|
||||
- add experimental mb3-centernet object detection
|
||||
- individual model skipframes values still max high threshold for caching
|
||||
- config.videooptimized has been removed and config.cachesensitivity has been added instead
|
||||
- caching determination is now dynamic based on detection of input change and not based on input types
|
||||
- human 1.9.0 beta with breaking changes regarding caching
|
||||
|
||||
### **1.8.5** 2021/05/18 mandic00@live.com
|
||||
|
||||
|
@ -30,9 +461,7 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
|
|||
|
||||
### **1.8.2** 2021/05/04 mandic00@live.com
|
||||
|
||||
|
||||
### **release 1.8 with major changes and tfjs 3.6.0** 2021/04/30 mandic00@live.com
|
||||
|
||||
- release 1.8 with major changes and tfjs 3.6.0
|
||||
|
||||
### **1.8.1** 2021/04/30 mandic00@live.com
|
||||
|
||||
|
@ -66,7 +495,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
|
|||
- added filter.flip feature
|
||||
- added demo load image from http
|
||||
- mobile demo optimization and iris gestures
|
||||
- full test run
|
||||
- full rebuild
|
||||
- new look
|
||||
- added benchmarks
|
||||
|
@ -176,7 +604,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
|
|||
|
||||
- add experimental nanodet object detection
|
||||
- full models signature
|
||||
- cleanup
|
||||
|
||||
### **1.1.7** 2021/03/16 mandic00@live.com
|
||||
|
||||
|
@ -222,7 +649,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
|
|||
### **1.0.3** 2021/03/10 mandic00@live.com
|
||||
|
||||
- strong typing for public classes and hide private classes
|
||||
- re-added blazeface-front
|
||||
- enhanced age, gender, emotion detection
|
||||
- full rebuild
|
||||
|
||||
|
@ -231,151 +657,73 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
|
|||
- remove blazeface-front, blazepose-upper, faceboxes
|
||||
- remove blazeface-front and faceboxes
|
||||
|
||||
### **release: 1.0.1** 2021/03/09 mandic00@live.com
|
||||
|
||||
|
||||
### **1.0.1** 2021/03/09 mandic00@live.com
|
||||
|
||||
- fix for face detector when mesh is disabled
|
||||
- optimize for npm
|
||||
|
||||
### **0.40.9** 2021/03/08 mandic00@live.com
|
||||
|
||||
- 0.40.9
|
||||
- fix performance issue when running with low confidence
|
||||
|
||||
### **0.40.8** 2021/03/08 mandic00@live.com
|
||||
|
||||
|
||||
### **0.40.7** 2021/03/06 mandic00@live.com
|
||||
|
||||
- 0.40.8
|
||||
- 0.40.7
|
||||
- implemented 3d face angle calculations
|
||||
|
||||
### **0.40.6** 2021/03/06 mandic00@live.com
|
||||
|
||||
- 0.40.6
|
||||
- add curve draw output
|
||||
|
||||
### **0.40.5** 2021/03/05 mandic00@live.com
|
||||
|
||||
- 0.40.5
|
||||
- fix human.draw
|
||||
|
||||
### **0.40.4** 2021/03/05 mandic00@live.com
|
||||
|
||||
- cleanup blazepose code
|
||||
- 0.40.4
|
||||
- fix demo
|
||||
|
||||
### **0.40.3** 2021/03/05 mandic00@live.com
|
||||
|
||||
|
||||
### **0.40.2** 2021/03/05 mandic00@live.com
|
||||
|
||||
- 0.40.3
|
||||
- 0.40.2
|
||||
- added blazepose-upper
|
||||
|
||||
### **0.40.1** 2021/03/04 mandic00@live.com
|
||||
|
||||
- 0.40.1
|
||||
- implement blazepose and update demos
|
||||
- add todo list
|
||||
|
||||
### **0.30.6** 2021/03/03 mandic00@live.com
|
||||
|
||||
- 0.30.6
|
||||
- fine tuning age and face models
|
||||
|
||||
### **0.30.5** 2021/03/02 mandic00@live.com
|
||||
|
||||
- 0.30.5
|
||||
- add debug logging flag
|
||||
|
||||
### **0.30.4** 2021/03/01 mandic00@live.com
|
||||
|
||||
- 0.30.4
|
||||
- added skipinitial flag
|
||||
|
||||
### **0.30.3** 2021/02/28 mandic00@live.com
|
||||
|
||||
- 0.30.3
|
||||
- typo
|
||||
|
||||
### **0.30.2** 2021/02/26 mandic00@live.com
|
||||
|
||||
- 0.30.2
|
||||
- rebuild
|
||||
- fix typo
|
||||
|
||||
### **0.30.1** 2021/02/25 mandic00@live.com
|
||||
|
||||
|
||||
### **0.20.11** 2021/02/24 mandic00@live.com
|
||||
|
||||
|
||||
### **0.20.10** 2021/02/22 mandic00@live.com
|
||||
|
||||
|
||||
### **0.20.9** 2021/02/21 mandic00@live.com
|
||||
|
||||
- remove extra items
|
||||
- simmilarity fix
|
||||
|
||||
### **0.20.8** 2021/02/21 mandic00@live.com
|
||||
|
||||
- embedding fix
|
||||
|
||||
### **0.20.7** 2021/02/21 mandic00@live.com
|
||||
|
||||
- 0.30.1
|
||||
- 0.20.11
|
||||
- 0.20.10
|
||||
- 0.20.9
|
||||
- 0.20.8
|
||||
- 0.20.7
|
||||
- build fix
|
||||
|
||||
### **0.20.6** 2021/02/21 mandic00@live.com
|
||||
|
||||
- 0.20.6
|
||||
- embedding fix
|
||||
|
||||
### **0.20.5** 2021/02/21 mandic00@live.com
|
||||
|
||||
- 0.20.5
|
||||
- fix imagefx and add dev builds
|
||||
|
||||
### **0.20.4** 2021/02/19 mandic00@live.com
|
||||
|
||||
- 0.20.4
|
||||
|
||||
### **0.20.3** 2021/02/17 mandic00@live.com
|
||||
|
||||
- 0.20.3
|
||||
- rebuild
|
||||
|
||||
### **0.20.2** 2021/02/13 mandic00@live.com
|
||||
|
||||
- 0.20.2
|
||||
- merge branch 'main' of https://github.com/vladmandic/human into main
|
||||
- create codeql-analysis.yml
|
||||
- create security.md
|
||||
- add templates
|
||||
|
||||
### **0.20.1** 2021/02/08 mandic00@live.com
|
||||
|
||||
- 0.20.1
|
||||
- menu fixes
|
||||
- convert to typescript
|
||||
|
||||
### **0.11.5** 2021/02/06 mandic00@live.com
|
||||
|
||||
- 0.11.5
|
||||
- added faceboxes alternative model
|
||||
|
||||
### **0.11.4** 2021/02/06 mandic00@live.com
|
||||
|
||||
|
||||
### **0.11.3** 2021/02/02 mandic00@live.com
|
||||
|
||||
|
||||
### **0.11.2** 2021/01/30 mandic00@live.com
|
||||
|
||||
- 0.11.4
|
||||
- 0.11.3
|
||||
- 0.11.2
|
||||
- added warmup for nodejs
|
||||
|
||||
### **update for tfjs 3.0.0** 2021/01/29 mandic00@live.com
|
||||
|
||||
|
||||
### **0.11.1** 2021/01/29 mandic00@live.com
|
||||
|
||||
|
||||
### **0.10.2** 2021/01/22 mandic00@live.com
|
||||
|
||||
|
||||
### **0.10.1** 2021/01/20 mandic00@live.com
|
||||
|
||||
- 0.11.1
|
||||
- 0.10.2
|
||||
- 0.10.1
|
||||
|
||||
### **0.9.26** 2021/01/18 mandic00@live.com
|
||||
|
||||
- fix face detection when mesh is disabled
|
||||
- added minification notes
|
||||
- version bump
|
||||
|
||||
### **0.9.25** 2021/01/13 mandic00@live.com
|
||||
|
@ -437,7 +785,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
|
|||
|
||||
- conditional hand rotation
|
||||
- staggered skipframes
|
||||
- fix permissions
|
||||
|
||||
### **0.9.13** 2020/12/08 mandic00@live.com
|
||||
|
||||
|
@ -489,9 +836,7 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
|
|||
### **0.9.3** 2020/11/16 mandic00@live.com
|
||||
|
||||
- switched to minified build
|
||||
|
||||
### **release: 1.2** 2020/11/15 mandic00@live.com
|
||||
|
||||
- web worker fixes
|
||||
- full rebuild
|
||||
|
||||
### **0.9.2** 2020/11/14 mandic00@live.com
|
||||
|
@ -548,7 +893,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
|
|||
- optimized model loader
|
||||
- merge branch 'main' of https://github.com/vladmandic/human into main
|
||||
- created wiki
|
||||
- delete bug_report.md
|
||||
- optimize font resizing
|
||||
- fix nms sync call
|
||||
|
||||
|
@ -572,7 +916,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
|
|||
|
||||
- optimized camera and mobile layout
|
||||
- fixed worker and filter compatibility
|
||||
- removed test code
|
||||
|
||||
### **0.7.2** 2020/11/04 mandic00@live.com
|
||||
|
||||
|
@ -649,7 +992,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
|
|||
### **0.4.8** 2020/10/28 mandic00@live.com
|
||||
|
||||
- revert "updated menu handler"
|
||||
- fix webpack compatibility issue
|
||||
|
||||
### **0.4.7** 2020/10/27 mandic00@live.com
|
||||
|
||||
|
@ -737,7 +1079,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
|
|||
|
||||
### **0.2.8** 2020/10/13 mandic00@live.com
|
||||
|
||||
- added example image
|
||||
|
||||
### **0.2.7** 2020/10/13 mandic00@live.com
|
||||
|
||||
|
@ -753,7 +1094,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
|
|||
|
||||
### **0.2.4** 2020/10/12 mandic00@live.com
|
||||
|
||||
- removed extra files
|
||||
|
||||
### **0.2.3** 2020/10/12 mandic00@live.com
|
||||
|
||||
|
@ -761,9 +1101,6 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
|
|||
### **0.2.2** 2020/10/12 mandic00@live.com
|
||||
|
||||
|
||||
### **release: 1.0** 2020/10/12 mandic00@live.com
|
||||
|
||||
|
||||
### **0.2.1** 2020/10/12 mandic00@live.com
|
||||
|
||||
- added sample image
|
||||
|
|
|
@@ -22,3 +22,12 @@ Any of the following behavior is unacceptable:
If you believe someone is violating the code of conduct, we ask that you report it

Participants asked to stop any harassing behavior are expected to comply immediately

<br>

## Usage Restrictions

The `Human` library does not allow for usage in the following scenarios:

- Any life-critical decisions
- Any form of surveillance without consent of the user is explicitly out of scope
README.md (234 changed lines)
@@ -9,7 +9,7 @@

**AI-powered 3D Face Detection & Rotation Tracking, Face Description & Recognition,**
**Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis,**
**Age & Gender & Emotion Prediction, Gesture Recognition**
**Age & Gender & Emotion Prediction, Gaze Tracking, Gesture Recognition, Body Segmentation**

<br>

@@ -18,38 +18,80 @@ JavaScript module using TensorFlow/JS Machine Learning library

- **Browser**:
  Compatible with both desktop and mobile platforms
  Compatible with *CPU*, *WebGL*, *WASM* backends
  Compatible with *WebWorker* execution
  Compatible with *WebView*
- **NodeJS**:
  Compatible with both software *tfjs-node* and
  GPU accelerated backends *tfjs-node-gpu* using CUDA libraries

Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) for processing of live WebCam video or static images

<br>

*Check out [**Simple Live Demo**](https://vladmandic.github.io/human/demo/typescript/index.html) fully annotated app as a good starting point ([html](https://github.com/vladmandic/human/blob/main/demo/typescript/index.html)) ([code](https://github.com/vladmandic/human/blob/main/demo/typescript/index.ts))*

*Check out [**Main Live Demo**](https://vladmandic.github.io/human/demo/index.html) app for advanced processing of webcam, video streams or static images with all possible tunable options*

- To start video detection, simply press *Play*
- To process images, simply drag & drop them into your Browser window
- Note: For optimal performance, select only the models you'd like to use
- Note: If you have a modern GPU, the WebGL (default) backend is preferred; otherwise select the WASM backend
<br>
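The notes above translate into very little code. A minimal, hedged usage sketch (backend choice, model selection and paths are illustrative; see the Configuration wiki page for the full schema and the Simple Live Demo for a maintained version):

```ts
// Minimal browser usage sketch: enable only the models you need, pick a backend,
// then run detection in a frame loop. Values shown here are illustrative defaults.
import { Human } from '@vladmandic/human';

const human = new Human({
  backend: 'webgl',            // use 'wasm' on systems without a capable GPU
  modelBasePath: 'https://vladmandic.github.io/human/models/',
  face: { enabled: true },
  body: { enabled: true },
  hand: { enabled: false },    // disabling unused modules improves performance
});

const video = document.getElementById('video') as HTMLVideoElement;

async function detectLoop() {
  const result = await human.detect(video);                  // run all enabled models
  console.log(`faces: ${result.face.length} bodies: ${result.body.length}`);
  requestAnimationFrame(detectLoop);                         // schedule next frame
}

await human.load();     // pre-load enabled models
await human.warmup();   // optional warmup for faster first detection
detectLoop();
```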
|
||||
|
||||
## Releases
|
||||
- [Release Notes](https://github.com/vladmandic/human/releases)
|
||||
- [NPM Link](https://www.npmjs.com/package/@vladmandic/human)
|
||||
## Demos
|
||||
|
||||
- [**Main Application**](https://vladmandic.github.io/human/demo/index.html)
|
||||
- [**Face Extraction, Description, Identification and Matching**](https://vladmandic.github.io/human/demo/facematch.html)
|
||||
- [**Face Extraction and 3D Rendering**](https://vladmandic.github.io/human/demo/face3d.html)
|
||||
- [**Details on Demo Applications**](https://github.com/vladmandic/human/wiki/Demos)
|
||||
- [**List of all Demo applications**](https://github.com/vladmandic/human/wiki/Demos)
|
||||
- [**Live Examples galery**](https://vladmandic.github.io/human/samples/index.html)
|
||||
|
||||
### Browser Demos
|
||||
|
||||
*All browser demos are self-contained without any external dependencies*
|
||||
|
||||
- **Full** [[*Live*]](https://vladmandic.github.io/human/demo/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo): Main browser demo app that showcases all Human capabilities
|
||||
- **Simple** [[*Live*]](https://vladmandic.github.io/human/demo/typescript/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/typescript): Simple demo in WebCam processing demo in TypeScript
|
||||
- **Face Match** [[*Live*]](https://vladmandic.github.io/human/demo/facematch/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Extract faces from images, calculates face descriptors and simmilarities and matches them to known database
|
||||
- **Face ID** [[*Live*]](https://vladmandic.github.io/human/demo/faceid/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/faceid): Runs multiple checks to validate webcam input before performing face match to faces in IndexDB
|
||||
- **Multi-thread** [[*Live*]](https://vladmandic.github.io/human/demo/multithread/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread): Runs each Human module in a separate web worker for highest possible performance
|
||||
- **NextJS** [[*Live*]](https://vladmandic.github.io/human-next/out/index.html) [[*Details*]](https://github.com/vladmandic/human-next): Use Human with TypeScript, NextJS and ReactJS
|
||||
- **ElectronJS** [[*Details*]](https://github.com/vladmandic/human-electron): Use Human with TypeScript and ElectonJS to create standalone cross-platform apps
|
||||
- **3D Analysis** [[*Live*]](https://vladmandic.github.io/human-motion/src/index.html) [[*Details*]](https://github.com/vladmandic/human-motion): 3D tracking and visualization of heead, face, eye, body and hand
|
||||
- **Avatar Bone Mapping** [[*Live*]](https://vladmandic.github.io/human-vrm/src/human-avatar.html) [[*Details*]](https://github.com/vladmandic/human-avatar): Human skeleton with full bone mapping using look and inverse kinematics controllers
|
||||
- **Virtual Model Tracking** [[*Live*]](https://vladmandic.github.io/human-vrm/src/human-vrm.html) [[*Details*]](https://github.com/vladmandic/human-vrm): VR model with head, face, eye, body and hand tracking
|
||||
|
||||
### NodeJS Demos
|
||||
|
||||
*NodeJS demos may require extra dependencies which are used to decode inputs*
|
||||
*See header of each demo to see its dependencies as they are not automatically installed with `Human`*
|
||||
|
||||
- **Main** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Process images from files, folders or URLs using native methods
|
||||
- **Canvas** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Process image from file or URL and draw results to a new image file using `node-canvas`
|
||||
- **Video** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Processing of video input using `ffmpeg`
|
||||
- **WebCam** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Processing of webcam screenshots using `fswebcam`
|
||||
- **Events** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Showcases usage of `Human` eventing to get notifications on processing
|
||||
- **Similarity** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Compares two input images for similarity of detected faces
|
||||
- **Face Match** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Parallel processing of face **match** in multiple child worker threads
|
||||
- **Multiple Workers** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Runs multiple parallel `human` detections by dispatching them to a pool of pre-created worker processes
|
||||
|
||||
## Project pages
|
||||
|
||||
- [**Code Repository**](https://github.com/vladmandic/human)
|
||||
- [**NPM Package**](https://www.npmjs.com/package/@vladmandic/human)
|
||||
- [**Issues Tracker**](https://github.com/vladmandic/human/issues)
|
||||
- [**TypeDoc API Specification: Human**](https://vladmandic.github.io/human/typedoc/classes/human.html)
|
||||
- [**TypeDoc API Specification: Root**](https://vladmandic.github.io/human/typedoc/)
|
||||
- [**TypeDoc API Specification**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
|
||||
- [**Change Log**](https://github.com/vladmandic/human/blob/main/CHANGELOG.md)
|
||||
- [**Current To-do List**](https://github.com/vladmandic/human/blob/main/TODO.md)
|
||||
|
||||
## Wiki pages
|
||||
|
||||
- [**Home**](https://github.com/vladmandic/human/wiki)
|
||||
- [**Installation**](https://github.com/vladmandic/human/wiki/Install)
|
||||
- [**Usage & Functions**](https://github.com/vladmandic/human/wiki/Usage)
|
||||
- [**Configuration Details**](https://github.com/vladmandic/human/wiki/Configuration)
|
||||
- [**Output Details**](https://github.com/vladmandic/human/wiki/Outputs)
|
||||
- [**Configuration Details**](https://github.com/vladmandic/human/wiki/Config)
|
||||
- [**Result Details**](https://github.com/vladmandic/human/wiki/Result)
|
||||
- [**Caching & Smoothing**](https://github.com/vladmandic/human/wiki/Caching)
|
||||
- [**Input Processing**](https://github.com/vladmandic/human/wiki/Image)
|
||||
- [**Face Recognition & Face Description**](https://github.com/vladmandic/human/wiki/Embedding)
|
||||
- [**Gesture Recognition**](https://github.com/vladmandic/human/wiki/Gesture)
|
||||
- [**Common Issues**](https://github.com/vladmandic/human/wiki/Issues)
|
||||
|
@ -57,13 +99,19 @@ Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) fo
|
|||
|
||||
## Additional notes
|
||||
|
||||
- [**Notes on Backends**](https://github.com/vladmandic/human/wiki/Backends)
|
||||
- [**Comparing Backends**](https://github.com/vladmandic/human/wiki/Backends)
|
||||
- [**Development Server**](https://github.com/vladmandic/human/wiki/Development-Server)
|
||||
- [**Build Process**](https://github.com/vladmandic/human/wiki/Build-Process)
|
||||
- [**Adding Custom Modules**](https://github.com/vladmandic/human/wiki/Module)
|
||||
- [**Performance Notes**](https://github.com/vladmandic/human/wiki/Performance)
|
||||
- [**Performance Profiling**](https://github.com/vladmandic/human/wiki/Profiling)
|
||||
- [**Platform Support**](https://github.com/vladmandic/human/wiki/Platforms)
|
||||
- [**Diagnostic and Performance trace information**](https://github.com/vladmandic/human/wiki/Diag)
|
||||
- [**Dockerize Human applications**](https://github.com/vladmandic/human/wiki/Docker)
|
||||
- [**List of Models & Credits**](https://github.com/vladmandic/human/wiki/Models)
|
||||
- [**Models Download Repository**](https://github.com/vladmandic/human-models)
|
||||
- [**Security & Privacy Policy**](https://github.com/vladmandic/human/blob/main/SECURITY.md)
|
||||
- [**License & Usage Restrictions**](https://github.com/vladmandic/human/blob/main/LICENSE)
|
||||
|
||||
<br>
|
||||
|
||||
|
@ -73,43 +121,64 @@ Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) fo
|
|||
|
||||
<hr><br>
|
||||
|
||||
## Examples
|
||||
|
||||
Visit [Examples gallery](https://vladmandic.github.io/human/samples/samples.html) for more examples
|
||||
<https://vladmandic.github.io/human/samples/samples.html>
|
||||
|
||||

|
||||
|
||||
<br>
|
||||
|
||||
## Options
|
||||
|
||||
As presented in the demo application...
|
||||
> [demo/index.html](demo/index.html)
|
||||
All options as presented in the demo application...
|
||||
> [demo/index.html](demo/index.html)
|
||||
|
||||

|
||||
|
||||
<br>
|
||||
|
||||
## Examples
|
||||
**Results Browser:**
|
||||
[ *Demo -> Display -> Show Results* ]<br>
|
||||

|
||||
|
||||
<br>
|
||||
|
||||
**Training image:**
|
||||
> [demo/index.html](demo/index.html?image=%22../assets/human-sample-upper.jpg%22)
|
||||
## Advanced Examples
|
||||
|
||||

|
||||
|
||||
**Using static images:**
|
||||
> [demo/index.html](demo/index.html?images=true)
|
||||
|
||||

|
||||
|
||||
**Live WebCam view:**
|
||||
> [demo/index.html](demo/index.html)
|
||||
|
||||

|
||||
|
||||
**Face Similarity Matching:**
|
||||
> [demo/facematch.html](demo/facematch.html)
|
||||
1. **Face Similarity Matching:**
|
||||
Extracts all faces from the provided input images,
|
||||
sorts them by similarity to the selected face,
|
||||
and optionally matches each detected face against a database of known people to guess their names
|
||||
> [demo/facematch](demo/facematch/index.html)
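In code, that flow reduces to comparing face descriptors; a minimal sketch using the library's `similarity` and `match` helpers (the `human` instance, input `image` and database contents below are illustrative):

```js
const result = await human.detect(image); // detect all faces in an input image
const first = result.face[0].embedding; // descriptor of the first detected face
const second = result.face[1]?.embedding; // descriptor of another detected face, if any
if (second) console.log('similarity:', human.similarity(first, second)); // 0..1 similarity score
const db = [{ name: 'person-a', embedding: [/* previously stored descriptor */] }]; // known-faces database (illustrative)
const best = human.match(first, db.map((rec) => rec.embedding)); // returns { index, similarity, distance }
console.log('best match:', db[best.index]?.name, best.similarity);
```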
|
||||
|
||||

|
||||
|
||||
**Face3D OpenGL Rendering:**
|
||||
> [demo/face3d.html](demo/face3d.html)
|
||||
<br>
|
||||
|
||||

|
||||
2. **3D Rendering:**
|
||||
> [human-motion](https://github.com/vladmandic/human-motion)
|
||||
|
||||

|
||||

|
||||

|
||||
|
||||
<br>
|
||||
|
||||
3. **Avatar Bone Mapping:**
|
||||
> [human-avatar](https://github.com/vladmandic/human-avatar)
|
||||
|
||||

|
||||
|
||||
<br>
|
||||
|
||||
4. **VR Model Tracking:**
|
||||
> [human-vrm](https://github.com/vladmandic/human-vrm)
|
||||
|
||||

|
||||
|
||||
<br>
|
||||
|
||||
**468-Point Face Mesh Details:**
|
||||
(view in full resolution to see keypoints)
|
||||
|
@ -126,7 +195,7 @@ Simply load `Human` (*IIFE version*) directly from a cloud CDN in your HTML file
|
|||
```html
|
||||
<script src="https://cdn.jsdelivr.net/npm/@vladmandic/human/dist/human.js"></script>
|
||||
<script src="https://unpkg.dev/@vladmandic/human/dist/human.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/human/1.4.1/human.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/human/2.1.5/human.js"></script>
|
||||
```
|
||||
|
||||
For details, including how to use `Browser ESM` version or `NodeJS` version of `Human`, see [**Installation**](https://github.com/vladmandic/human/wiki/Install)
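For quick reference, a minimal sketch of the non-IIFE entry points; exact import specifiers and export shapes depend on your bundler and installed package version, so treat them as assumptions and check the Installation wiki page:

```js
// Browser ESM (bundler or <script type="module">)
import Human from '@vladmandic/human'; // or import the prebuilt 'dist/human.esm.js' bundle directly
const human = new Human();

// NodeJS (CommonJS) -- commented out as it targets a different runtime
// const Human = require('@vladmandic/human').default; // resolves to the dist/human.node.js build
// const human = new Human();
```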
|
||||
|
@ -149,7 +218,7 @@ Additionally, `HTMLVideoElement`, `HTMLMediaElement` can be a standard `<video>`
|
|||
Live streaming examples:
|
||||
- **HLS** (*HTTP Live Streaming*) using `hls.js`
|
||||
- **DASH** (Dynamic Adaptive Streaming over HTTP) using `dash.js`
|
||||
- **WebRTC** media track
|
||||
- **WebRTC** media track using built-in support
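For the HLS case, a minimal sketch using `hls.js` to feed a `<video>` element that is then passed to `Human` (stream URL and element id are placeholders; `hls.js` is a separate dependency):

```js
import Hls from 'hls.js'; // hls.js is not bundled with Human

const video = document.getElementById('video-id');
if (Hls.isSupported()) {
  const hls = new Hls();
  hls.loadSource('https://example.com/live/stream.m3u8'); // placeholder stream URL
  hls.attachMedia(video); // hls.js feeds the MediaSource behind the <video> element
}
// once the stream is playing, the video element is passed to Human like any other input
video.onplaying = () => human.detect(video); // assumes an existing Human instance named `human`
```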
|
||||
|
||||
<br>
|
||||
|
||||
|
@ -162,11 +231,11 @@ draw output on screen using internal draw helper functions
|
|||
// create instance of human with simple configuration using default values
|
||||
const config = { backend: 'webgl' };
|
||||
const human = new Human(config);
|
||||
// select input HTMLVideoElement and output HTMLCanvasElement from page
|
||||
const inputVideo = document.getElementById('video-id');
|
||||
const outputCanvas = document.getElementById('canvas-id');
|
||||
|
||||
function detectVideo() {
|
||||
// select input HTMLVideoElement and output HTMLCanvasElement from page
|
||||
const inputVideo = document.getElementById('video-id');
|
||||
const outputCanvas = document.getElementById('canvas-id');
|
||||
// perform processing using default configuration
|
||||
human.detect(inputVideo).then((result) => {
|
||||
// result object will contain detected details
|
||||
|
@ -191,47 +260,104 @@ or using `async/await`:
|
|||
```js
|
||||
// create instance of human with simple configuration using default values
|
||||
const config = { backend: 'webgl' };
|
||||
const human = new Human(config);
|
||||
const human = new Human(config); // create instance of Human
|
||||
const inputVideo = document.getElementById('video-id');
|
||||
const outputCanvas = document.getElementById('canvas-id');
|
||||
|
||||
async function detectVideo() {
|
||||
const inputVideo = document.getElementById('video-id');
|
||||
const outputCanvas = document.getElementById('canvas-id');
|
||||
const result = await human.detect(inputVideo);
|
||||
human.draw.all(outputCanvas, result);
|
||||
requestAnimationFrame(detectVideo);
|
||||
const result = await human.detect(inputVideo); // run detection
|
||||
human.draw.all(outputCanvas, result); // draw all results
|
||||
requestAnimationFrame(detectVideo); // run loop
|
||||
}
|
||||
|
||||
detectVideo();
|
||||
detectVideo(); // start loop
|
||||
```
|
||||
|
||||
or using `Events`:
|
||||
|
||||
```js
|
||||
// create instance of human with simple configuration using default values
|
||||
const config = { backend: 'webgl' };
|
||||
const human = new Human(config); // create instance of Human
|
||||
const inputVideo = document.getElementById('video-id');
|
||||
const outputCanvas = document.getElementById('canvas-id');
|
||||
|
||||
human.events.addEventListener('detect', () => { // event gets triggered when detect is complete
|
||||
human.draw.all(outputCanvas, human.result); // draw all results
|
||||
});
|
||||
|
||||
function detectVideo() {
|
||||
human.detect(inputVideo) // run detection
|
||||
.then(() => requestAnimationFrame(detectVideo)); // upon detect complete start processing of the next frame
|
||||
}
|
||||
|
||||
detectVideo(); // start loop
|
||||
```
|
||||
|
||||
or using interpolated results for smooth video processing by separating detection and drawing loops:
|
||||
|
||||
```js
|
||||
const human = new Human(); // create instance of Human
|
||||
const inputVideo = document.getElementById('video-id');
|
||||
const outputCanvas = document.getElementById('canvas-id');
|
||||
let result;
|
||||
|
||||
async function detectVideo() {
|
||||
result = await human.detect(inputVideo); // run detection
|
||||
requestAnimationFrame(detectVideo); // run detect loop
|
||||
}
|
||||
|
||||
async function drawVideo() {
|
||||
if (result) { // check if result is available
|
||||
const interpolated = human.next(result); // calculate next interpolated frame
|
||||
human.draw.all(outputCanvas, interpolated); // draw the frame
|
||||
}
|
||||
requestAnimationFrame(drawVideo); // run draw loop
|
||||
}
|
||||
|
||||
detectVideo(); // start detection loop
|
||||
drawVideo(); // start draw loop
|
||||
```
|
||||
|
||||
And for even better results, you can run detection in a separate web worker thread
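A minimal sketch of that pattern, assuming a module worker and frames passed as `ImageData` (file names and element ids are illustrative; see `demo/multithread` for the full implementation, and note that GPU backends inside workers may require `OffscreenCanvas` support):

```js
// human.worker.js: runs detection off the main thread (illustrative file name)
import Human from '../dist/human.esm.js'; // path is an assumption, adjust to your setup
const human = new Human();
onmessage = async (msg) => {
  const result = await human.detect(msg.data.image); // ImageData received from the main thread
  postMessage({ result });
};
```

and on the main thread:

```js
// capture a frame, send it to the worker, then queue the next one when the result arrives
const worker = new Worker('human.worker.js', { type: 'module' });
const video = document.getElementById('video-id');
const canvas = document.getElementById('canvas-id');
const ctx = canvas.getContext('2d');
function sendFrame() {
  ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
  const image = ctx.getImageData(0, 0, canvas.width, canvas.height); // ImageData is structured-cloned to the worker
  worker.postMessage({ image });
}
worker.onmessage = (msg) => {
  // consume msg.data.result here (e.g. draw overlays), then process the next frame
  requestAnimationFrame(sendFrame);
};
sendFrame();
```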
|
||||
|
||||
<br><hr><br>
|
||||
|
||||
## Default models
|
||||
|
||||
Default models in Human library are:
|
||||
|
||||
- **Face Detection**: MediaPipe BlazeFace (Back version)
|
||||
- **Face Detection**: MediaPipe BlazeFace Back variation
|
||||
- **Face Mesh**: MediaPipe FaceMesh
|
||||
- **Face Description**: HSE FaceRes
|
||||
- **Face Iris Analysis**: MediaPipe Iris
|
||||
- **Face Description**: HSE FaceRes
|
||||
- **Emotion Detection**: Oarriaga Emotion
|
||||
- **Body Analysis**: PoseNet (AtomicBits version)
|
||||
- **Body Analysis**: MoveNet Lightning variation
|
||||
- **Hand Analysis**: HandTrack & MediaPipe HandLandmarks
|
||||
- **Body Segmentation**: Google Selfie
|
||||
- **Object Detection**: CenterNet with MobileNet v3
|
||||
|
||||
Note that alternative models are provided and can be enabled via configuration
|
||||
For example, `PoseNet` model can be switched for `BlazePose` model depending on the use case
|
||||
For example, `PoseNet` model can be switched for `BlazePose`, `EfficientPose` or `MoveNet` model depending on the use case
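A minimal sketch of such a switch via configuration (the model file name is an assumption; see the models list for exact paths):

```js
const human = new Human({
  body: { enabled: true, modelPath: 'blazepose.json' }, // use BlazePose instead of the default body model
});
```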
|
||||
|
||||
For more info, see [**Configuration Details**](https://github.com/vladmandic/human/wiki/Configuration) and [**List of Models**](https://github.com/vladmandic/human/wiki/Models)
|
||||
|
||||
<br><hr><br>
|
||||
|
||||
`Human` library is written in `TypeScript` [4.2](https://www.typescriptlang.org/docs/handbook/intro.html)
|
||||
Conforming to `JavaScript` [ECMAScript version 2020](https://www.ecma-international.org/ecma-262/11.0/index.html) standard
|
||||
Build target is `JavaScript` **ECMAScript version 2018**
|
||||
## Diagnostics
|
||||
|
||||
- [How to get diagnostic information or performance trace information](https://github.com/vladmandic/human/wiki/Diag)
|
||||
|
||||
<br><hr><br>
|
||||
|
||||
`Human` library is written in `TypeScript` [4.8](https://www.typescriptlang.org/docs/handbook/intro.html)
|
||||
Conforming to latest `JavaScript` [ECMAScript version 2022](https://262.ecma-international.org/) standard
|
||||
Build target is `JavaScript` [ECMAScript version 2018](https://262.ecma-international.org/11.0/)
|
||||
|
||||
<br>
|
||||
|
||||
For details see [**Wiki Pages**](https://github.com/vladmandic/human/wiki)
|
||||
and [**API Specification**](https://vladmandic.github.io/human/typedoc/classes/human.html)
|
||||
and [**API Specification**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
|
||||
|
||||
<br>
|
||||
|
||||
|
|
33
SECURITY.md
|
@ -1,5 +1,32 @@
|
|||
# Security Policy
|
||||
# Security & Privacy Policy
|
||||
|
||||
All issues are tracked publicly on GitHub
|
||||
<br>
|
||||
|
||||
The entire code base and included dependencies are automatically scanned against known security vulnerabilities
|
||||
## Issues
|
||||
|
||||
All issues are tracked publicly on GitHub: <https://github.com/vladmandic/human/issues>
|
||||
|
||||
<br>
|
||||
|
||||
## Vulnerabilities
|
||||
|
||||
`Human` library code base and included dependencies are automatically scanned against known security vulnerabilities
|
||||
Any code commit is validated before merge
|
||||
|
||||
- [Dependencies](https://github.com/vladmandic/human/security/dependabot)
|
||||
- [Scanning Alerts](https://github.com/vladmandic/human/security/code-scanning)
|
||||
|
||||
<br>
|
||||
|
||||
## Privacy
|
||||
|
||||
`Human` library and included demo apps:
|
||||
|
||||
- Are fully self-contained and do not send or share data of any kind with external targets
|
||||
- Do not store any user or system data tracking, user provided inputs (images, video) or detection results
|
||||
- Do not utilize any analytic services (such as Google Analytics)
|
||||
|
||||
`Human` library can establish external connections *only* for the following purposes and *only* when explicitly configured by the user:
|
||||
|
||||
- Load models from an externally hosted site (e.g. a CDN)
|
||||
- Load inputs for detection from *http & https* sources
|
||||
|
|
96
TODO.md
|
@ -1,21 +1,95 @@
|
|||
# To-Do list for Human library
|
||||
|
||||
## Big Ticket Items
|
||||
## Work-in-Progress
|
||||
|
||||
N/A
|
||||
|
||||
## Exploring Features
|
||||
<hr><br>
|
||||
|
||||
N/A
|
||||
## Exploring
|
||||
|
||||
## Explore Models
|
||||
- Optical flow for intelligent temporal interpolation
|
||||
<https://docs.opencv.org/3.3.1/db/d7f/tutorial_js_lucas_kanade.html>
|
||||
- Advanced histogram equalization for optimization of badly lit scenes
|
||||
**Adaptive**, **Contrast Limited**, **CLAHE**
|
||||
- TFLite models
|
||||
<https://js.tensorflow.org/api_tflite/0.0.1-alpha.4/>
|
||||
- Body segmentation with temporal analysis
|
||||
<https://github.com/PeterL1n/RobustVideoMatting>
|
||||
|
||||
- InsightFace
|
||||
RetinaFace detector and ArcFace recognition
|
||||
<https://github.com/deepinsight/insightface>
|
||||
- Blazepose
|
||||
Needs detector before running pose to center the image
|
||||
<hr><br>
|
||||
|
||||
## In Progress
|
||||
## Known Issues
|
||||
|
||||
N/A
|
||||
### Face with Attention
|
||||
|
||||
`FaceMesh-Attention` is not supported in the browser when using the `WASM` backend due to a missing kernel op in **TFJS**
|
||||
|
||||
### Object Detection
|
||||
|
||||
`NanoDet` model is not supported in the browser when using the `WASM` backend due to a missing kernel op in **TFJS**
|
||||
|
||||
### WebGPU
|
||||
|
||||
Experimental support only until support is officially added in Chromium
|
||||
Enable via <chrome://flags/#enable-unsafe-webgpu>
|
||||
|
||||
### Firefox
|
||||
|
||||
Running in **web workers** requires `OffscreenCanvas` which is still disabled by default in **Firefox**
|
||||
Enable via `about:config` -> `gfx.offscreencanvas.enabled`
|
||||
|
||||
<hr><br>
|
||||
|
||||
## Pending Release Changes
|
||||
|
||||
- Update **TFJS** to **3.20.0**
|
||||
- Update **TypeScript** to **4.8**
|
||||
- Switch default backend from custom `humangl` to `webgl`
|
||||
Stability and availability of features in standard **TFJS** now allow switching back
|
||||
- Add **InsightFace** model as alternative for face embedding/descriptor detection
|
||||
Compatible with multiple variations of **InsightFace** models
|
||||
Configurable using `config.face.insightface` config section
|
||||
See `demo/faceid/index.ts` for usage
|
||||
Models can be downloaded from <https://github.com/vladmandic/insightface>
|
||||
- Add `human.check()` which validates all kernel ops for currently loaded models with currently selected backend
|
||||
Example: `console.error(human.check());`
|
||||
- Add `config.softwareKernels` config option which uses **CPU** implementation for missing ops
|
||||
Disabled by default
|
||||
If enabled, it is used by face and hand rotation correction (`config.face.rotation` and `config.hand.rotation`)
|
||||
- Add underlying **tensorflow** library version detection when running in NodeJS to
|
||||
`human.env` and check if **GPU** is used for acceleration
|
||||
Example: `console.log(human.env.tensorflow)`
|
||||
- Treat models that cannot be found & loaded as non-critical error
|
||||
Instead of throwing a runtime exception, `human` will now report that the model could not be loaded
|
||||
- Improve `human.reset()` method to reset all config values to defaults
|
||||
- Host models in <https://github.com/vladmandic/human-models>
|
||||
Models can be directly used without downloading to local storage
|
||||
Example: `modelBasePath: 'https://vladmandic.github.io/human-models/models/'`
|
||||
- Allow hosting models in **Google Cloud Bucket**
|
||||
Hosted models can be directly used without downloading to local storage
|
||||
Example: `modelBasePath: 'https://storage.googleapis.com/human-models/'`
|
||||
- Stricter linting rules for both **TypeScript** and **JavaScript**
|
||||
See `./eslintrc.json` for details
|
||||
- Enhanced type safety across entire library
|
||||
- Fix **MobileFaceNet** model as alternative for face embedding/descriptor detection
|
||||
Configurable using `config.face.mobilefacenet` config section
|
||||
- Fix **EfficientPose** module as alternative body detection
|
||||
- Fix **NanoDet** module as alternative object detection
|
||||
- Fix `demo/multithread/node-multiprocess.js` demo
|
||||
- Fix `human.match` when using mixed descriptor lengths
|
||||
- Fix **WASM** feature detection issue in TFJS with Edge/Chromium
|
||||
Example: `console.log(human.env.wasm)`
|
||||
- Reorganized init & load order for faster library startup
|
||||
- Increased **NodeJS** test coverage
|
||||
Run using: `npm run test`
|
||||
Runs tests for `tfjs-node`, `tfjs-node-gpu` and `wasm`
|
||||
- Increased **Browser** test coverage
|
||||
Run using: `demo/browser.html`
|
||||
Runs tests for `webgl`, `humangl`, `webgpu` and `wasm`
|
||||
Runs tests for ESM and IIFE versions of library
|
||||
- Add `config.flags` settings to allow setting of custom **TFJS** flags during backend configuration
|
||||
- Increase availability of alternative models
|
||||
See `models/model.json` for full list
|
||||
- Update profiling methods in `human.profile()`
|
||||
- Update project dependencies to latest versions
|
||||
|
|
Before Width: | Height: | Size: 1.1 MiB After Width: | Height: | Size: 595 KiB |
After Width: | Height: | Size: 261 KiB |
Before Width: | Height: | Size: 366 KiB After Width: | Height: | Size: 321 KiB |
Before Width: | Height: | Size: 144 KiB After Width: | Height: | Size: 41 KiB |
After Width: | Height: | Size: 34 KiB |
After Width: | Height: | Size: 56 KiB |
After Width: | Height: | Size: 42 KiB |
|
@ -0,0 +1,110 @@
|
|||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
const Build = require('@vladmandic/build').Build; // eslint-disable-line node/no-unpublished-require
|
||||
const APIExtractor = require('@microsoft/api-extractor'); // eslint-disable-line node/no-unpublished-require
|
||||
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
const packageJSON = require('./package.json');
|
||||
|
||||
const logFile = 'test/build.log';
|
||||
const modelsOut = 'models/models.json';
|
||||
const modelsFolders = [
|
||||
'./models',
|
||||
'../human-models/models',
|
||||
'../blazepose/model/',
|
||||
'../anti-spoofing/model',
|
||||
'../efficientpose/models',
|
||||
'../insightface/models',
|
||||
'../movenet/models',
|
||||
'../nanodet/models',
|
||||
];
|
||||
|
||||
const apiExtractorIgnoreList = [ // eslint-disable-line no-unused-vars
|
||||
'ae-missing-release-tag',
|
||||
'tsdoc-param-tag-missing-hyphen',
|
||||
'tsdoc-escape-right-brace',
|
||||
'tsdoc-undefined-tag',
|
||||
'tsdoc-escape-greater-than',
|
||||
'ae-unresolved-link',
|
||||
'ae-forgotten-export',
|
||||
'tsdoc-malformed-inline-tag',
|
||||
'tsdoc-unnecessary-backslash',
|
||||
];
|
||||
|
||||
function copy(src, dst) {
|
||||
if (!fs.existsSync(src)) return;
|
||||
const buffer = fs.readFileSync(src);
|
||||
fs.writeFileSync(dst, buffer);
|
||||
}
|
||||
|
||||
async function analyzeModels() {
|
||||
log.info('Analyze models:', { folders: modelsFolders.length, result: modelsOut });
|
||||
let totalSize = 0;
|
||||
const models = {};
|
||||
const allModels = [];
|
||||
for (const folder of modelsFolders) {
|
||||
try {
|
||||
if (!fs.existsSync(folder)) continue;
|
||||
const stat = fs.statSync(folder);
|
||||
      if (!stat.isDirectory()) continue; // skip entries that are not directories
|
||||
const dir = fs.readdirSync(folder);
|
||||
const found = dir.map((f) => `file://${folder}/${f}`).filter((f) => f.endsWith('json'));
|
||||
log.state('Models', { folder, models: found.length });
|
||||
allModels.push(...found);
|
||||
} catch {
|
||||
// log.warn('Cannot enumerate:', modelFolder);
|
||||
}
|
||||
}
|
||||
for (const url of allModels) {
|
||||
// if (!f.endsWith('.json')) continue;
|
||||
// const url = `file://${modelsDir}/${f}`;
|
||||
const model = new tf.GraphModel(url); // create model prototype and decide if load from cache or from original modelurl
|
||||
model.findIOHandler();
|
||||
const artifacts = await model.handler.load();
|
||||
const size = artifacts?.weightData?.byteLength || 0;
|
||||
totalSize += size;
|
||||
const name = path.basename(url).replace('.json', '');
|
||||
if (!models[name]) models[name] = size;
|
||||
}
|
||||
const json = JSON.stringify(models, null, 2);
|
||||
fs.writeFileSync(modelsOut, json);
|
||||
log.state('Models:', { count: Object.keys(models).length, totalSize });
|
||||
}
|
||||
|
||||
async function main() {
|
||||
log.logFile(logFile);
|
||||
log.data('Build', { name: packageJSON.name, version: packageJSON.version });
|
||||
// run production build
|
||||
const build = new Build();
|
||||
await build.run('production');
|
||||
// patch tfjs typedefs
|
||||
log.state('Copy:', { input: 'tfjs/tfjs.esm.d.ts' });
|
||||
copy('tfjs/tfjs.esm.d.ts', 'types/lib/dist/tfjs.esm.d.ts');
|
||||
// run api-extractor to create typedef rollup
|
||||
const extractorConfig = APIExtractor.ExtractorConfig.loadFileAndPrepare('.api-extractor.json');
|
||||
const extractorResult = APIExtractor.Extractor.invoke(extractorConfig, {
|
||||
localBuild: true,
|
||||
showVerboseMessages: false,
|
||||
messageCallback: (msg) => {
|
||||
msg.handled = true;
|
||||
if (msg.logLevel === 'none' || msg.logLevel === 'verbose' || msg.logLevel === 'info') return;
|
||||
if (msg.sourceFilePath?.includes('/node_modules/')) return;
|
||||
// if (apiExtractorIgnoreList.reduce((prev, curr) => prev || msg.messageId.includes(curr), false)) return; // those are external issues outside of human control
|
||||
log.data('API', { level: msg.logLevel, category: msg.category, id: msg.messageId, file: msg.sourceFilePath, line: msg.sourceFileLine, text: msg.text });
|
||||
},
|
||||
});
|
||||
  log.state('API-Extractor:', { succeeded: extractorResult.succeeded, errors: extractorResult.errorCount, warnings: extractorResult.warningCount });
|
||||
// distribute typedefs
|
||||
log.state('Copy:', { input: 'types/human.d.ts' });
|
||||
copy('types/human.d.ts', 'dist/human.esm-nobundle.d.ts');
|
||||
copy('types/human.d.ts', 'dist/human.esm.d.ts');
|
||||
copy('types/human.d.ts', 'dist/human.d.ts');
|
||||
copy('types/human.d.ts', 'dist/human.node-gpu.d.ts');
|
||||
copy('types/human.d.ts', 'dist/human.node.d.ts');
|
||||
copy('types/human.d.ts', 'dist/human.node-wasm.d.ts');
|
||||
// generate model signature
|
||||
await analyzeModels();
|
||||
log.info('Human Build complete...', { logFile });
|
||||
}
|
||||
|
||||
main();
|
|
@ -1,5 +1,64 @@
|
|||
# Human Library: Demos
|
||||
|
||||
For details see Wiki:
|
||||
For details on other demos see Wiki: [**Demos**](https://github.com/vladmandic/human/wiki/Demos)
|
||||
|
||||
- [**Demos**](https://github.com/vladmandic/human/wiki/Demos)
|
||||
## Main Demo
|
||||
|
||||
|
||||
`index.html`: Full demo using the `Human` ESM module running in browsers
|
||||
|
||||
Includes:
|
||||
- Selectable inputs:
|
||||
- Sample images
|
||||
- Image via drag & drop
|
||||
- Image via URL param
|
||||
- WebCam input
|
||||
- Video stream
|
||||
- WebRTC stream
|
||||
- Selectable active `Human` modules
|
||||
- With interactive module params
|
||||
- Interactive `Human` image filters
|
||||
- Selectable interactive `results` browser
|
||||
- Selectable `backend`
|
||||
- Multiple execution methods:
|
||||
- Sync vs Async
|
||||
- in main thread or web worker
|
||||
- live on GitHub pages, on a user-hosted web server, or via the included [**micro http2 server**](https://github.com/vladmandic/human/wiki/Development-Server)
|
||||
|
||||
### Demo Options
|
||||
|
||||
- General `Human` library options
|
||||
in `index.js:userConfig`
|
||||
- General `Human` `draw` options
|
||||
in `index.js:drawOptions`
|
||||
- Demo PWA options
|
||||
in `index.js:pwa`
|
||||
- Demo specific options
|
||||
in `index.js:ui`
|
||||
|
||||
```js
|
||||
console: true, // log messages to browser console
|
||||
useWorker: true, // use web workers for processing
|
||||
buffered: true, // should output be buffered between frames
|
||||
interpolated: true, // should output be interpolated for smoothness between frames
|
||||
results: false, // show results tree
|
||||
useWebRTC: false, // use webrtc as camera source instead of local webcam
|
||||
```
|
||||
|
||||
The demo implements several ways to use the `Human` library.
|
||||
|
||||
### URL Params
|
||||
|
||||
Demo app can use URL parameters to override configuration values
|
||||
For example:
|
||||
|
||||
- Force using `WASM` as backend: <https://vladmandic.github.io/human/demo/index.html?backend=wasm>
|
||||
- Enable `WebWorkers`: <https://vladmandic.github.io/human/demo/index.html?worker=true>
|
||||
- Skip pre-loading and warming up: <https://vladmandic.github.io/human/demo/index.html?preload=false&warmup=false>
|
||||
|
||||
### WebRTC
|
||||
|
||||
Note that a WebRTC connection requires a WebRTC server that provides a compatible media track such as an H.264 video track
|
||||
For such a WebRTC server implementation see <https://github.com/vladmandic/stream-rtsp> project
|
||||
that implements a connection to an IP security camera using the RTSP protocol and transcodes it to WebRTC
|
||||
ready to be consumed by a client such as `Human`
|
||||
|
|
|
@ -0,0 +1,42 @@
|
|||
# Human Face Recognition: FaceID
|
||||
|
||||
`faceid` runs multiple checks to validate webcam input before performing face match
|
||||
Detected face image and descriptor are stored in client-side IndexedDB
|
||||
|
||||
## Workflow
|
||||
- Starts webcam
|
||||
- Waits until the input video contains a validated face or a timeout is reached
|
||||
- Number of people
|
||||
- Face size
|
||||
- Face and gaze direction
|
||||
- Detection scores
|
||||
- Blink detection (including temporal check for blink speed) to verify live input
|
||||
- Runs `antispoofing` optional module
|
||||
- Runs `liveness` optional module
|
||||
- Runs match against database of registered faces and presents best match with scores
|
||||
|
||||
## Notes
|
||||
|
||||
Both `antispoof` and `liveness` models are tiny and
|
||||
designed to serve as a quick check when used together with other indicators:
|
||||
- size below 1MB
|
||||
- very quick inference times as they are very simple (11 ops for antispoof and 23 ops for liveness)
|
||||
- trained on low-resolution inputs
|
||||
|
||||
### Anti-spoofing Module
|
||||
- Checks if the input is realistic (e.g. not a computer-generated face)
|
||||
- Configuration: `human.config.face.antispoof.enabled`
|
||||
- Result: `human.result.face[0].real` as score
|
||||
|
||||
### Liveness Module
|
||||
- Checks if input has obvious artifacts due to recording (e.g. playing back phone recording of a face)
|
||||
- Configuration: `human.config.face.liveness.enabled`
|
||||
- Result: `human.result.face[0].live` as score
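Putting the two optional modules above together, a minimal usage sketch (input source and thresholds are illustrative, `Human` is assumed to be imported):

```js
const human = new Human({ face: { antispoof: { enabled: true }, liveness: { enabled: true } } });
const result = await human.detect(input); // any supported input, e.g. a webcam video element
const real = result.face[0]?.real || 0; // anti-spoofing score, 0..1
const live = result.face[0]?.live || 0; // liveness score, 0..1
if (real > 0.6 && live > 0.6) console.log('face passes both checks'); // thresholds are illustrative
```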
|
||||
|
||||
### Models
|
||||
|
||||
**FaceID** is compatible with
|
||||
- `faceres.json` (default) performs combined age/gender/descriptor analysis
|
||||
- `faceres-deep.json` higher resolution variation of `faceres`
|
||||
- `insightface` alternative model for face descriptor analysis
|
||||
- `mobilefacenet` alternative model for face descriptor analysis
|
|
@ -0,0 +1,40 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>Human: Face Recognition</title>
|
||||
<meta name="viewport" content="width=device-width" id="viewport">
|
||||
<meta name="keywords" content="Human">
|
||||
<meta name="application-name" content="Human">
|
||||
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="theme-color" content="#000000">
|
||||
<link rel="manifest" href="../manifest.webmanifest">
|
||||
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
|
||||
<link rel="apple-touch-icon" href="../../assets/icon.png">
|
||||
<script src="./index.js" type="module"></script>
|
||||
<style>
|
||||
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
|
||||
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
|
||||
body { margin: 0; padding: 16px; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
|
||||
body::-webkit-scrollbar { display: none; }
|
||||
.button { padding: 2px; cursor: pointer; box-shadow: 2px 2px black; width: 64px; text-align: center; place-content: center; margin-left: 16px; height: 16px; display: none }
|
||||
.ok { position: absolute; top: 64px; right: 20px; width: 100px; background-color: grey; padding: 4px; color: black; font-size: 14px }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<canvas id="canvas" style="padding: 8px"></canvas>
|
||||
<canvas id="source" style="padding: 8px"></canvas>
|
||||
<video id="video" playsinline style="display: none"></video>
|
||||
<pre id="fps" style="position: absolute; bottom: 16px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
|
||||
<pre id="log" style="padding: 8px"></pre>
|
||||
<div id="match" style="display: none; padding: 8px">
|
||||
<label for="name">name:</label>
|
||||
<input id="name" type="text" value="" style="height: 16px; border: none; padding: 2px; margin-left: 8px">
|
||||
<span id="save" class="button" style="background-color: royalblue">save</span>
|
||||
<span id="delete" class="button" style="background-color: lightcoral">delete</span>
|
||||
</div>
|
||||
<div id="retry" class="button" style="background-color: darkslategray; width: 350px; margin-top: 32px; padding: 4px">retry</div>
|
||||
<div id="ok"></div>
|
||||
</body>
|
||||
</html>
|
|
@ -0,0 +1,274 @@
|
|||
/**
|
||||
* Human demo for browsers
|
||||
* @default Human Library
|
||||
* @summary <https://github.com/vladmandic/human>
|
||||
* @author <https://github.com/vladmandic>
|
||||
* @copyright <https://github.com/vladmandic>
|
||||
* @license MIT
|
||||
*/
|
||||
|
||||
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
|
||||
import * as indexDb from './indexdb'; // methods to deal with indexdb
|
||||
|
||||
const humanConfig = { // user configuration for human, used to fine-tune behavior
|
||||
modelBasePath: '../../models',
|
||||
  filter: { equalization: true }, // let's run with the histogram equalizer
|
||||
face: {
|
||||
enabled: true,
|
||||
detector: { rotation: true, return: true, cropFactor: 1.6, mask: false }, // return tensor is used to get detected face image
|
||||
description: { enabled: true }, // default model for face descriptor extraction is faceres
|
||||
// mobilefacenet: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/mobilefacenet.json' }, // alternative model
|
||||
// insightface: { enabled: true, modelPath: 'https://vladmandic.github.io/insightface/models/insightface-mobilenet-swish.json' }, // alternative model
|
||||
iris: { enabled: true }, // needed to determine gaze direction
|
||||
emotion: { enabled: false }, // not needed
|
||||
antispoof: { enabled: true }, // enable optional antispoof module
|
||||
liveness: { enabled: true }, // enable optional liveness module
|
||||
},
|
||||
body: { enabled: false },
|
||||
hand: { enabled: false },
|
||||
object: { enabled: false },
|
||||
gesture: { enabled: true }, // parses face and iris gestures
|
||||
};
|
||||
|
||||
// const matchOptions = { order: 2, multiplier: 1000, min: 0.0, max: 1.0 }; // for embedding model
|
||||
const matchOptions = { order: 2, multiplier: 25, min: 0.2, max: 0.8 }; // for faceres model
|
||||
|
||||
const options = {
|
||||
  minConfidence: 0.6, // overall face confidence for box, face, gender, real, live
|
||||
minSize: 224, // min input to face descriptor model before degradation
|
||||
maxTime: 10000, // max time before giving up
|
||||
blinkMin: 10, // minimum duration of a valid blink
|
||||
blinkMax: 800, // maximum duration of a valid blink
|
||||
threshold: 0.5, // minimum similarity
|
||||
mask: humanConfig.face.detector.mask,
|
||||
rotation: humanConfig.face.detector.rotation,
|
||||
cropFactor: humanConfig.face.detector.cropFactor,
|
||||
...matchOptions,
|
||||
};
|
||||
|
||||
const ok = { // must meet all rules
|
||||
faceCount: false,
|
||||
faceConfidence: false,
|
||||
facingCenter: false,
|
||||
lookingCenter: false,
|
||||
blinkDetected: false,
|
||||
faceSize: false,
|
||||
antispoofCheck: false,
|
||||
livenessCheck: false,
|
||||
elapsedMs: 0, // total time while waiting for valid face
|
||||
};
|
||||
const allOk = () => ok.faceCount && ok.faceSize && ok.blinkDetected && ok.facingCenter && ok.lookingCenter && ok.faceConfidence && ok.antispoofCheck && ok.livenessCheck;
|
||||
const current: { face: H.FaceResult | null, record: indexDb.FaceRecord | null } = { face: null, record: null }; // current face record and matched database record
|
||||
|
||||
const blink = { // internal timers for blink start/end/duration
|
||||
start: 0,
|
||||
end: 0,
|
||||
time: 0,
|
||||
};
|
||||
|
||||
// let db: Array<{ name: string, source: string, embedding: number[] }> = []; // holds loaded face descriptor database
|
||||
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
|
||||
|
||||
human.env.perfadd = false; // is performance data showing instant or total values
|
||||
human.draw.options.font = 'small-caps 18px "Lato"'; // set font used to draw labels when using draw methods
|
||||
human.draw.options.lineHeight = 20;
|
||||
|
||||
const dom = { // grab instances of dom objects so we dont have to look them up later
|
||||
video: document.getElementById('video') as HTMLVideoElement,
|
||||
canvas: document.getElementById('canvas') as HTMLCanvasElement,
|
||||
log: document.getElementById('log') as HTMLPreElement,
|
||||
fps: document.getElementById('fps') as HTMLPreElement,
|
||||
match: document.getElementById('match') as HTMLDivElement,
|
||||
name: document.getElementById('name') as HTMLInputElement,
|
||||
save: document.getElementById('save') as HTMLSpanElement,
|
||||
delete: document.getElementById('delete') as HTMLSpanElement,
|
||||
retry: document.getElementById('retry') as HTMLDivElement,
|
||||
source: document.getElementById('source') as HTMLCanvasElement,
|
||||
ok: document.getElementById('ok') as HTMLDivElement,
|
||||
};
|
||||
const timestamp = { detect: 0, draw: 0 }; // holds information used to calculate performance and possible memory leaks
|
||||
const fps = { detect: 0, draw: 0 }; // holds calculated fps information for both detect and screen refresh
|
||||
let startTime = 0;
|
||||
|
||||
const log = (...msg) => { // helper method to output messages
|
||||
dom.log.innerText += msg.join(' ') + '\n';
|
||||
console.log(...msg); // eslint-disable-line no-console
|
||||
};
|
||||
const printFPS = (msg) => dom.fps.innerText = msg; // print status element
|
||||
|
||||
async function webCam() { // initialize webcam
|
||||
printFPS('starting webcam...');
|
||||
// @ts-ignore resizeMode is not yet defined in tslib
|
||||
const cameraOptions: MediaStreamConstraints = { audio: false, video: { facingMode: 'user', resizeMode: 'none', width: { ideal: document.body.clientWidth } } };
|
||||
const stream: MediaStream = await navigator.mediaDevices.getUserMedia(cameraOptions);
|
||||
const ready = new Promise((resolve) => { dom.video.onloadeddata = () => resolve(true); });
|
||||
dom.video.srcObject = stream;
|
||||
void dom.video.play();
|
||||
await ready;
|
||||
dom.canvas.width = dom.video.videoWidth;
|
||||
dom.canvas.height = dom.video.videoHeight;
|
||||
if (human.env.initial) log('video:', dom.video.videoWidth, dom.video.videoHeight, '|', stream.getVideoTracks()[0].label);
|
||||
dom.canvas.onclick = () => { // pause when clicked on screen and resume on next click
|
||||
if (dom.video.paused) void dom.video.play();
|
||||
else dom.video.pause();
|
||||
};
|
||||
}
|
||||
|
||||
async function detectionLoop() { // main detection loop
|
||||
if (!dom.video.paused) {
|
||||
if (current.face?.tensor) human.tf.dispose(current.face.tensor); // dispose previous tensor
|
||||
    await human.detect(dom.video); // actual detection; we're not capturing output in a local variable as it can also be reached via human.result
|
||||
const now = human.now();
|
||||
fps.detect = 1000 / (now - timestamp.detect);
|
||||
timestamp.detect = now;
|
||||
requestAnimationFrame(detectionLoop); // start new frame immediately
|
||||
}
|
||||
}
|
||||
|
||||
async function validationLoop(): Promise<H.FaceResult> { // main screen refresh loop
|
||||
const interpolated = human.next(human.result); // smoothen result using last-known results
|
||||
human.draw.canvas(dom.video, dom.canvas); // draw canvas to screen
|
||||
await human.draw.all(dom.canvas, interpolated); // draw labels, boxes, lines, etc.
|
||||
const now = human.now();
|
||||
fps.draw = 1000 / (now - timestamp.draw);
|
||||
timestamp.draw = now;
|
||||
printFPS(`fps: ${fps.detect.toFixed(1).padStart(5, ' ')} detect | ${fps.draw.toFixed(1).padStart(5, ' ')} draw`); // write status
|
||||
  ok.faceCount = human.result.face.length === 1; // must be exactly one detected face
|
||||
if (ok.faceCount) { // skip the rest if no face
|
||||
const gestures: string[] = Object.values(human.result.gesture).map((gesture: H.GestureResult) => gesture.gesture); // flatten all gestures
|
||||
if (gestures.includes('blink left eye') || gestures.includes('blink right eye')) blink.start = human.now(); // blink starts when eyes get closed
|
||||
if (blink.start > 0 && !gestures.includes('blink left eye') && !gestures.includes('blink right eye')) blink.end = human.now(); // if blink started how long until eyes are back open
|
||||
ok.blinkDetected = ok.blinkDetected || (Math.abs(blink.end - blink.start) > options.blinkMin && Math.abs(blink.end - blink.start) < options.blinkMax);
|
||||
if (ok.blinkDetected && blink.time === 0) blink.time = Math.trunc(blink.end - blink.start);
|
||||
ok.facingCenter = gestures.includes('facing center');
|
||||
ok.lookingCenter = gestures.includes('looking center'); // must face camera and look at camera
|
||||
ok.faceConfidence = (human.result.face[0].boxScore || 0) > options.minConfidence && (human.result.face[0].faceScore || 0) > options.minConfidence;
|
||||
ok.antispoofCheck = (human.result.face[0].real || 0) > options.minConfidence;
|
||||
ok.livenessCheck = (human.result.face[0].live || 0) > options.minConfidence;
|
||||
ok.faceSize = human.result.face[0].box[2] >= options.minSize && human.result.face[0].box[3] >= options.minSize;
|
||||
}
|
||||
let y = 32;
|
||||
for (const [key, val] of Object.entries(ok)) {
|
||||
let el = document.getElementById(`ok-${key}`);
|
||||
if (!el) {
|
||||
el = document.createElement('div');
|
||||
el.innerText = key;
|
||||
el.className = 'ok';
|
||||
el.style.top = `${y}px`;
|
||||
dom.ok.appendChild(el);
|
||||
}
|
||||
if (typeof val === 'boolean') el.style.backgroundColor = val ? 'lightgreen' : 'lightcoral';
|
||||
else el.innerText = `${key}:${val}`;
|
||||
y += 28;
|
||||
}
|
||||
if (allOk()) { // all criteria met
|
||||
dom.video.pause();
|
||||
return human.result.face[0];
|
||||
}
|
||||
if (ok.elapsedMs > options.maxTime) { // give up
|
||||
dom.video.pause();
|
||||
return human.result.face[0];
|
||||
}
|
||||
// run again
|
||||
ok.elapsedMs = Math.trunc(human.now() - startTime);
|
||||
return new Promise((resolve) => {
|
||||
setTimeout(async () => {
|
||||
await validationLoop(); // run validation loop until conditions are met
|
||||
resolve(human.result.face[0]); // recursive promise resolve
|
||||
}, 30); // use to slow down refresh from max refresh rate to target of 30 fps
|
||||
});
|
||||
}
|
||||
|
||||
async function saveRecords() {
|
||||
if (dom.name.value.length > 0) {
|
||||
const image = dom.canvas.getContext('2d')?.getImageData(0, 0, dom.canvas.width, dom.canvas.height) as ImageData;
|
||||
const rec = { id: 0, name: dom.name.value, descriptor: current.face?.embedding as number[], image };
|
||||
await indexDb.save(rec);
|
||||
log('saved face record:', rec.name, 'descriptor length:', current.face?.embedding?.length);
|
||||
log('known face records:', await indexDb.count());
|
||||
} else {
|
||||
log('invalid name');
|
||||
}
|
||||
}
|
||||
|
||||
async function deleteRecord() {
|
||||
if (current.record && current.record.id > 0) {
|
||||
await indexDb.remove(current.record);
|
||||
}
|
||||
}
|
||||
|
||||
async function detectFace() {
|
||||
dom.canvas.getContext('2d')?.clearRect(0, 0, options.minSize, options.minSize);
|
||||
if (!current?.face?.tensor || !current?.face?.embedding) return false;
|
||||
console.log('face record:', current.face); // eslint-disable-line no-console
|
||||
human.tf.browser.toPixels(current.face.tensor as unknown as H.TensorLike, dom.canvas);
|
||||
if (await indexDb.count() === 0) {
|
||||
log('face database is empty');
|
||||
document.body.style.background = 'black';
|
||||
dom.delete.style.display = 'none';
|
||||
return false;
|
||||
}
|
||||
const db = await indexDb.load();
|
||||
const descriptors = db.map((rec) => rec.descriptor).filter((desc) => desc.length > 0);
|
||||
const res = human.match(current.face.embedding, descriptors, matchOptions);
|
||||
current.record = db[res.index] || null;
|
||||
if (current.record) {
|
||||
log(`best match: ${current.record.name} | id: ${current.record.id} | similarity: ${Math.round(1000 * res.similarity) / 10}%`);
|
||||
dom.name.value = current.record.name;
|
||||
dom.source.style.display = '';
|
||||
dom.source.getContext('2d')?.putImageData(current.record.image, 0, 0);
|
||||
}
|
||||
document.body.style.background = res.similarity > options.threshold ? 'darkgreen' : 'maroon';
|
||||
return res.similarity > options.threshold;
|
||||
}
|
||||
|
||||
async function main() { // main entry point
|
||||
ok.faceCount = false;
|
||||
ok.faceConfidence = false;
|
||||
ok.facingCenter = false;
|
||||
ok.blinkDetected = false;
|
||||
ok.faceSize = false;
|
||||
ok.antispoofCheck = false;
|
||||
ok.livenessCheck = false;
|
||||
ok.elapsedMs = 0;
|
||||
dom.match.style.display = 'none';
|
||||
dom.retry.style.display = 'none';
|
||||
dom.source.style.display = 'none';
|
||||
document.body.style.background = 'black';
|
||||
await webCam();
|
||||
await detectionLoop(); // start detection loop
|
||||
startTime = human.now();
|
||||
current.face = await validationLoop(); // start validation loop
|
||||
dom.canvas.width = current.face.tensor?.shape[1] || options.minSize;
|
||||
dom.canvas.height = current.face.tensor?.shape[0] || options.minSize;
|
||||
dom.source.width = dom.canvas.width;
|
||||
dom.source.height = dom.canvas.height;
|
||||
dom.canvas.style.width = '';
|
||||
dom.match.style.display = 'flex';
|
||||
dom.save.style.display = 'flex';
|
||||
dom.delete.style.display = 'flex';
|
||||
dom.retry.style.display = 'block';
|
||||
if (!allOk()) { // is all criteria met?
|
||||
log('did not find valid face');
|
||||
return false;
|
||||
}
|
||||
return detectFace();
|
||||
}
|
||||
|
||||
async function init() {
|
||||
log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
|
||||
log('face embedding model:', humanConfig.face.description.enabled ? 'faceres' : '', humanConfig.face['mobilefacenet']?.enabled ? 'mobilefacenet' : '', humanConfig.face['insightface']?.enabled ? 'insightface' : '');
|
||||
log('options:', JSON.stringify(options).replace(/{|}|"|\[|\]/g, '').replace(/,/g, ' '));
|
||||
printFPS('loading...');
|
||||
log('known face records:', await indexDb.count());
|
||||
await webCam(); // start webcam
|
||||
await human.load(); // preload all models
|
||||
printFPS('initializing...');
|
||||
dom.retry.addEventListener('click', main);
|
||||
dom.save.addEventListener('click', saveRecords);
|
||||
dom.delete.addEventListener('click', deleteRecord);
|
||||
await human.warmup(); // warmup function to initialize backend for future faster detection
|
||||
await main();
|
||||
}
|
||||
|
||||
window.onload = init;
|
|
@ -0,0 +1,65 @@
|
|||
let db: IDBDatabase; // instance of indexdb
|
||||
|
||||
const database = 'human';
|
||||
const table = 'person';
|
||||
|
||||
export interface FaceRecord { id: number, name: string, descriptor: number[], image: ImageData }
|
||||
|
||||
const log = (...msg) => console.log('indexdb', ...msg); // eslint-disable-line no-console
|
||||
|
||||
export async function open() {
|
||||
if (db) return true;
|
||||
return new Promise((resolve) => {
|
||||
const request: IDBOpenDBRequest = indexedDB.open(database, 1);
|
||||
request.onerror = (evt) => log('error:', evt);
|
||||
request.onupgradeneeded = (evt: IDBVersionChangeEvent) => { // create if doesnt exist
|
||||
log('create:', evt.target);
|
||||
db = (evt.target as IDBOpenDBRequest).result;
|
||||
db.createObjectStore(table, { keyPath: 'id', autoIncrement: true });
|
||||
};
|
||||
request.onsuccess = (evt) => { // open
|
||||
db = (evt.target as IDBOpenDBRequest).result;
|
||||
log('open:', db);
|
||||
resolve(true);
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
export async function load(): Promise<FaceRecord[]> {
|
||||
const faceDB: FaceRecord[] = [];
|
||||
if (!db) await open(); // open or create if not already done
|
||||
return new Promise((resolve) => {
|
||||
const cursor: IDBRequest = db.transaction([table], 'readwrite').objectStore(table).openCursor(null, 'next');
|
||||
cursor.onerror = (evt) => log('load error:', evt);
|
||||
cursor.onsuccess = (evt) => {
|
||||
if ((evt.target as IDBRequest).result) {
|
||||
faceDB.push((evt.target as IDBRequest).result.value);
|
||||
(evt.target as IDBRequest).result.continue();
|
||||
} else {
|
||||
resolve(faceDB);
|
||||
}
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
export async function count(): Promise<number> {
|
||||
if (!db) await open(); // open or create if not already done
|
||||
return new Promise((resolve) => {
|
||||
const store: IDBRequest = db.transaction([table], 'readwrite').objectStore(table).count();
|
||||
store.onerror = (evt) => log('count error:', evt);
|
||||
store.onsuccess = () => resolve(store.result);
|
||||
});
|
||||
}
|
||||
|
||||
export async function save(faceRecord: FaceRecord) {
|
||||
if (!db) await open(); // open or create if not already done
|
||||
const newRecord = { name: faceRecord.name, descriptor: faceRecord.descriptor, image: faceRecord.image }; // omit id as its autoincrement
|
||||
db.transaction([table], 'readwrite').objectStore(table).put(newRecord);
|
||||
log('save:', newRecord);
|
||||
}
|
||||
|
||||
export async function remove(faceRecord: FaceRecord) {
|
||||
if (!db) await open(); // open or create if not already done
|
||||
db.transaction([table], 'readwrite').objectStore(table).delete(faceRecord.id); // delete based on id
|
||||
log('delete:', faceRecord);
|
||||
}
|
|
@ -1,42 +0,0 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<title>Human</title>
|
||||
<meta http-equiv="content-type" content="text/html; charset=utf-8">
|
||||
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
|
||||
<meta name="keywords" content="Human">
|
||||
<meta name="application-name" content="Human">
|
||||
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<link rel="manifest" href="./manifest.webmanifest">
|
||||
<link rel="shortcut icon" href="../favicon.ico" type="image/x-icon">
|
||||
<link rel="apple-touch-icon" href="../assets/icon.png">
|
||||
<script src="./facematch.js" type="module"></script>
|
||||
<style>
|
||||
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../assets/lato-light.woff2') }
|
||||
html { font-family: 'Lato', 'Segoe UI'; font-size: 24px; font-variant: small-caps; }
|
||||
body { margin: 0; background: black; color: white; overflow-x: hidden; }
|
||||
img { object-fit: contain; }
|
||||
.face { width: 128px; height: 128px; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div style="display: block">
|
||||
<div style="display: flex">
|
||||
<div>
|
||||
Selected Face<br>
|
||||
<canvas id="orig" style="width: 200px; height: 200px; padding: 20px"></canvas>
|
||||
</div>
|
||||
<div style="width: 20px"></div>
|
||||
<div>
|
||||
Sample Images<br>
|
||||
<div id="images" style="display: flex; flex-wrap: wrap; width: 85vw"></div>
|
||||
</div>
|
||||
<span id="desc" style="visibility: hidden; font-size: 0.4rem;"></span><br>
|
||||
</div>
|
||||
<div id="list" style="height: 10px"></div>
|
||||
Extracted Faces - click on a face to sort by similarity and get a known face match:<br>
|
||||
<div id="faces"></div>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
|
@ -1,236 +0,0 @@
|
|||
// @ts-nocheck
|
||||
|
||||
import Human from '../dist/human.esm.js';
|
||||
|
||||
const userConfig = {
|
||||
backend: 'wasm',
|
||||
async: false,
|
||||
warmup: 'none',
|
||||
debug: true,
|
||||
face: {
|
||||
enabled: true,
|
||||
detector: { rotation: true, return: true },
|
||||
mesh: { enabled: true },
|
||||
embedding: { enabled: false },
|
||||
iris: { enabled: false },
|
||||
age: { enabled: false },
|
||||
gender: { enabled: false },
|
||||
emotion: { enabled: true },
|
||||
description: { enabled: true },
|
||||
},
|
||||
hand: { enabled: false },
|
||||
gesture: { enabled: false },
|
||||
body: { enabled: false },
|
||||
filter: {
|
||||
enabled: false,
|
||||
},
|
||||
};
|
||||
|
||||
const human = new Human(userConfig); // new instance of human
|
||||
|
||||
const all = []; // array that will hold all detected faces
|
||||
let db = []; // array that holds all known faces
|
||||
|
||||
const minScore = 0.6;
|
||||
const minConfidence = 0.8;
|
||||
|
||||
function log(...msg) {
|
||||
const dt = new Date();
|
||||
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
|
||||
// eslint-disable-next-line no-console
|
||||
console.log(ts, ...msg);
|
||||
}
|
||||
|
||||
async function getFaceDB() {
|
||||
// download db with known faces
|
||||
try {
|
||||
let res = await fetch('/demo/facematch-faces.json');
|
||||
if (!res || !res.ok) res = await fetch('/human/demo/facematch-faces.json');
|
||||
db = (res && res.ok) ? await res.json() : [];
|
||||
for (const rec of db) {
|
||||
rec.embedding = rec.embedding.map((a) => parseFloat(a.toFixed(4)));
|
||||
}
|
||||
} catch (err) {
|
||||
log('Could not load faces database', err);
|
||||
}
|
||||
}
|
||||
|
||||
async function analyze(face) {
|
||||
// refresh faces database
|
||||
await getFaceDB();
|
||||
|
||||
// if we have face image tensor, enhance it and display it
|
||||
if (face.tensor) {
|
||||
const enhanced = human.enhance(face);
|
||||
const desc = document.getElementById('desc');
|
||||
desc.innerText = `{"name":"unknown", "source":"${face.fileName}", "embedding":[${face.embedding}]},`;
|
||||
const embedding = face.embedding.map((a) => parseFloat(a.toFixed(4)));
|
||||
navigator.clipboard.writeText(`{"name":"unknown", "source":"${face.fileName}", "embedding":[${embedding}]},`);
|
||||
if (enhanced) {
|
||||
const c = document.getElementById('orig');
|
||||
const squeeze = enhanced.squeeze().div(255);
|
||||
await human.tf.browser.toPixels(squeeze, c);
|
||||
enhanced.dispose();
|
||||
squeeze.dispose();
|
||||
const ctx = c.getContext('2d');
|
||||
ctx.font = 'small-caps 0.4rem "Lato"';
|
||||
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
|
||||
}
|
||||
}
|
||||
|
||||
// loop through all canvases that contain faces
|
||||
const canvases = document.getElementsByClassName('face');
|
||||
for (const canvas of canvases) {
|
||||
// calculate similarity from selected face to current one in the loop
|
||||
const current = all[canvas.tag.sample][canvas.tag.face];
|
||||
const similarity = human.similarity(face.embedding, current.embedding, 3);
|
||||
// get best match
|
||||
// draw the canvas
|
||||
canvas.title = similarity;
|
||||
await human.tf.browser.toPixels(current.tensor, canvas);
|
||||
const ctx = canvas.getContext('2d');
|
||||
ctx.font = 'small-caps 1rem "Lato"';
|
||||
ctx.fillStyle = 'rgba(0, 0, 0, 1)';
|
||||
ctx.fillText(`${(100 * similarity).toFixed(1)}%`, 3, 23);
|
||||
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
|
||||
ctx.fillText(`${(100 * similarity).toFixed(1)}%`, 4, 24);
|
||||
ctx.font = 'small-caps 0.8rem "Lato"';
|
||||
ctx.fillText(`${current.age}y ${(100 * (current.genderConfidence || 0)).toFixed(1)}% ${current.gender}`, 4, canvas.height - 6);
|
||||
// identify person
|
||||
ctx.font = 'small-caps 1rem "Lato"';
|
||||
const person = await human.match(current.embedding, db);
|
||||
if (person.similarity && person.similarity > minScore && current.confidence > minConfidence) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30);
|
||||
}
|
||||
|
||||
// sort all faces by similarity
|
||||
const sorted = document.getElementById('faces');
|
||||
[...sorted.children]
|
||||
.sort((a, b) => parseFloat(b.title) - parseFloat(a.title))
|
||||
.forEach((canvas) => sorted.appendChild(canvas));
|
||||
}
|
||||
|
||||
async function faces(index, res, fileName) {
|
||||
all[index] = res.face;
|
||||
for (const i in res.face) {
|
||||
all[index][i].fileName = fileName;
|
||||
const canvas = document.createElement('canvas');
|
||||
canvas.tag = { sample: index, face: i };
|
||||
canvas.width = 200;
|
||||
canvas.height = 200;
|
||||
canvas.className = 'face';
|
||||
// mouse click on any face canvas triggers analysis
|
||||
canvas.addEventListener('click', (evt) => {
|
||||
log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, all[evt.target.tag.sample][evt.target.tag.face]);
|
||||
analyze(all[evt.target.tag.sample][evt.target.tag.face]);
|
||||
});
|
||||
// if we actually got face image tensor, draw canvas with that face
|
||||
if (res.face[i].tensor) {
|
||||
await human.tf.browser.toPixels(res.face[i].tensor, canvas);
|
||||
document.getElementById('faces').appendChild(canvas);
|
||||
const ctx = canvas.getContext('2d');
|
||||
ctx.font = 'small-caps 0.8rem "Lato"';
|
||||
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
|
||||
ctx.fillText(`${res.face[i].age}y ${(100 * (res.face[i].genderConfidence || 0)).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6);
|
||||
const person = await human.match(res.face[i].embedding, db);
|
||||
ctx.font = 'small-caps 1rem "Lato"';
|
||||
if (person.similarity && person.similarity > minScore && res.face[i].confidence > minConfidence) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async function process(index, image) {
|
||||
return new Promise((resolve) => {
|
||||
const img = new Image(128, 128);
|
||||
img.onload = () => { // must wait until image is loaded
|
||||
human.detect(img).then(async (res) => {
|
||||
await faces(index, res, image); // then wait until image is analyzed
|
||||
log('Add image:', index + 1, image, 'faces:', res.face.length);
|
||||
document.getElementById('images').appendChild(img); // and finally we can add it
|
||||
resolve(true);
|
||||
});
|
||||
};
|
||||
img.onerror = () => {
|
||||
log('Add image error:', index + 1, image);
|
||||
resolve(false);
|
||||
};
|
||||
img.title = image;
|
||||
img.src = encodeURI(image);
|
||||
});
|
||||
}
|
||||
|
||||
async function createDB() {
|
||||
log('Creating Faces DB...');
|
||||
for (const image of all) {
|
||||
for (const face of image) db.push({ name: 'unknown', source: face.fileName, embedding: face.embedding });
|
||||
}
|
||||
log(db);
|
||||
}
|
||||
|
||||
async function main() {
|
||||
window.addEventListener('unhandledrejection', (evt) => {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(evt.reason || evt);
|
||||
document.getElementById('list').innerHTML = evt?.reason?.message || evt?.reason || evt;
|
||||
evt.preventDefault();
|
||||
});
|
||||
|
||||
// pre-load human models
|
||||
await human.load();
|
||||
|
||||
let res;
|
||||
let images = [];
|
||||
let dir = [];
|
||||
// load face descriptor database
|
||||
await getFaceDB();
|
||||
|
||||
// enumerate all sample images in /assets
|
||||
res = await fetch('/assets');
|
||||
dir = (res && res.ok) ? await res.json() : [];
|
||||
images = images.concat(dir.filter((img) => (img.endsWith('.jpg') && img.includes('sample'))));
|
||||
// enumerate additional private test images in /private, not included in git repository
|
||||
res = await fetch('/private/me');
|
||||
dir = (res && res.ok) ? await res.json() : [];
|
||||
images = images.concat(dir.filter((img) => (img.endsWith('.jpg'))));
|
||||
|
||||
// enumerate additional error images, not included in git repository
|
||||
res = await fetch('/private/err');
|
||||
dir = (res && res.ok) ? await res.json() : [];
|
||||
images = images.concat(dir.filter((img) => (img.endsWith('.jpg'))));
|
||||
log('Enumerated:', images.length, 'images');
|
||||
|
||||
// could not dynamically enumerate images so using static list
|
||||
if (images.length === 0) {
|
||||
images = [
|
||||
'sample1.jpg',
|
||||
'sample2.jpg',
|
||||
'sample3.jpg',
|
||||
'sample4.jpg',
|
||||
'sample5.jpg',
|
||||
'sample6.jpg',
|
||||
'sample6.jpg',
|
||||
'sample-me.jpg',
|
||||
'human-sample-face.jpg',
|
||||
'human-sample-upper.jpg',
|
||||
'human-sample-body.jpg',
|
||||
];
|
||||
// add prefix for gitpages
|
||||
images = images.map((a) => `/human/assets/${a}`);
|
||||
log('Adding static image list:', images.length, 'images');
|
||||
}
|
||||
|
||||
// download and analyze all images
|
||||
for (let i = 0; i < images.length; i++) await process(i, images[i]);
|
||||
|
||||
// print stats
|
||||
const num = all.reduce((prev, cur) => prev += cur.length, 0);
|
||||
log('Extracted faces:', num, 'from images:', all.length);
|
||||
log(human.tf.engine().memory());
|
||||
|
||||
// if we didn't download db, generate it from current faces
|
||||
if (!db || db.length === 0) await createDB();
|
||||
else log('Loaded Faces DB:', db.length);
|
||||
|
||||
log('Ready');
|
||||
}
|
||||
|
||||
window.onload = main;
|
|
@ -0,0 +1,83 @@
|
|||
# Human Face Recognition & Matching
|
||||
|
||||
- **Browser** demo: `index.html` & `facematch.js`:
|
||||
Loads sample images, extracts faces and runs match and similarity analysis
|
||||
- **NodeJS** demo: `node-match.js` & `node-match-worker.js`
|
||||
Advanced multithreading demo that runs a number of worker threads to process a high number of matches
|
||||
- Sample face database: `faces.json` (record format sketched below)
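Each `faces.json` record pairs a label with its source image and the face descriptor. A minimal sketch of the record shape, matching what `createFaceMatchDB` in the browser demo writes (embedding values are placeholders; in the `node-match` demo descriptors are 1024 elements long):

```js
// sketch of a faces.json record; real embeddings hold the full descriptor, values here are placeholders
[
  { "name": "unknown", "source": "/samples/in/person-lexi.jpg", "embedding": [0.2789, -0.1243 /* ... */] },
]
```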
|
||||
|
||||
<br>
|
||||
|
||||
## Browser Face Recognition Demo
|
||||
|
||||
- `demo/facematch`: Demo for Browsers that uses all face description and embedding features to
|
||||
detect, extract and identify all faces and calculate similarity between them
|
||||
|
||||
It highlights functionality such as (a minimal sketch follows the list):
|
||||
|
||||
- Loading images
|
||||
- Extracting faces from images
|
||||
- Calculating face embedding descriptors
|
||||
- Finding face similarity and sorting faces by similarity
|
||||
- Finding the best face match based on a known list of faces and printing matches
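A minimal sketch of that flow, reusing the same `Human` API calls the demo itself makes (`detect`, `similarity`, `match`); the two image elements and the `db` array of known records are assumed to be supplied by the caller:

```js
import { Human } from '../../dist/human.esm.js';

const human = new Human({ face: { enabled: true, description: { enabled: true } } });

async function compareFaces(imgA, imgB, db) {
  const resA = await human.detect(imgA); // detect faces and compute descriptors
  const resB = await human.detect(imgB);
  const a = resA.face[0].embedding; // descriptor of the first detected face in each image
  const b = resB.face[0].embedding;
  const similarity = human.similarity(a, b); // 0..1 similarity between the two descriptors
  const best = await human.match(a, db.map((rec) => rec.embedding)); // best match against known descriptors
  console.log('similarity:', similarity, 'best known match:', db[best.index]?.name, best.similarity);
}
```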
|
||||
|
||||
<br>
|
||||
|
||||
## NodeJS Multi-Threading Match Solution
|
||||
|
||||
### Methods and Properties in `node-match`
|
||||
|
||||
- `createBuffer`: create shared buffer array
|
||||
single copy of data regardless of number of workers
|
||||
fixed size based on `options.dbMax`
|
||||
- `appendRecords`: add an additional batch of descriptors to the buffer
|
||||
a batch of records can be appended to the buffer at any time
|
||||
workers are informed of the new content after the append has completed
|
||||
- `workersStart`: start or expand pool of `threadPoolSize` workers
|
||||
each worker runs `node-match-worker` and listens for messages from main thread
|
||||
can shut down workers or create additional worker threads on the fly
|
||||
safe against workers that exit
|
||||
- `workersClose`: close workers in a pool
|
||||
first requests workers to exit, then terminates them after a timeout
|
||||
- `match`: dispatch a match job to a worker
|
||||
returns first match that satisfies `minThreshold`
|
||||
assignment to workers uses round-robin
|
||||
since timing for each job is near-fixed and predictable
|
||||
- `getDescriptor`: get descriptor array for a given id from a buffer
|
||||
- `fuzDescriptor`: slightly randomize descriptor content to make matching harder
|
||||
- `getLabel`: fetch label for resolved descriptor index
|
||||
- `loadDB`: load face database from a JSON file `dbFile`
|
||||
extracts descriptors and adds them to buffer
|
||||
extracts labels and maintains them in main thread
|
||||
for test purposes, loads the same database `dbFact` times to create a very large database
|
||||
|
||||
`node-match` runs in a loop and listens for messages from workers until `maxJobs` have been reached; a minimal usage sketch follows
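A minimal sketch of that flow, following the order used by `main()` in `node-match.js` (option values come from the demo defaults):

```js
// minimal sketch of the node-match flow (mirrors main() in node-match.js)
await createBuffer();                       // allocate SharedArrayBuffer sized for options.dbMax descriptors
await loadDB(testOptions.dbFact);           // load faces.json dbFact times via appendRecords
await workersStart(options.threadPoolSize); // start worker threads running node-match-worker.js
for (let i = 0; i < testOptions.maxJobs; i++) {
  data.requestID++;                                                     // used for round-robin worker assignment
  match(getDescriptor(Math.trunc(data.labels.length * Math.random()))); // dispatch one match job to a worker
}
// workersClose() is triggered from workerMessage() once maxJobs results have been received
```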
|
||||
|
||||
### Performance
|
||||
|
||||
Performance decreases linearly with the number of records in the database
|
||||
Performance improves with the number of worker threads, but non-linearly due to communication overhead
|
||||
|
||||
- Face database with 10k records:
|
||||
> threadPoolSize: 1 => ~60 ms / match job
|
||||
> threadPoolSize: 6 => ~25 ms / match job
|
||||
- Face database with 50k records:
|
||||
> threadPoolSize: 1 => ~300 ms / match job
|
||||
> threadPoolSize: 6 => ~100 ms / match job
|
||||
- Face database with 100k records:
|
||||
> threadPoolSize: 1 => ~600 ms / match job
|
||||
> threadPoolSize: 6 => ~200 ms / match job
|
||||
|
||||
### Example
|
||||
|
||||
> node node-match
|
||||
|
||||
```js
|
||||
2021-10-13 07:53:36 INFO: options: { dbFile: './faces.json', dbMax: 10000, threadPoolSize: 6, workerSrc: './node-match-worker.js', debug: false, minThreshold: 0.9, descLength: 1024 }
|
||||
2021-10-13 07:53:36 DATA: created shared buffer: { maxDescriptors: 10000, totalBytes: 40960000, totalElements: 10240000 }
|
||||
2021-10-13 07:53:36 DATA: db loaded: { existingRecords: 0, newRecords: 5700 }
|
||||
2021-10-13 07:53:36 INFO: starting worker thread pool: { totalWorkers: 6, alreadyActive: 0 }
|
||||
2021-10-13 07:53:36 STATE: submitted: { matchJobs: 100, poolSize: 6, activeWorkers: 6 }
|
||||
2021-10-13 07:53:38 STATE: { matchJobsFinished: 100, totalTimeMs: 1769, averageTimeMs: 17.69 }
|
||||
2021-10-13 07:53:38 INFO: closing workers: { poolSize: 6, activeWorkers: 6 }
|
||||
```
|
|
@ -0,0 +1,262 @@
|
|||
/**
|
||||
* Human demo for browsers
|
||||
*
|
||||
* Demo for face descriptor analysis and face similarity analysis
|
||||
*/
|
||||
|
||||
/** @type {Human} */
|
||||
import { Human } from '../../dist/human.esm.js';
|
||||
|
||||
const userConfig = {
|
||||
backend: 'humangl',
|
||||
async: true,
|
||||
warmup: 'none',
|
||||
cacheSensitivity: 0,
|
||||
debug: true,
|
||||
modelBasePath: '../../models/',
|
||||
deallocate: true,
|
||||
filter: {
|
||||
enabled: true,
|
||||
equalization: true,
|
||||
width: 0,
|
||||
},
|
||||
face: {
|
||||
enabled: true,
|
||||
// detector: { rotation: false, return: true, maxDetected: 50, iouThreshold: 0.206, minConfidence: 0.122 },
|
||||
detector: { return: true, rotation: true, maxDetected: 50, iouThreshold: 0.01, minConfidence: 0.2 },
|
||||
mesh: { enabled: true },
|
||||
iris: { enabled: false },
|
||||
emotion: { enabled: true },
|
||||
description: { enabled: true },
|
||||
},
|
||||
hand: { enabled: false },
|
||||
gesture: { enabled: false },
|
||||
body: { enabled: false },
|
||||
segmentation: { enabled: false },
|
||||
};
|
||||
|
||||
const human = new Human(userConfig); // new instance of human
|
||||
|
||||
const all = []; // array that will hold all detected faces
|
||||
let db = []; // array that holds all known faces
|
||||
|
||||
const minScore = 0.4;
|
||||
|
||||
function log(...msg) {
|
||||
const dt = new Date();
|
||||
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
|
||||
console.log(ts, ...msg); // eslint-disable-line no-console
|
||||
}
|
||||
|
||||
function title(msg) {
|
||||
document.getElementById('title').innerHTML = msg;
|
||||
}
|
||||
|
||||
async function loadFaceMatchDB() {
|
||||
// download db with known faces
|
||||
try {
|
||||
let res = await fetch('/demo/facematch/faces.json');
|
||||
if (!res || !res.ok) res = await fetch('/human/demo/facematch/faces.json');
|
||||
db = (res && res.ok) ? await res.json() : [];
|
||||
log('Loaded Faces DB:', db);
|
||||
} catch (err) {
|
||||
log('Could not load faces database', err);
|
||||
}
|
||||
}
|
||||
|
||||
async function SelectFaceCanvas(face) {
|
||||
// if we have face image tensor, enhance it and display it
|
||||
let embedding;
|
||||
document.getElementById('orig').style.filter = 'blur(16px)';
|
||||
if (face.tensor) {
|
||||
title('Sorting Faces by Similarity');
|
||||
const enhanced = human.enhance(face);
|
||||
if (enhanced) {
|
||||
const c = document.getElementById('orig');
|
||||
const squeeze = human.tf.squeeze(enhanced);
|
||||
const normalize = human.tf.div(squeeze, 255);
|
||||
await human.tf.browser.toPixels(normalize, c);
|
||||
human.tf.dispose([enhanced, squeeze, normalize]);
|
||||
const ctx = c.getContext('2d');
|
||||
ctx.font = 'small-caps 0.4rem "Lato"';
|
||||
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
|
||||
}
|
||||
const arr = db.map((rec) => rec.embedding);
|
||||
const res = await human.match(face.embedding, arr);
|
||||
log('Match:', db[res.index].name);
|
||||
const emotion = face.emotion[0] ? `${Math.round(100 * face.emotion[0].score)}% ${face.emotion[0].emotion}` : 'N/A';
|
||||
document.getElementById('desc').innerHTML = `
|
||||
source: ${face.fileName}<br>
|
||||
match: ${Math.round(1000 * res.similarity) / 10}% ${db[res.index].name}<br>
|
||||
score: ${Math.round(100 * face.boxScore)}% detection ${Math.round(100 * face.faceScore)}% analysis<br>
|
||||
age: ${face.age} years<br>
|
||||
gender: ${Math.round(100 * face.genderScore)}% ${face.gender}<br>
|
||||
emotion: ${emotion}<br>
|
||||
`;
|
||||
embedding = face.embedding.map((a) => parseFloat(a.toFixed(4)));
|
||||
navigator.clipboard.writeText(`{"name":"unknown", "source":"${face.fileName}", "embedding":[${embedding}]},`);
|
||||
}
|
||||
|
||||
// loop through all canvases that contain faces
|
||||
const canvases = document.getElementsByClassName('face');
|
||||
let time = 0;
|
||||
for (const canvas of canvases) {
|
||||
// calculate similarity from selected face to current one in the loop
|
||||
const current = all[canvas.tag.sample][canvas.tag.face];
|
||||
const similarity = human.similarity(face.embedding, current.embedding);
|
||||
canvas.tag.similarity = similarity;
|
||||
// get best match
|
||||
// draw the canvas
|
||||
await human.tf.browser.toPixels(current.tensor, canvas);
|
||||
const ctx = canvas.getContext('2d');
|
||||
ctx.font = 'small-caps 1rem "Lato"';
|
||||
ctx.fillStyle = 'rgba(0, 0, 0, 1)';
|
||||
ctx.fillText(`${(100 * similarity).toFixed(1)}%`, 3, 23);
|
||||
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
|
||||
ctx.fillText(`${(100 * similarity).toFixed(1)}%`, 4, 24);
|
||||
ctx.font = 'small-caps 0.8rem "Lato"';
|
||||
ctx.fillText(`${current.age}y ${(100 * (current.genderScore || 0)).toFixed(1)}% ${current.gender}`, 4, canvas.height - 6);
|
||||
// identify person
|
||||
ctx.font = 'small-caps 1rem "Lato"';
|
||||
const start = human.now();
|
||||
const arr = db.map((rec) => rec.embedding);
|
||||
const res = await human.match(current.embedding, arr);
|
||||
time += (human.now() - start);
|
||||
if (res.similarity > minScore) ctx.fillText(`DB: ${(100 * res.similarity).toFixed(1)}% ${db[res.index].name}`, 4, canvas.height - 30);
|
||||
}
|
||||
|
||||
log('Analyzed:', 'Face:', canvases.length, 'DB:', db.length, 'Time:', time);
|
||||
// sort all faces by similarity
|
||||
const sorted = document.getElementById('faces');
|
||||
[...sorted.children]
|
||||
.sort((a, b) => parseFloat(b.tag.similarity) - parseFloat(a.tag.similarity))
|
||||
.forEach((canvas) => sorted.appendChild(canvas));
|
||||
document.getElementById('orig').style.filter = 'blur(0)';
|
||||
title('Selected Face');
|
||||
}
|
||||
|
||||
async function AddFaceCanvas(index, res, fileName) {
|
||||
all[index] = res.face;
|
||||
for (const i in res.face) {
|
||||
if (!res.face[i].tensor) continue; // did not get valid results
|
||||
if ((res.face[i].faceScore || 0) < human.config.face.detector.minConfidence) continue; // face analysis score too low
|
||||
all[index][i].fileName = fileName;
|
||||
const canvas = document.createElement('canvas');
|
||||
canvas.tag = { sample: index, face: i, source: fileName };
|
||||
canvas.width = 200;
|
||||
canvas.height = 200;
|
||||
canvas.className = 'face';
|
||||
const emotion = res.face[i].emotion[0] ? `${Math.round(100 * res.face[i].emotion[0].score)}% ${res.face[i].emotion[0].emotion}` : 'N/A';
|
||||
canvas.title = `
|
||||
source: ${res.face[i].fileName}
|
||||
score: ${Math.round(100 * res.face[i].boxScore)}% detection ${Math.round(100 * res.face[i].faceScore)}% analysis
|
||||
age: ${res.face[i].age} years
|
||||
gender: ${Math.round(100 * res.face[i].genderScore)}% ${res.face[i].gender}
|
||||
emotion: ${emotion}
|
||||
`.replace(/ /g, ' ');
|
||||
await human.tf.browser.toPixels(res.face[i].tensor, canvas);
|
||||
const ctx = canvas.getContext('2d');
|
||||
if (!ctx) return false;
|
||||
ctx.font = 'small-caps 0.8rem "Lato"';
|
||||
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
|
||||
ctx.fillText(`${res.face[i].age}y ${(100 * (res.face[i].genderScore || 0)).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6);
|
||||
const arr = db.map((rec) => rec.embedding);
|
||||
const result = await human.match(res.face[i].embedding, arr);
|
||||
ctx.font = 'small-caps 1rem "Lato"';
|
||||
if (result.similarity && result.similarity > minScore) ctx.fillText(`${(100 * result.similarity).toFixed(1)}% ${db[result.index].name}`, 4, canvas.height - 30);
|
||||
document.getElementById('faces').appendChild(canvas);
|
||||
canvas.addEventListener('click', (evt) => {
|
||||
log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, 'Source:', evt.target.tag.source, all[evt.target.tag.sample][evt.target.tag.face]);
|
||||
SelectFaceCanvas(all[evt.target.tag.sample][evt.target.tag.face]);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
async function AddImageElement(index, image, length) {
|
||||
const faces = all.reduce((prev, curr) => prev += curr.length, 0);
|
||||
title(`Analyzing Input Images<br> ${Math.round(100 * index / length)}% [${index} / ${length}]<br>Found ${faces} Faces`);
|
||||
return new Promise((resolve) => {
|
||||
const img = new Image(128, 128);
|
||||
img.onload = () => { // must wait until image is loaded
|
||||
document.getElementById('images').appendChild(img); // and finally we can add it
|
||||
human.detect(img, userConfig)
|
||||
.then((res) => { // eslint-disable-line promise/always-return
|
||||
AddFaceCanvas(index, res, image); // then wait until image is analyzed
|
||||
resolve(true);
|
||||
})
|
||||
.catch(() => log('human detect error'));
|
||||
};
|
||||
img.onerror = () => {
|
||||
log('Add image error:', index + 1, image);
|
||||
resolve(false);
|
||||
};
|
||||
img.title = image;
|
||||
img.src = encodeURI(image);
|
||||
});
|
||||
}
|
||||
|
||||
function createFaceMatchDB() {
|
||||
log('Creating Faces DB...');
|
||||
for (const image of all) {
|
||||
for (const face of image) db.push({ name: 'unknown', source: face.fileName, embedding: face.embedding });
|
||||
}
|
||||
log(db);
|
||||
}
|
||||
|
||||
async function main() {
|
||||
// pre-load human models
|
||||
await human.load();
|
||||
|
||||
title('Loading Face Match Database');
|
||||
let images = [];
|
||||
let dir = [];
|
||||
// load face descriptor database
|
||||
await loadFaceMatchDB();
|
||||
|
||||
// enumerate all sample images in /assets
|
||||
title('Enumerating Input Images');
|
||||
const res = await fetch('/samples/in');
|
||||
dir = (res && res.ok) ? await res.json() : [];
|
||||
images = images.concat(dir.filter((img) => (img.endsWith('.jpg') && img.includes('sample'))));
|
||||
|
||||
// could not dynamically enumerate images so using static list
|
||||
if (images.length === 0) {
|
||||
images = [
|
||||
'ai-body.jpg', 'solvay1927.jpg', 'ai-upper.jpg',
|
||||
'person-carolina.jpg', 'person-celeste.jpg', 'person-leila1.jpg', 'person-leila2.jpg', 'person-lexi.jpg', 'person-linda.jpg', 'person-nicole.jpg', 'person-tasia.jpg',
|
||||
'person-tetiana.jpg', 'person-vlado1.jpg', 'person-vlado5.jpg', 'person-vlado.jpg', 'person-christina.jpg', 'person-lauren.jpg',
|
||||
'group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg',
|
||||
'daz3d-brianna.jpg', 'daz3d-chiyo.jpg', 'daz3d-cody.jpg', 'daz3d-drew-01.jpg', 'daz3d-drew-02.jpg', 'daz3d-ella-01.jpg', 'daz3d-ella-02.jpg', 'daz3d-gillian.jpg',
|
||||
'daz3d-hye-01.jpg', 'daz3d-hye-02.jpg', 'daz3d-kaia.jpg', 'daz3d-karen.jpg', 'daz3d-kiaria-01.jpg', 'daz3d-kiaria-02.jpg', 'daz3d-lilah-01.jpg', 'daz3d-lilah-02.jpg',
|
||||
'daz3d-lilah-03.jpg', 'daz3d-lila.jpg', 'daz3d-lindsey.jpg', 'daz3d-megah.jpg', 'daz3d-selina-01.jpg', 'daz3d-selina-02.jpg', 'daz3d-snow.jpg',
|
||||
'daz3d-sunshine.jpg', 'daz3d-taia.jpg', 'daz3d-tuesday-01.jpg', 'daz3d-tuesday-02.jpg', 'daz3d-tuesday-03.jpg', 'daz3d-zoe.jpg', 'daz3d-ginnifer.jpg',
|
||||
'daz3d-_emotions01.jpg', 'daz3d-_emotions02.jpg', 'daz3d-_emotions03.jpg', 'daz3d-_emotions04.jpg', 'daz3d-_emotions05.jpg',
|
||||
];
|
||||
// add prefix for gitpages
|
||||
images = images.map((a) => `/human/samples/in/${a}`);
|
||||
log('Adding static image list:', images);
|
||||
} else {
|
||||
log('Discovered images:', images);
|
||||
}
|
||||
|
||||
// images = ['/samples/in/person-lexi.jpg', '/samples/in/person-carolina.jpg', '/samples/in/solvay1927.jpg'];
|
||||
|
||||
const t0 = human.now();
|
||||
for (let i = 0; i < images.length; i++) await AddImageElement(i, images[i], images.length);
|
||||
const t1 = human.now();
|
||||
|
||||
// print stats
|
||||
const num = all.reduce((prev, cur) => prev += cur.length, 0);
|
||||
log('Extracted faces:', num, 'from images:', all.length, 'time:', Math.round(t1 - t0));
|
||||
log(human.tf.engine().memory());
|
||||
|
||||
// if we didn't download db, generate it from current faces
|
||||
if (!db || db.length === 0) createFaceMatchDB();
|
||||
|
||||
title('');
|
||||
log('Ready');
|
||||
human.validate(userConfig);
|
||||
human.similarity([], []);
|
||||
}
|
||||
|
||||
window.onload = main;
|
|
@ -0,0 +1,50 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>Human</title>
|
||||
<!-- <meta http-equiv="content-type" content="text/html; charset=utf-8"> -->
|
||||
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
|
||||
<meta name="keywords" content="Human">
|
||||
<meta name="application-name" content="Human">
|
||||
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="theme-color" content="#000000">
|
||||
<link rel="manifest" href="../manifest.webmanifest">
|
||||
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
|
||||
<link rel="apple-touch-icon" href="../../assets/icon.png">
|
||||
<script src="./facematch.js" type="module"></script>
|
||||
<style>
|
||||
img { object-fit: contain; }
|
||||
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
|
||||
html { font-family: 'Lato', 'Segoe UI'; font-size: 24px; font-variant: small-caps; }
|
||||
body { margin: 24px; background: black; color: white; overflow: hidden; text-align: -webkit-center; min-height: 100%; max-height: 100%; }
|
||||
::-webkit-scrollbar { height: 8px; border: 0; border-radius: 0; }
|
||||
::-webkit-scrollbar-thumb { background: grey }
|
||||
::-webkit-scrollbar-track { margin: 3px; }
|
||||
.orig { width: 200px; height: 200px; padding-bottom: 20px; filter: blur(16px); transition : all 0.5s ease; }
|
||||
.text { margin: 24px; }
|
||||
.face { width: 128px; height: 128px; margin: 2px; padding: 2px; cursor: grab; transform: scale(1.00); transition : all 0.3s ease; }
|
||||
.face:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div style="display: block">
|
||||
<div style="display: flex">
|
||||
<div style="min-width: 400px">
|
||||
<div class="text" id="title"></div>
|
||||
<canvas id="orig" class="orig"></canvas>
|
||||
<div id="desc" style="font-size: 0.8rem; text-align: left;"></div>
|
||||
</div>
|
||||
<div style="width: 20px"></div>
|
||||
<div>
|
||||
<div class="text">Input Images</div>
|
||||
<div id="images" style="display: flex; width: 60vw; overflow-x: auto; overflow-y: hidden; scroll-behavior: smooth"></div>
|
||||
</div>
|
||||
</div>
|
||||
<div id="list" style="height: 10px"></div>
|
||||
<div class="text">Select person to sort by similarity and get a known face match</div>
|
||||
<div id="faces" style="height: 50vh; overflow-y: auto"></div>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
|
@ -0,0 +1,75 @@
|
|||
/**
|
||||
* Runs in a worker thread started by `node-match` demo app
|
||||
*
|
||||
*/
|
||||
|
||||
const threads = require('worker_threads');
|
||||
|
||||
let debug = false;
|
||||
|
||||
/** @type SharedArrayBuffer */
|
||||
let buffer;
|
||||
/** @type Float32Array */
|
||||
let view;
|
||||
let threshold = 0;
|
||||
let records = 0;
|
||||
|
||||
const descLength = 1024; // descriptor length in number of float32 elements (4096 bytes per descriptor)
|
||||
|
||||
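// buffer layout (matches how appendRecords in node-match.js fills the shared view):
// record i occupies descLength consecutive float32 elements starting at view[i * descLength]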
function distance(descBuffer, index, options = { order: 2, multiplier: 20 }) {
|
||||
const descriptor = new Float32Array(descBuffer);
|
||||
let sum = 0;
|
||||
for (let i = 0; i < descriptor.length; i++) {
|
||||
const diff = (options.order === 2) ? (descriptor[i] - view[index * descLength + i]) : (Math.abs(descriptor[i] - view[index * descLength + i]));
|
||||
sum += (options.order === 2) ? (diff * diff) : (diff ** options.order);
|
||||
}
|
||||
return (options.multiplier || 20) * sum;
|
||||
}
|
||||
|
||||
function match(descBuffer, options = { order: 2, multiplier: 20 }) {
|
||||
let best = Number.MAX_SAFE_INTEGER;
|
||||
let index = -1;
|
||||
for (let i = 0; i < records; i++) {
|
||||
const res = distance(descBuffer, i, { order: options.order, multiplier: options.multiplier });
|
||||
if (res < best) {
|
||||
best = res;
|
||||
index = i;
|
||||
}
|
||||
if (best < threshold || best === 0) break; // short circuit
|
||||
}
|
||||
best = (options.order === 2) ? Math.sqrt(best) : best ** (1 / options.order);
|
||||
return { index, distance: best, similarity: Math.max(0, 100 - best) / 100.0 };
|
||||
}
|
||||
|
||||
threads.parentPort?.on('message', (msg) => {
|
||||
if (typeof msg.descriptor !== 'undefined') { // actual work order to find a match
|
||||
const t0 = performance.now();
|
||||
const result = match(msg.descriptor);
|
||||
const t1 = performance.now();
|
||||
threads.parentPort?.postMessage({ request: msg.request, time: Math.trunc(t1 - t0), ...result });
|
||||
return; // short circuit
|
||||
}
|
||||
if (msg instanceof SharedArrayBuffer) { // called only once to receive reference to shared array buffer
|
||||
buffer = msg;
|
||||
view = new Float32Array(buffer); // initialize f32 view into buffer
|
||||
if (debug) threads.parentPort?.postMessage(`buffer: ${buffer.byteLength}`);
|
||||
}
|
||||
if (typeof msg.records !== 'undefined') { // received every time the number of records changes
|
||||
records = msg.records;
|
||||
if (debug) threads.parentPort?.postMessage(`records: ${records}`);
|
||||
}
|
||||
if (typeof msg.debug !== 'undefined') { // set verbose logging
|
||||
debug = msg.debug;
|
||||
if (debug) threads.parentPort?.postMessage(`debug: ${debug}`);
|
||||
}
|
||||
if (typeof msg.threshold !== 'undefined') { // set minimum similarity threshold
|
||||
threshold = msg.threshold;
|
||||
if (debug) threads.parentPort?.postMessage(`threshold: ${threshold}`);
|
||||
}
|
||||
if (typeof msg.shutdown !== 'undefined') { // got message to close worker
|
||||
if (debug) threads.parentPort?.postMessage('shutting down');
|
||||
process.exit(0); // eslint-disable-line no-process-exit
|
||||
}
|
||||
});
|
||||
|
||||
if (debug) threads.parentPort?.postMessage('started');
|
|
@ -0,0 +1,184 @@
|
|||
/**
|
||||
* Human demo app for NodeJS that generates random facial descriptors
|
||||
* and uses NodeJS multi-threading to start multiple threads for face matching
|
||||
* uses `node-match-worker.js` to perform actual face matching analysis
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const threads = require('worker_threads');
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
|
||||
// global options
|
||||
const options = {
|
||||
dbFile: 'demo/facematch/faces.json', // sample face db
|
||||
dbMax: 10000, // maximum number of records to hold in memory
|
||||
threadPoolSize: 12, // number of worker threads to create in thread pool
|
||||
workerSrc: './node-match-worker.js', // code that executes in the worker thread
|
||||
debug: false, // verbose messages
|
||||
minThreshold: 0.5, // match returns first record that meets the similarity threshold, set to 0 to always scan all records
|
||||
descLength: 1024, // descriptor length
|
||||
};
|
||||
|
||||
// test options
|
||||
const testOptions = {
|
||||
dbFact: 175, // load db n times to fake huge size
|
||||
maxJobs: 200, // exit after processing this many jobs
|
||||
fuzDescriptors: true, // randomize descriptor content before match for harder jobs
|
||||
};
|
||||
|
||||
// global data structures
|
||||
const data = {
|
||||
/** @type string[] */
|
||||
labels: [], // array of strings; its length serves as the overall number of records so it has to be maintained carefully
|
||||
/** @type SharedArrayBuffer | null */
|
||||
buffer: null,
|
||||
/** @type Float32Array | null */
|
||||
view: null,
|
||||
/** @type threads.Worker[] */
|
||||
workers: [], // holds instances of workers; a worker can be null if it exited
|
||||
requestID: 0, // each request should increment this counter as its used for round robin assignment
|
||||
};
|
||||
|
||||
let t0 = process.hrtime.bigint(); // used for perf counters
|
||||
|
||||
const appendRecords = (labels, descriptors) => {
|
||||
if (!data.view) return 0;
|
||||
if (descriptors.length !== labels.length) {
|
||||
log.error('append error:', { descriptors: descriptors.length, labels: labels.length });
|
||||
}
|
||||
// if (options.debug) log.state('appending:', { descriptors: descriptors.length, labels: labels.length });
|
||||
for (let i = 0; i < descriptors.length; i++) {
|
||||
for (let j = 0; j < descriptors[i].length; j++) {
|
||||
data.view[data.labels.length * descriptors[i].length + j] = descriptors[i][j]; // add each descriptors element to buffer
|
||||
}
|
||||
data.labels.push(labels[i]); // finally add to labels
|
||||
}
|
||||
for (const worker of data.workers) { // inform all workers how many records we have
|
||||
if (worker) worker.postMessage({ records: data.labels.length });
|
||||
}
|
||||
return data.labels.length;
|
||||
};
|
||||
|
||||
const getLabel = (index) => data.labels[index];
|
||||
|
||||
const getDescriptor = (index) => {
|
||||
if (!data.view) return [];
|
||||
const descriptor = [];
|
||||
for (let i = 0; i < 1024; i++) descriptor.push(data.view[index * options.descLength + i]);
|
||||
return descriptor;
|
||||
};
|
||||
|
||||
const fuzDescriptor = (descriptor) => {
|
||||
for (let i = 0; i < descriptor.length; i++) descriptor[i] += Math.random() - 0.5;
|
||||
return descriptor;
|
||||
};
|
||||
|
||||
const delay = (ms) => new Promise((resolve) => { setTimeout(resolve, ms); });
|
||||
|
||||
async function workersClose() {
|
||||
const current = data.workers.filter((worker) => !!worker).length;
|
||||
log.info('closing workers:', { poolSize: data.workers.length, activeWorkers: current });
|
||||
for (const worker of data.workers) {
|
||||
if (worker) worker.postMessage({ shutdown: true }); // tell worker to exit
|
||||
}
|
||||
await delay(250); // wait a little for threads to exit on their own
|
||||
const remaining = data.workers.filter((worker) => !!worker).length;
|
||||
if (remaining > 0) {
|
||||
log.info('terminating remaining workers:', { remaining, pool: data.workers.length });
|
||||
for (const worker of data.workers) {
|
||||
if (worker) worker.terminate(); // if worker did not exit cleanly terminate it
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const workerMessage = (index, msg) => {
|
||||
if (msg.request) {
|
||||
if (options.debug) log.data('message:', { worker: index, request: msg.request, time: msg.time, label: getLabel(msg.index), similarity: msg.similarity });
|
||||
if (msg.request >= testOptions.maxJobs) {
|
||||
const t1 = process.hrtime.bigint();
|
||||
const elapsed = Math.round(Number(t1 - t0) / 1000 / 1000);
|
||||
log.state({ matchJobsFinished: testOptions.maxJobs, totalTimeMs: elapsed, averageTimeMs: Math.round(100 * elapsed / testOptions.maxJobs) / 100 });
|
||||
workersClose();
|
||||
}
|
||||
} else {
|
||||
log.data('message:', { worker: index, msg });
|
||||
}
|
||||
};
|
||||
|
||||
async function workerClose(id, code) {
|
||||
const previous = data.workers.filter((worker) => !!worker).length;
|
||||
delete data.workers[id];
|
||||
const current = data.workers.filter((worker) => !!worker).length;
|
||||
if (options.debug) log.state('worker exit:', { id, code, previous, current });
|
||||
}
|
||||
|
||||
async function workersStart(numWorkers) {
|
||||
const previous = data.workers.filter((worker) => !!worker).length;
|
||||
log.info('starting worker thread pool:', { totalWorkers: numWorkers, alreadyActive: previous });
|
||||
for (let i = 0; i < numWorkers; i++) {
|
||||
if (!data.workers[i]) { // worker does not exist, so create it
|
||||
const worker = new threads.Worker(path.join(__dirname, options.workerSrc));
|
||||
worker.on('message', (msg) => workerMessage(i, msg));
|
||||
worker.on('error', (err) => log.error('worker error:', { err }));
|
||||
worker.on('exit', (code) => workerClose(i, code));
|
||||
worker.postMessage(data.buffer); // send buffer to worker
|
||||
data.workers[i] = worker;
|
||||
}
|
||||
data.workers[i]?.postMessage({ records: data.labels.length, threshold: options.minThreshold, debug: options.debug }); // inform worker how many records there are
|
||||
}
|
||||
await delay(100); // just wait a bit for everything to settle down
|
||||
}
|
||||
|
||||
const match = (descriptor) => {
|
||||
// const arr = Float32Array.from(descriptor);
|
||||
const buffer = new ArrayBuffer(options.descLength * 4);
|
||||
const view = new Float32Array(buffer);
|
||||
view.set(descriptor);
|
||||
const available = data.workers.filter((worker) => !!worker).length; // find number of available workers
|
||||
if (available > 0) data.workers[data.requestID % available].postMessage({ descriptor: buffer, request: data.requestID }, [buffer]); // round robin to first available worker
|
||||
else log.error('no available workers');
|
||||
};
|
||||
|
||||
async function loadDB(count) {
|
||||
const previous = data.labels.length;
|
||||
if (!fs.existsSync(options.dbFile)) {
|
||||
log.error('db file does not exist:', options.dbFile);
|
||||
return;
|
||||
}
|
||||
t0 = process.hrtime.bigint();
|
||||
for (let i = 0; i < count; i++) { // test loop: load entire face db from array of objects n times into buffer
|
||||
const db = JSON.parse(fs.readFileSync(options.dbFile).toString());
|
||||
const names = db.map((record) => record.name);
|
||||
const descriptors = db.map((record) => record.embedding);
|
||||
appendRecords(names, descriptors);
|
||||
}
|
||||
log.data('db loaded:', { existingRecords: previous, newRecords: data.labels.length });
|
||||
}
|
||||
|
||||
async function createBuffer() {
|
||||
data.buffer = new SharedArrayBuffer(4 * options.dbMax * options.descLength); // preallocate max number of records as sharedarraybuffers cannot grow
|
||||
data.view = new Float32Array(data.buffer); // create view into buffer
|
||||
data.labels.length = 0;
|
||||
log.data('created shared buffer:', { maxDescriptors: (data.view.length || 0) / options.descLength, totalBytes: data.buffer.byteLength, totalElements: data.view.length });
|
||||
}
|
||||
|
||||
async function main() {
|
||||
log.header();
|
||||
log.info('options:', options);
|
||||
|
||||
await createBuffer(); // create shared buffer array
|
||||
await loadDB(testOptions.dbFact); // loadDB is a test method that calls actual addRecords
|
||||
await workersStart(options.threadPoolSize); // can be called at anytime to modify worker pool size
|
||||
for (let i = 0; i < testOptions.maxJobs; i++) {
|
||||
const idx = Math.trunc(data.labels.length * Math.random()); // grab a random descriptor index that we'll search for
|
||||
const descriptor = getDescriptor(idx); // grab a descriptor at index
|
||||
data.requestID++; // increase request id
|
||||
if (testOptions.fuzDescriptors) match(fuzDescriptor(descriptor)); // fuz descriptor for harder match
|
||||
else match(descriptor);
|
||||
if (options.debug) log.info('submitted job', data.requestID); // we already know what we're searching for so we can compare results
|
||||
}
|
||||
log.state('submitted:', { matchJobs: testOptions.maxJobs, poolSize: data.workers.length, activeWorkers: data.workers.filter((worker) => !!worker).length });
|
||||
}
|
||||
|
||||
main();
|
|
@ -0,0 +1,3 @@
|
|||
# Helper libraries
|
||||
|
||||
Used by main `Human` demo app
|
|
@ -1,5 +1,3 @@
|
|||
/* eslint-disable max-len */
|
||||
|
||||
// based on: https://github.com/munrocket/gl-bench
|
||||
|
||||
const UICSS = `
|
||||
|
@ -38,15 +36,13 @@ const UISVG = `
|
|||
|
||||
class GLBench {
|
||||
/** GLBench constructor
|
||||
* @param { WebGLRenderingContext | WebGL2RenderingContext } gl context
|
||||
* @param { WebGLRenderingContext | WebGL2RenderingContext | null } gl context
|
||||
* @param { Object | undefined } settings additional settings
|
||||
*/
|
||||
constructor(gl, settings = {}) {
|
||||
this.css = UICSS;
|
||||
this.svg = UISVG;
|
||||
// eslint-disable-next-line @typescript-eslint/no-empty-function
|
||||
this.paramLogger = () => {};
|
||||
// eslint-disable-next-line @typescript-eslint/no-empty-function
|
||||
this.chartLogger = () => {};
|
||||
this.chartLen = 20;
|
||||
this.chartHz = 20;
|
||||
|
@ -93,7 +89,6 @@ class GLBench {
|
|||
|
||||
const addProfiler = (fn, self, target) => {
|
||||
const t = self.now();
|
||||
// eslint-disable-next-line prefer-rest-params
|
||||
fn.apply(target, arguments);
|
||||
if (self.trackGPU) self.finished.push(glFinish(t, self.activeAccums.slice(0)));
|
||||
};
|
||||
|
@ -108,13 +103,11 @@ class GLBench {
|
|||
if (gl[fn]) {
|
||||
gl[fn] = addProfiler(gl[fn], this, gl);
|
||||
} else {
|
||||
// eslint-disable-next-line no-console
|
||||
console.log('bench: cannot attach to webgl function');
|
||||
}
|
||||
|
||||
/*
|
||||
gl.getExtension = ((fn, self) => {
|
||||
// eslint-disable-next-line prefer-rest-params
|
||||
const ext = fn.apply(gl, arguments);
|
||||
if (ext) {
|
||||
['drawElementsInstancedANGLE', 'drawBuffersWEBGL'].forEach((fn2) => {
|
||||
|
@ -149,7 +142,6 @@ class GLBench {
|
|||
return (i, cpu, gpu, mem, fps, totalTime, frameId) => {
|
||||
nodes['gl-cpu'][i].style.strokeDasharray = (cpu * 0.27).toFixed(0) + ' 100';
|
||||
nodes['gl-gpu'][i].style.strokeDasharray = (gpu * 0.27).toFixed(0) + ' 100';
|
||||
// eslint-disable-next-line no-nested-ternary
|
||||
nodes['gl-mem'][i].innerHTML = names[i] ? names[i] : (mem ? 'mem: ' + mem.toFixed(0) + 'mb' : '');
|
||||
nodes['gl-fps'][i].innerHTML = 'FPS: ' + fps.toFixed(1);
|
||||
logger(names[i], cpu, gpu, mem, fps, totalTime, frameId);
|
||||
|
|
|
@ -0,0 +1,157 @@
|
|||
let callbackFunction = null;
|
||||
|
||||
function createElement(type, config) {
|
||||
const htmlElement = document.createElement(type);
|
||||
if (config === undefined) return htmlElement;
|
||||
if (config.className) htmlElement.className = config.className;
|
||||
if (config.content) htmlElement.textContent = config.content;
|
||||
if (config.style) htmlElement.style = config.style;
|
||||
if (config.children) config.children.forEach((el) => !el || htmlElement.appendChild(el));
|
||||
return htmlElement;
|
||||
}
|
||||
|
||||
function createExpandedElement(node) {
|
||||
const iElem = createElement('i');
|
||||
if (node.expanded) { iElem.className = 'fas fa-caret-down'; } else { iElem.className = 'fas fa-caret-right'; }
|
||||
const caretElem = createElement('div', { style: 'width: 18px; text-align: center; cursor: pointer', children: [iElem] });
|
||||
const handleClick = node.toggle.bind(node);
|
||||
caretElem.addEventListener('click', handleClick);
|
||||
const indexElem = createElement('div', { className: 'json json-index', content: node.key });
|
||||
indexElem.addEventListener('click', handleClick);
|
||||
const typeElem = createElement('div', { className: 'json json-type', content: node.type });
|
||||
const keyElem = createElement('div', { className: 'json json-key', content: node.key });
|
||||
keyElem.addEventListener('click', handleClick);
|
||||
const sizeElem = createElement('div', { className: 'json json-size' });
|
||||
sizeElem.addEventListener('click', handleClick);
|
||||
if (node.type === 'array') {
|
||||
sizeElem.innerText = `[${node.children.length} items]`;
|
||||
} else if (node.type === 'object') {
|
||||
const size = node.children.find((item) => item.key === 'size');
|
||||
sizeElem.innerText = size ? `{${size.value.toLocaleString()} bytes}` : `{${node.children.length} properties}`;
|
||||
}
|
||||
let lineChildren;
|
||||
if (node.key === null) lineChildren = [caretElem, typeElem, sizeElem];
|
||||
else if (node.parent.type === 'array') lineChildren = [caretElem, indexElem, sizeElem];
|
||||
else lineChildren = [caretElem, keyElem, sizeElem];
|
||||
const lineElem = createElement('div', { className: 'json-line', children: lineChildren });
|
||||
if (node.depth > 0) lineElem.style = `margin-left: ${node.depth * 24}px;`;
|
||||
return lineElem;
|
||||
}
|
||||
|
||||
function createNotExpandedElement(node) {
|
||||
const caretElem = createElement('div', { style: 'width: 18px' });
|
||||
const keyElem = createElement('div', { className: 'json json-key', content: node.key });
|
||||
const separatorElement = createElement('div', { className: 'json-separator', content: ':' });
|
||||
const valueType = ` json-${typeof node.value}`;
|
||||
const valueContent = node.value.toLocaleString();
|
||||
const valueElement = createElement('div', { className: `json json-value${valueType}`, content: valueContent });
|
||||
const lineElem = createElement('div', { className: 'json-line', children: [caretElem, keyElem, separatorElement, valueElement] });
|
||||
if (node.depth > 0) lineElem.style = `margin-left: ${node.depth * 24}px;`;
|
||||
return lineElem;
|
||||
}
|
||||
|
||||
function createNode() {
|
||||
return {
|
||||
key: '',
|
||||
parent: {},
|
||||
value: null,
|
||||
expanded: false,
|
||||
type: '',
|
||||
children: [],
|
||||
elem: {},
|
||||
depth: 0,
|
||||
|
||||
hideChildren() {
|
||||
if (Array.isArray(this.children)) {
|
||||
this.children.forEach((item) => {
|
||||
item['elem']['classList'].add('hide');
|
||||
if (item['expanded']) item.hideChildren();
|
||||
});
|
||||
}
|
||||
},
|
||||
showChildren() {
|
||||
if (Array.isArray(this.children)) {
|
||||
this.children.forEach((item) => {
|
||||
item['elem']['classList'].remove('hide');
|
||||
if (item['expanded']) item.showChildren();
|
||||
});
|
||||
}
|
||||
},
|
||||
toggle() {
|
||||
if (this.expanded) {
|
||||
this.hideChildren();
|
||||
const icon = this.elem?.querySelector('.fas');
|
||||
icon.classList.replace('fa-caret-down', 'fa-caret-right');
|
||||
if (callbackFunction !== null) callbackFunction(null);
|
||||
} else {
|
||||
this.showChildren();
|
||||
const icon = this.elem?.querySelector('.fas');
|
||||
icon.classList.replace('fa-caret-right', 'fa-caret-down');
|
||||
if (this.type === 'object') {
|
||||
if (callbackFunction !== null) callbackFunction(`${this.parent?.key}/${this.key}`);
|
||||
}
|
||||
}
|
||||
this.expanded = !this.expanded;
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function getType(val) {
|
||||
let type;
|
||||
if (Array.isArray(val)) type = 'array';
|
||||
else if (val === null) type = 'null';
|
||||
else type = typeof val;
|
||||
return type;
|
||||
}
|
||||
|
||||
function traverseObject(obj, parent, filter) {
|
||||
for (const key in obj) {
|
||||
const child = createNode();
|
||||
child.parent = parent;
|
||||
child.key = key;
|
||||
child.type = getType(obj[key]);
|
||||
child.depth = parent.depth + 1;
|
||||
child.expanded = false;
|
||||
if (Array.isArray(filter)) {
|
||||
for (const filtered of filter) {
|
||||
if (key === filtered) return;
|
||||
}
|
||||
}
|
||||
if (typeof obj[key] === 'object') {
|
||||
child.children = [];
|
||||
parent.children.push(child);
|
||||
traverseObject(obj[key], child, filter);
|
||||
child.elem = createExpandedElement(child);
|
||||
} else {
|
||||
child.value = obj[key];
|
||||
child.elem = createNotExpandedElement(child);
|
||||
parent.children.push(child);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function createTree(obj, title, filter) {
|
||||
const tree = createNode();
|
||||
tree.type = title;
|
||||
tree.key = title;
|
||||
tree.children = [];
|
||||
tree.expanded = true;
|
||||
traverseObject(obj, tree, filter);
|
||||
tree.elem = createExpandedElement(tree);
|
||||
return tree;
|
||||
}
|
||||
|
||||
function traverseTree(node, callback) {
|
||||
callback(node);
|
||||
if (node.children !== null) node.children.forEach((item) => traverseTree(item, callback));
|
||||
}
|
||||
|
||||
async function jsonView(json, element, title = '', filter = []) {
|
||||
const tree = createTree(json, title, filter);
|
||||
traverseTree(tree, (node) => {
|
||||
if (!node.expanded) node.hideChildren();
|
||||
element.appendChild(node.elem);
|
||||
});
|
||||
}
|
||||
|
||||
export default jsonView;
|
|
@ -19,7 +19,7 @@ function createCSS() {
|
|||
if (CSScreated) return;
|
||||
const css = `
|
||||
:root { --rounded: 0.1rem; }
|
||||
.menu { position: absolute; top: 0rem; right: 0; min-width: 180px; width: max-content; padding: 0.2rem 0.2rem 0 0.2rem; line-height: 1.8rem; z-index: 10; background: ${theme.background}; border: none }
|
||||
.menu { position: absolute; top: 0rem; right: 0; min-width: 180px; width: max-content; padding: 0.2rem 0.8rem 0 0.8rem; line-height: 1.8rem; z-index: 10; background: ${theme.background}; border: none }
|
||||
.button { text-shadow: none; }
|
||||
|
||||
.menu-container { display: block; max-height: 100vh; }
|
||||
|
@ -44,7 +44,7 @@ function createCSS() {
|
|||
.menu-button:hover { background: ${theme.buttonHover}; box-shadow: 4px 4px 4px 0 black; }
|
||||
.menu-button:focus { outline: none; }
|
||||
|
||||
.menu-checkbox { width: 2.6rem; height: 1rem; background: ${theme.itemBackground}; margin: 0.5rem 0.5rem 0 0; position: relative; border-radius: var(--rounded); }
|
||||
.menu-checkbox { width: 2.6rem; height: 1rem; background: ${theme.itemBackground}; margin: 0.5rem 1.0rem 0 0; position: relative; border-radius: var(--rounded); }
|
||||
.menu-checkbox:after { content: 'OFF'; color: ${theme.checkboxOff}; position: absolute; right: 0.2rem; top: -0.4rem; font-weight: 800; font-size: 0.5rem; }
|
||||
.menu-checkbox:before { content: 'ON'; color: ${theme.checkboxOn}; position: absolute; left: 0.3rem; top: -0.4rem; font-weight: 800; font-size: 0.5rem; }
|
||||
.menu-checkbox-label { width: 1.3rem; height: 1rem; cursor: pointer; position: absolute; top: 0; left: 0rem; z-index: 1; background: ${theme.checkboxOff};
|
||||
|
@ -53,7 +53,7 @@ function createCSS() {
|
|||
input[type=checkbox] { visibility: hidden; }
|
||||
input[type=checkbox]:checked + label { left: 1.4rem; background: ${theme.checkboxOn}; }
|
||||
|
||||
.menu-range { margin: 0.2rem 0.5rem 0 0; width: 3.5rem; background: transparent; color: ${theme.rangeBackground}; }
|
||||
.menu-range { margin: 0.2rem 1.0rem 0 0; width: 5rem; background: transparent; color: ${theme.rangeBackground}; }
|
||||
.menu-range:before { color: ${theme.rangeLabel}; margin: 0 0.4rem 0 0; font-weight: 800; font-size: 0.6rem; position: relative; top: 0.3rem; content: attr(value); }
|
||||
|
||||
input[type=range] { -webkit-appearance: none; }
|
||||
|
@ -84,6 +84,7 @@ class Menu {
|
|||
}
|
||||
|
||||
createMenu(parent, title = '', position = { top: null, left: null, bottom: null, right: null }) {
|
||||
/** @type {HTMLDivElement} */
|
||||
this.menu = document.createElement('div');
|
||||
this.menu.id = `menu-${instance}`;
|
||||
this.menu.className = 'menu';
|
||||
|
@ -131,11 +132,11 @@ class Menu {
|
|||
}
|
||||
|
||||
get width() {
|
||||
return this.menu.offsetWidth || 0;
|
||||
return this.menu ? this.menu.offsetWidth : 0;
|
||||
}
|
||||
|
||||
get height() {
|
||||
return this.menu.offsetHeight || 0;
|
||||
return this.menu ? this.menu.offsetHeight : 0;
|
||||
}
|
||||
|
||||
hide() {
|
||||
|
@ -203,8 +204,10 @@ class Menu {
|
|||
el.innerHTML = `<div class="menu-checkbox"><input class="menu-checkbox" type="checkbox" id="${this.newID}" ${object[variable] ? 'checked' : ''}/><label class="menu-checkbox-label" for="${this.ID}"></label></div>${title}`;
|
||||
if (this.container) this.container.appendChild(el);
|
||||
el.addEventListener('change', (evt) => {
|
||||
object[variable] = evt.target.checked;
|
||||
if (callback) callback(evt.target.checked);
|
||||
if (evt.target) {
|
||||
object[variable] = evt.target['checked'];
|
||||
if (callback) callback(evt.target['checked']);
|
||||
}
|
||||
});
|
||||
return el;
|
||||
}
|
||||
|
@ -217,13 +220,13 @@ class Menu {
|
|||
const def = item === selected ? 'selected' : '';
|
||||
options += `<option value="${item}" ${def}>${item}</option>`;
|
||||
}
|
||||
el.innerHTML = `<div class="menu-list"><select name="${this.ID}" class="menu-list-item">${options}</select><label for="${this.ID}"></label></div>${title}`;
|
||||
el.innerHTML = `<div class="menu-list"><select name="${this.ID}" title="${title}" class="menu-list-item">${options}</select><label for="${this.ID}"></label></div>${title}`;
|
||||
el.style.fontFamily = document.body.style.fontFamily;
|
||||
el.style.fontSize = document.body.style.fontSize;
|
||||
el.style.fontVariant = document.body.style.fontVariant;
|
||||
if (this.container) this.container.appendChild(el);
|
||||
el.addEventListener('change', (evt) => {
|
||||
if (callback) callback(items[evt.target.selectedIndex]);
|
||||
if (callback && evt.target) callback(items[evt.target['selectedIndex']]);
|
||||
});
|
||||
return el;
|
||||
}
|
||||
|
@ -231,16 +234,16 @@ class Menu {
|
|||
addRange(title, object, variable, min, max, step, callback) {
|
||||
const el = document.createElement('div');
|
||||
el.className = 'menu-item';
|
||||
el.innerHTML = `<input class="menu-range" type="range" id="${this.newID}" min="${min}" max="${max}" step="${step}" value="${object[variable]}">${title}`;
|
||||
el.innerHTML = `<input class="menu-range" type="range" title="${title}" id="${this.newID}" min="${min}" max="${max}" step="${step}" value="${object[variable]}">${title}`;
|
||||
if (this.container) this.container.appendChild(el);
|
||||
el.addEventListener('change', (evt) => {
|
||||
if (evt.target) {
|
||||
object[variable] = parseInt(evt.target.value) === parseFloat(evt.target.value) ? parseInt(evt.target.value) : parseFloat(evt.target.value);
|
||||
evt.target.setAttribute('value', evt.target.value);
|
||||
if (callback) callback(evt.target.value);
|
||||
object[variable] = parseInt(evt.target['value']) === parseFloat(evt.target['value']) ? parseInt(evt.target['value']) : parseFloat(evt.target['value']);
|
||||
evt.target.setAttribute('value', evt.target['value']);
|
||||
if (callback) callback(evt.target['value']);
|
||||
}
|
||||
});
|
||||
el.input = el.children[0];
|
||||
el['input'] = el.children[0];
|
||||
return el;
|
||||
}
|
||||
|
||||
|
@ -280,7 +283,6 @@ class Menu {
|
|||
return el;
|
||||
}
|
||||
|
||||
// eslint-disable-next-line class-methods-use-this
|
||||
updateValue(title, val, suffix = '') {
|
||||
const el = document.getElementById(`menu-val-${title}`);
|
||||
if (el) el.innerText = `${title}: ${val}${suffix}`;
|
||||
|
@ -297,12 +299,13 @@ class Menu {
|
|||
return el;
|
||||
}
|
||||
|
||||
// eslint-disable-next-line class-methods-use-this
|
||||
async updateChart(id, values) {
|
||||
if (!values || (values.length === 0)) return;
|
||||
/** @type {HTMLCanvasElement} */
|
||||
const canvas = document.getElementById(`menu-canvas-${id}`);
|
||||
if (!canvas) return;
|
||||
const ctx = canvas.getContext('2d');
|
||||
if (!ctx) return;
|
||||
ctx.fillStyle = theme.background;
|
||||
ctx.fillRect(0, 0, canvas.width, canvas.height);
|
||||
const width = canvas.width / values.length;
|
||||
|
@ -316,7 +319,7 @@ class Menu {
|
|||
ctx.fillRect(i * width, 0, width - 4, canvas.height);
|
||||
ctx.fillStyle = theme.background;
|
||||
ctx.font = `${width / 1.5}px "Segoe UI"`;
|
||||
ctx.fillText(Math.round(values[i]), i * width + 1, canvas.height - 1, width - 1);
|
||||
ctx.fillText(Math.round(values[i]).toString(), i * width + 1, canvas.height - 1, width - 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,11 +4,26 @@ async function log(...msg) {
|
|||
if (debug) {
|
||||
const dt = new Date();
|
||||
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
|
||||
// eslint-disable-next-line no-console
|
||||
console.log(ts, 'webrtc', ...msg);
|
||||
console.log(ts, 'webrtc', ...msg); // eslint-disable-line no-console
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* helper implementation of webrtc
|
||||
* performs:
|
||||
* - discovery
|
||||
* - handshake
|
||||
* - connect to webrtc stream
|
||||
* - assign webrtc stream to video element
|
||||
*
|
||||
* for development purposes I'm using a test webrtc server that reads an rtsp stream from a security camera:
|
||||
* <https://github.com/vladmandic/stream-rtsp>
|
||||
*
|
||||
* @param {string} server
|
||||
* @param {string} streamName
|
||||
* @param {HTMLVideoElement} elementName
|
||||
* @return {Promise}
|
||||
*/
|
||||
async function webRTC(server, streamName, elementName) {
|
||||
const suuid = streamName;
|
||||
log('client starting');
|
||||
|
|
|
@ -0,0 +1,133 @@
|
|||
/**
|
||||
* PWA Service Worker for Human main demo
|
||||
*/
|
||||
|
||||
/* eslint-disable no-restricted-globals */
|
||||
/// <reference lib="webworker" />
|
||||
|
||||
const skipCaching = false;
|
||||
|
||||
const cacheName = 'Human';
|
||||
const cacheFiles = ['/favicon.ico', 'manifest.webmanifest']; // assets and models are cached on first access
|
||||
|
||||
let cacheModels = true; // *.bin; *.json
|
||||
let cacheWASM = true; // *.wasm
|
||||
let cacheOther = false; // *
|
||||
|
||||
let listening = false;
|
||||
const stats = { hit: 0, miss: 0 };
|
||||
|
||||
const log = (...msg) => {
|
||||
const dt = new Date();
|
||||
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
|
||||
console.log(ts, 'pwa', ...msg); // eslint-disable-line no-console
|
||||
};
|
||||
|
||||
async function updateCached(req) {
|
||||
fetch(req)
|
||||
.then((update) => {
|
||||
// update cache if request is ok
|
||||
if (update.ok) {
|
||||
caches // eslint-disable-line promise/no-nesting
|
||||
.open(cacheName)
|
||||
.then((cache) => cache.put(req, update))
|
||||
.catch((err) => log('cache update error', err));
|
||||
}
|
||||
return true;
|
||||
})
|
||||
.catch((err) => {
|
||||
log('fetch error', err);
|
||||
return false;
|
||||
});
|
||||
}
|
||||
|
||||
async function getCached(evt) {
|
||||
// just fetch
|
||||
if (skipCaching) return fetch(evt.request);
|
||||
|
||||
// get from cache or fetch if not in cache
|
||||
let found = await caches.match(evt.request);
|
||||
if (found && found.ok) {
|
||||
stats.hit += 1;
|
||||
} else {
|
||||
stats.miss += 1;
|
||||
found = await fetch(evt.request);
|
||||
}
|
||||
|
||||
// if still don't have it, return offline page
|
||||
if (!found || !found.ok) {
|
||||
found = await caches.match('offline.html');
|
||||
}
|
||||
|
||||
// update cache in the background
|
||||
if (found && found.type === 'basic' && found.ok) {
|
||||
const uri = new URL(evt.request.url);
|
||||
if (uri.pathname.endsWith('.bin') || uri.pathname.endsWith('.json')) {
|
||||
if (cacheModels) updateCached(evt.request);
|
||||
} else if (uri.pathname.endsWith('.wasm')) {
|
||||
if (cacheWASM) updateCached(evt.request);
|
||||
} else if (cacheOther) {
|
||||
updateCached(evt.request);
|
||||
}
|
||||
}
|
||||
|
||||
return found;
|
||||
}
|
||||
|
||||
function cacheInit() {
|
||||
caches.open(cacheName)
|
||||
.then((cache) => cache.addAll(cacheFiles) // eslint-disable-line promise/no-nesting
|
||||
.then(
|
||||
() => log('cache refresh:', cacheFiles.length, 'files'),
|
||||
(err) => log('cache error', err),
|
||||
))
|
||||
.catch(() => log('cache error'));
|
||||
}
|
||||
|
||||
if (!listening) {
|
||||
// get messages from main app to update configuration
|
||||
self.addEventListener('message', (evt) => {
|
||||
log('event message:', evt.data);
|
||||
switch (evt.data.key) {
|
||||
case 'cacheModels': cacheModels = evt.data.val; break;
|
||||
case 'cacheWASM': cacheWASM = evt.data.val; break;
|
||||
case 'cacheOther': cacheOther = evt.data.val; break;
|
||||
default:
|
||||
}
|
||||
});
|
||||
|
||||
self.addEventListener('install', (evt) => {
|
||||
log('install');
|
||||
self.skipWaiting();
|
||||
evt.waitUntil(cacheInit);
|
||||
});
|
||||
|
||||
self.addEventListener('activate', (evt) => {
|
||||
log('activate');
|
||||
evt.waitUntil(self.clients.claim());
|
||||
});
|
||||
|
||||
self.addEventListener('fetch', (evt) => {
|
||||
const uri = new URL(evt.request.url);
|
||||
// if (uri.pathname === '/') { log('cache skip /', evt.request); return; } // skip root access requests
|
||||
if (evt.request.cache === 'only-if-cached' && evt.request.mode !== 'same-origin') return; // required due to chrome bug
|
||||
if (uri.origin !== self.location.origin) return; // skip non-local requests
|
||||
if (evt.request.method !== 'GET') return; // only cache get requests
|
||||
if (evt.request.url.includes('/api/')) return; // don't cache api requests, failures are handled at the time of call
|
||||
|
||||
const response = getCached(evt);
|
||||
if (response) evt.respondWith(response);
|
||||
else log('fetch response missing');
|
||||
});
|
||||
|
||||
// only trigger controllerchange once
|
||||
let refreshed = false;
|
||||
self.addEventListener('controllerchange', (evt) => {
|
||||
log(`PWA: ${evt.type}`);
|
||||
if (refreshed) return;
|
||||
refreshed = true;
|
||||
self.location.reload();
|
||||
});
|
||||
|
||||
listening = true;
|
||||
}
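// the worker above reconfigures itself from 'message' events carrying key/val pairs (cacheModels, cacheWASM, cacheOther)
// minimal sketch of how the main page might register it and toggle caching; this code belongs in the main page, not in this worker,
// and the file name and scope below are assumptions, not part of this demo:
//
//   if ('serviceWorker' in navigator) {
//     navigator.serviceWorker.register('pwa-serviceworker.js', { scope: './' })
//       .then((reg) => console.log('pwa registered:', reg.scope))
//       .catch((err) => console.log('pwa error:', err));
//     navigator.serviceWorker.ready.then(() => {
//       // controller is set once this worker controls the page
//       if (navigator.serviceWorker.controller) navigator.serviceWorker.controller.postMessage({ key: 'cacheModels', val: false }); // disable model caching
//     });
//   }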
|
|
@ -1,30 +1,37 @@
|
|||
import Human from '../dist/human.esm.js';
|
||||
/**
|
||||
* Web worker used by main demo app
|
||||
* Loaded from index.js
|
||||
*/
|
||||
|
||||
/// <reference lib="webworker"/>
|
||||
|
||||
// load Human using IIFE script as Chrome Mobile does not support Modules as Workers
|
||||
self.importScripts('../dist/human.js'); // eslint-disable-line no-restricted-globals
|
||||
|
||||
let busy = false;
|
||||
const human = new Human();
|
||||
// eslint-disable-next-line new-cap, no-undef
|
||||
const human = new Human.default();
|
||||
|
||||
function log(...msg) {
|
||||
const dt = new Date();
|
||||
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
|
||||
// eslint-disable-next-line no-console
|
||||
if (msg) console.log(ts, 'Human:', ...msg);
|
||||
}
|
||||
|
||||
onmessage = async (msg) => {
|
||||
onmessage = async (msg) => { // receive message from main thread
|
||||
if (busy) return;
|
||||
busy = true;
|
||||
// received from index.js using:
|
||||
// worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
|
||||
const image = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
|
||||
let result = {};
|
||||
try {
|
||||
result = await human.detect(image, msg.data.userConfig);
|
||||
} catch (err) {
|
||||
result.error = err.message;
|
||||
log('worker thread error:', err.message);
|
||||
result = await human.detect(image, msg.data.userConfig);
|
||||
result.tensors = human.tf.engine().state.numTensors; // append to result object so main thread gets info
|
||||
result.backend = human.tf.getBackend(); // append to result object so main thread gets info
|
||||
if (result.canvas) { // convert canvas to imageData and send it by reference
|
||||
const canvas = new OffscreenCanvas(result.canvas.width, result.canvas.height);
|
||||
const ctx = canvas.getContext('2d');
|
||||
if (ctx) ctx.drawImage(result.canvas, 0, 0);
|
||||
const img = ctx ? ctx.getImageData(0, 0, result.canvas.width, result.canvas.height) : null;
|
||||
result.canvas = null; // must strip original canvas from return value as it cannot be transferred from worker thread
|
||||
if (img) postMessage({ result, image: img.data.buffer, width: msg.data.width, height: msg.data.height }, [img.data.buffer]);
|
||||
else postMessage({ result }); // send message back to main thread with canvas
|
||||
} else {
|
||||
postMessage({ result }); // send message back to main thread without canvas
|
||||
}
|
||||
// must strip canvas from return value as it cannot be transferred from worker thread
|
||||
if (result.canvas) result.canvas = null;
|
||||
// @ts-ignore tslint wrong type matching for worker
|
||||
postMessage({ result });
|
||||
busy = false;
|
||||
};
|
||||
|
|
|
@ -1,13 +1,14 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>Human</title>
|
||||
<meta http-equiv="content-type" content="text/html; charset=utf-8">
|
||||
<meta name="viewport" content="width=device-width" id="viewport">
|
||||
<meta name="keywords" content="Human">
|
||||
<meta name="application-name" content="Human">
|
||||
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="theme-color" content="#000000">
|
||||
<link rel="manifest" href="./manifest.webmanifest">
|
||||
<link rel="shortcut icon" href="../favicon.ico" type="image/x-icon">
|
||||
<link rel="apple-touch-icon" href="../assets/icon.png">
|
||||
|
@ -34,7 +35,7 @@
|
|||
.video { display: none; }
|
||||
.canvas { margin: 0 auto; }
|
||||
.bench { position: absolute; right: 0; bottom: 0; }
|
||||
.compare-image { width: 200px; position: absolute; top: 150px; left: 30px; box-shadow: 0 0 2px 2px black; background: black; }
|
||||
.compare-image { width: 200px; position: absolute; top: 150px; left: 30px; box-shadow: 0 0 2px 2px black; background: black; display: none; }
|
||||
.loader { width: 300px; height: 300px; border: 3px solid transparent; border-radius: 50%; border-top: 4px solid #f15e41; animation: spin 4s linear infinite; position: absolute; bottom: 15%; left: 50%; margin-left: -150px; z-index: 15; }
|
||||
.loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: 4px solid transparent; }
|
||||
.loader::before { border-top-color: #bad375; animation: 3s spin linear infinite; }
|
||||
|
@ -63,33 +64,59 @@
|
|||
|
||||
.icon { width: 180px; text-align: -webkit-center; text-align: -moz-center; filter: grayscale(1); }
|
||||
.icon:hover { background: #505050; filter: grayscale(0); }
|
||||
.hint { opacity: 0; transition-duration: 0.5s; transition-property: opacity; font-style: italic; position: fixed; top: 5rem; padding: 8px; margin: 8px; box-shadow: 0 0 2px 2px #303030; }
|
||||
.input-file { align-self: center; width: 5rem; }
|
||||
|
||||
.results { position: absolute; left: 0; top: 5rem; background: #303030; width: 20rem; height: 90%; font-size: 0.8rem; overflow-y: auto; display: none }
|
||||
.results::-webkit-scrollbar { background-color: #303030; }
|
||||
.results::-webkit-scrollbar-thumb { background: black; border-radius: 10px; }
|
||||
.json-line { margin: 4px 0; display: flex; justify-content: flex-start; }
|
||||
.json { margin-right: 8px; margin-left: 8px; }
|
||||
.json-type { color: lightyellow; }
|
||||
.json-key { color: white; }
|
||||
.json-index { color: lightcoral; }
|
||||
.json-value { margin-left: 20px; }
|
||||
.json-number { color: lightgreen; }
|
||||
.json-boolean { color: lightyellow; }
|
||||
.json-string { color: lightblue; }
|
||||
.json-size { color: gray; }
|
||||
.hide { display: none; }
|
||||
.fas { display: inline-block; width: 0; height: 0; border-style: solid; }
|
||||
.fa-caret-down { border-width: 10px 8px 0 8px; border-color: white transparent }
|
||||
.fa-caret-right { border-width: 10px 0 8px 10px; border-color: transparent transparent transparent white; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div id="play" class="play icon-play"></div>
|
||||
<div id="background">
|
||||
<div class='wave one'></div>
|
||||
<div class='wave two'></div>
|
||||
<div class='wave three'></div>
|
||||
<div class="wave one"></div>
|
||||
<div class="wave two"></div>
|
||||
<div class="wave three"></div>
|
||||
</div>
|
||||
<div id="loader" class="loader"></div>
|
||||
<div id="status" class="status"></div>
|
||||
<div id="menubar" class="menubar">
|
||||
<div id="btnDisplay" class="icon"><div class="icon-binoculars"> </div>display options</div>
|
||||
<div id="btnImage" class="icon"><div class="icon-brush"></div>image processing</div>
|
||||
<div id="btnProcess" class="icon"><div class="icon-stats"></div>model processing</div>
|
||||
<div id="btnModel" class="icon"><div class="icon-games"></div>model selection</div>
|
||||
<div id="btnDisplay" class="icon"><div class="icon-binoculars"> </div>display</div>
|
||||
<div id="btnImage" class="icon"><div class="icon-brush"></div>input</div>
|
||||
<div id="btnProcess" class="icon"><div class="icon-stats"></div>options</div>
|
||||
<div id="btnModel" class="icon"><div class="icon-games"></div>models</div>
|
||||
<div id="btnStart" class="icon"><div class="icon-webcam"></div><span id="btnStartText">start video</span></div>
|
||||
</div>
|
||||
<div id="media">
|
||||
<canvas id="canvas" class="canvas"></canvas>
|
||||
<video id="video" playsinline class="video"></video>
|
||||
</div>
|
||||
<div id="compare-container" style="display: none" class="compare-image">
|
||||
<div id="compare-container" class="compare-image">
|
||||
<canvas id="compare-canvas" width="200" height="200"></canvas>
|
||||
<div id="similarity"></div>
|
||||
</div>
|
||||
<div id="segmentation-container" class="compare-image">
|
||||
<canvas id="segmentation-mask" width="256" height="256" style="width: 256px; height: 256px;"></canvas>
|
||||
<canvas id="segmentation-canvas" width="256" height="256" style="width: 256px; height: 256px;"></canvas>
|
||||
</div>
|
||||
<div id="samples-container" class="samples-container"></div>
|
||||
<div id="hint" class="hint"></div>
|
||||
<div id="log" class="log"></div>
|
||||
<div id="results" class="results"></div>
|
||||
</body>
|
||||
</html>
|
||||
|
|
demo/index.js (903 lines changed)
|
@ -1,10 +1,10 @@
|
|||
{
|
||||
"name": "Human Library",
|
||||
"short_name": "Human",
|
||||
"icons": [{ "src": "../assets/icon.png", "sizes": "500x484", "type": "image/png", "purpose": "any maskable" }],
|
||||
"icons": [{ "src": "../assets/icon.png", "sizes": "512x512", "type": "image/png", "purpose": "any maskable" }],
|
||||
"start_url": "./index.html",
|
||||
"scope": "/",
|
||||
"display": "standalone",
|
||||
"background_color": "#000000",
|
||||
"theme_color": "#000000"
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,70 @@
|
|||
# Human Multithreading Demos
|
||||
|
||||
- **Browser** demo `multithread` & `worker`
|
||||
Runs each `human` module in a separate web worker for highest possible performance
|
||||
- **NodeJS** demo `node-multiprocess` & `node-multiprocess-worker`
|
||||
Runs multiple parallel `human` detections by dispatching them to a pool of pre-created worker processes (see the sketch below)
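A minimal sketch of that dispatch pattern, using the message shapes the worker already understands (`{ ready }`, `{ image }`, `{ detected }`, `{ exit }`); the image list is a placeholder, the real demo enumerates a folder:

```js
// minimal sketch: dispatch images to a pre-forked worker process
// image paths are placeholders; node-multiprocess.js enumerates a folder instead
const { fork } = require('child_process');

const images = ['samples/ai-face.jpg', 'samples/ai-body.jpg']; // queue of work items
const worker = fork('demo/multithread/node-multiprocess-worker.js');

worker.on('message', (msg) => {
  if (msg.ready) { // worker is idle: dispatch the next image or tell it to exit
    const image = images.shift();
    worker.send(image ? { image } : { exit: true });
  }
  if (msg.detected) console.log('worker finished:', msg.image, 'faces:', msg.detected.face?.length); // eslint-disable-line no-console
});
```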
|
||||
|
||||
<br><hr><br>
|
||||
|
||||
## NodeJS Multi-process Demo
|
||||
|
||||
`nodejs/node-multiprocess.js` and `nodejs/node-multiprocess-worker.js`: Demo using NodeJS with CommonJS module
|
||||
Demo that starts n child worker processes for parallel execution
|
||||
|
||||
```shell
|
||||
node demo/nodejs/node-multiprocess.js
|
||||
```
|
||||
|
||||
```json
|
||||
2021-06-01 08:54:19 INFO: @vladmandic/human version 2.0.0
|
||||
2021-06-01 08:54:19 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0
|
||||
2021-06-01 08:54:19 INFO: Human multi-process test
|
||||
2021-06-01 08:54:19 STATE: Enumerated images: ./assets 15
|
||||
2021-06-01 08:54:19 STATE: Main: started worker: 130362
|
||||
2021-06-01 08:54:19 STATE: Main: started worker: 130363
|
||||
2021-06-01 08:54:19 STATE: Main: started worker: 130369
|
||||
2021-06-01 08:54:19 STATE: Main: started worker: 130370
|
||||
2021-06-01 08:54:20 STATE: Worker: PID: 130370 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
|
||||
2021-06-01 08:54:20 STATE: Worker: PID: 130362 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
|
||||
2021-06-01 08:54:20 STATE: Worker: PID: 130369 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
|
||||
2021-06-01 08:54:20 STATE: Worker: PID: 130363 TensorFlow/JS 3.6.0 Human 2.0.0 Backend: tensorflow
|
||||
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130370
|
||||
2021-06-01 08:54:21 INFO: Latency: worker initializtion: 1348 message round trip: 0
|
||||
2021-06-01 08:54:21 DATA: Worker received message: 130370 { test: true }
|
||||
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130362
|
||||
2021-06-01 08:54:21 DATA: Worker received message: 130362 { image: 'samples/ai-face.jpg' }
|
||||
2021-06-01 08:54:21 DATA: Worker received message: 130370 { image: 'samples/ai-body.jpg' }
|
||||
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130369
|
||||
2021-06-01 08:54:21 STATE: Main: dispatching to worker: 130363
|
||||
2021-06-01 08:54:21 DATA: Worker received message: 130369 { image: 'assets/human-sample-upper.jpg' }
|
||||
2021-06-01 08:54:21 DATA: Worker received message: 130363 { image: 'assets/sample-me.jpg' }
|
||||
2021-06-01 08:54:24 DATA: Main: worker finished: 130362 detected faces: 1 bodies: 1 hands: 0 objects: 1
|
||||
2021-06-01 08:54:24 STATE: Main: dispatching to worker: 130362
|
||||
2021-06-01 08:54:24 DATA: Worker received message: 130362 { image: 'assets/sample1.jpg' }
|
||||
2021-06-01 08:54:25 DATA: Main: worker finished: 130369 detected faces: 1 bodies: 1 hands: 0 objects: 1
|
||||
2021-06-01 08:54:25 STATE: Main: dispatching to worker: 130369
|
||||
2021-06-01 08:54:25 DATA: Main: worker finished: 130370 detected faces: 1 bodies: 1 hands: 0 objects: 1
|
||||
2021-06-01 08:54:25 STATE: Main: dispatching to worker: 130370
|
||||
2021-06-01 08:54:25 DATA: Worker received message: 130369 { image: 'assets/sample2.jpg' }
|
||||
2021-06-01 08:54:25 DATA: Main: worker finished: 130363 detected faces: 1 bodies: 1 hands: 0 objects: 2
|
||||
2021-06-01 08:54:25 STATE: Main: dispatching to worker: 130363
|
||||
2021-06-01 08:54:25 DATA: Worker received message: 130370 { image: 'assets/sample3.jpg' }
|
||||
2021-06-01 08:54:25 DATA: Worker received message: 130363 { image: 'assets/sample4.jpg' }
|
||||
2021-06-01 08:54:30 DATA: Main: worker finished: 130362 detected faces: 3 bodies: 1 hands: 0 objects: 7
|
||||
2021-06-01 08:54:30 STATE: Main: dispatching to worker: 130362
|
||||
2021-06-01 08:54:30 DATA: Worker received message: 130362 { image: 'assets/sample5.jpg' }
|
||||
2021-06-01 08:54:31 DATA: Main: worker finished: 130369 detected faces: 3 bodies: 1 hands: 0 objects: 5
|
||||
2021-06-01 08:54:31 STATE: Main: dispatching to worker: 130369
|
||||
2021-06-01 08:54:31 DATA: Worker received message: 130369 { image: 'assets/sample6.jpg' }
|
||||
2021-06-01 08:54:31 DATA: Main: worker finished: 130363 detected faces: 4 bodies: 1 hands: 2 objects: 2
|
||||
2021-06-01 08:54:31 STATE: Main: dispatching to worker: 130363
|
||||
2021-06-01 08:54:39 STATE: Main: worker exit: 130370 0
|
||||
2021-06-01 08:54:39 DATA: Main: worker finished: 130362 detected faces: 1 bodies: 1 hands: 0 objects: 1
|
||||
2021-06-01 08:54:39 DATA: Main: worker finished: 130369 detected faces: 1 bodies: 1 hands: 1 objects: 3
|
||||
2021-06-01 08:54:39 STATE: Main: worker exit: 130362 0
|
||||
2021-06-01 08:54:39 STATE: Main: worker exit: 130369 0
|
||||
2021-06-01 08:54:41 DATA: Main: worker finished: 130363 detected faces: 9 bodies: 1 hands: 0 objects: 10
|
||||
2021-06-01 08:54:41 STATE: Main: worker exit: 130363 0
|
||||
2021-06-01 08:54:41 INFO: Processed: 15 images in total: 22006 ms working: 20658 ms average: 1377 ms
|
||||
```
|
|
@ -0,0 +1,33 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>Human</title>
|
||||
<meta name="viewport" content="width=device-width" id="viewport">
|
||||
<meta name="keywords" content="Human">
|
||||
<meta name="application-name" content="Human">
|
||||
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="theme-color" content="#000000">
|
||||
<link rel="manifest" href="../manifest.webmanifest">
|
||||
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
|
||||
<link rel="apple-touch-icon" href="../../assets/icon.png">
|
||||
<script src="./index.js" type="module"></script>
|
||||
<style>
|
||||
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
|
||||
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
|
||||
body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
|
||||
body::-webkit-scrollbar { display: none; }
|
||||
.status { position: absolute; width: 100vw; bottom: 10%; text-align: center; font-size: 3rem; font-weight: 100; text-shadow: 2px 2px #303030; }
|
||||
.log { position: absolute; bottom: 0; margin: 0.4rem 0.4rem 0 0.4rem; font-size: 0.9rem; }
|
||||
.video { display: none; }
|
||||
.canvas { margin: 0 auto; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div id="status" class="status"></div>
|
||||
<canvas id="canvas" class="canvas"></canvas>
|
||||
<video id="video" playsinline class="video"></video>
|
||||
<div id="log" class="log"></div>
|
||||
</body>
|
||||
</html>
|
|
@ -0,0 +1,264 @@
|
|||
/**
|
||||
* Human demo for browsers
|
||||
*
|
||||
* @description Demo app that enables all Human modules and runs them in separate worker threads
|
||||
*
|
||||
*/
|
||||
|
||||
import { Human } from '../../dist/human.esm.js'; // equivalent of @vladmandic/human
|
||||
import GLBench from '../helpers/gl-bench.js';
|
||||
|
||||
const workerJS = './worker.js';
|
||||
|
||||
const config = {
|
||||
main: { // processes input and runs gesture analysis
|
||||
warmup: 'none',
|
||||
backend: 'webgl',
|
||||
modelBasePath: '../../models/',
|
||||
async: false,
|
||||
filter: { enabled: true },
|
||||
face: { enabled: false },
|
||||
object: { enabled: false },
|
||||
gesture: { enabled: true },
|
||||
hand: { enabled: false },
|
||||
body: { enabled: false },
|
||||
segmentation: { enabled: false },
|
||||
},
|
||||
face: { // runs all face models
|
||||
warmup: 'none',
|
||||
backend: 'webgl',
|
||||
modelBasePath: '../../models/',
|
||||
async: false,
|
||||
filter: { enabled: false },
|
||||
face: { enabled: true },
|
||||
object: { enabled: false },
|
||||
gesture: { enabled: false },
|
||||
hand: { enabled: false },
|
||||
body: { enabled: false },
|
||||
segmentation: { enabled: false },
|
||||
},
|
||||
body: { // runs body model
|
||||
warmup: 'none',
|
||||
backend: 'webgl',
|
||||
modelBasePath: '../../models/',
|
||||
async: false,
|
||||
filter: { enabled: false },
|
||||
face: { enabled: false },
|
||||
object: { enabled: false },
|
||||
gesture: { enabled: false },
|
||||
hand: { enabled: false },
|
||||
body: { enabled: true },
|
||||
segmentation: { enabled: false },
|
||||
},
|
||||
hand: { // runs hands model
|
||||
warmup: 'none',
|
||||
backend: 'webgl',
|
||||
modelBasePath: '../../models/',
|
||||
async: false,
|
||||
filter: { enabled: false },
|
||||
face: { enabled: false },
|
||||
object: { enabled: false },
|
||||
gesture: { enabled: false },
|
||||
hand: { enabled: true },
|
||||
body: { enabled: false },
|
||||
segmentation: { enabled: false },
|
||||
},
|
||||
object: { // runs object model
|
||||
warmup: 'none',
|
||||
backend: 'webgl',
|
||||
modelBasePath: '../../models/',
|
||||
async: false,
|
||||
filter: { enabled: false },
|
||||
face: { enabled: false },
|
||||
object: { enabled: true },
|
||||
gesture: { enabled: false },
|
||||
hand: { enabled: false },
|
||||
body: { enabled: false },
|
||||
segmentation: { enabled: false },
|
||||
},
|
||||
};
|
||||
|
||||
let human;
|
||||
let canvas;
|
||||
let video;
|
||||
let bench;
|
||||
|
||||
const busy = {
|
||||
face: false,
|
||||
hand: false,
|
||||
body: false,
|
||||
object: false,
|
||||
};
|
||||
|
||||
const workers = {
|
||||
/** @type {Worker | null} */
|
||||
face: null,
|
||||
/** @type {Worker | null} */
|
||||
body: null,
|
||||
/** @type {Worker | null} */
|
||||
hand: null,
|
||||
/** @type {Worker | null} */
|
||||
object: null,
|
||||
};
|
||||
|
||||
const time = {
|
||||
main: 0,
|
||||
draw: 0,
|
||||
face: '[warmup]',
|
||||
body: '[warmup]',
|
||||
hand: '[warmup]',
|
||||
object: '[warmup]',
|
||||
};
|
||||
|
||||
const start = {
|
||||
main: 0,
|
||||
draw: 0,
|
||||
face: 0,
|
||||
body: 0,
|
||||
hand: 0,
|
||||
object: 0,
|
||||
};
|
||||
|
||||
const result = { // initialize empty result object which will be partially filled with results from each thread
|
||||
performance: {},
|
||||
hand: [],
|
||||
body: [],
|
||||
face: [],
|
||||
object: [],
|
||||
};
|
||||
|
||||
function log(...msg) {
|
||||
const dt = new Date();
|
||||
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
|
||||
console.log(ts, ...msg); // eslint-disable-line no-console
|
||||
}
|
||||
|
||||
async function drawResults() {
|
||||
start.draw = human.now();
|
||||
const interpolated = human.next(result);
|
||||
await human.draw.all(canvas, interpolated);
|
||||
time.draw = Math.round(1 + human.now() - start.draw);
|
||||
const fps = Math.round(10 * 1000 / time.main) / 10;
|
||||
const draw = Math.round(10 * 1000 / time.draw) / 10;
|
||||
const div = document.getElementById('log');
|
||||
if (div) div.innerText = `Human: version ${human.version} | Performance: Main ${time.main}ms Face: ${time.face}ms Body: ${time.body}ms Hand: ${time.hand}ms Object ${time.object}ms | FPS: ${fps} / ${draw}`;
|
||||
requestAnimationFrame(drawResults);
|
||||
}
|
||||
|
||||
async function receiveMessage(msg) {
|
||||
result[msg.data.type] = msg.data.result;
|
||||
busy[msg.data.type] = false;
|
||||
time[msg.data.type] = Math.round(human.now() - start[msg.data.type]);
|
||||
}
|
||||
|
||||
async function runDetection() {
|
||||
start.main = human.now();
|
||||
if (!bench) {
|
||||
bench = new GLBench(null, { trackGPU: false, chartHz: 20, chartLen: 20 });
|
||||
bench.begin('human');
|
||||
}
|
||||
const ctx = canvas.getContext('2d');
|
||||
ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
|
||||
const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
|
||||
if (!busy.face) {
|
||||
busy.face = true;
|
||||
start.face = human.now();
|
||||
if (workers.face) workers.face.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.face, type: 'face' }, [imageData.data.buffer.slice(0)]);
|
||||
}
|
||||
if (!busy.body) {
|
||||
busy.body = true;
|
||||
start.body = human.now();
|
||||
if (workers.body) workers.body.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.body, type: 'body' }, [imageData.data.buffer.slice(0)]);
|
||||
}
|
||||
if (!busy.hand) {
|
||||
busy.hand = true;
|
||||
start.hand = human.now();
|
||||
if (workers.hand) workers.hand.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.hand, type: 'hand' }, [imageData.data.buffer.slice(0)]);
|
||||
}
|
||||
if (!busy.object) {
|
||||
busy.object = true;
|
||||
start.object = human.now();
|
||||
if (workers.object) workers.object.postMessage({ image: imageData.data.buffer, width: canvas.width, height: canvas.height, config: config.object, type: 'object' }, [imageData.data.buffer.slice(0)]);
|
||||
}
|
||||
|
||||
time.main = Math.round(human.now() - start.main);
|
||||
|
||||
bench.nextFrame();
|
||||
requestAnimationFrame(runDetection);
|
||||
}
|
||||
|
||||
async function setupCamera() {
|
||||
video = document.getElementById('video');
|
||||
canvas = document.getElementById('canvas');
|
||||
const output = document.getElementById('log');
|
||||
let stream;
|
||||
const constraints = {
|
||||
audio: false,
|
||||
video: {
|
||||
facingMode: 'user',
|
||||
resizeMode: 'crop-and-scale',
|
||||
width: { ideal: document.body.clientWidth },
|
||||
aspectRatio: document.body.clientWidth / document.body.clientHeight,
|
||||
},
|
||||
};
|
||||
// enumerate devices for diag purposes
|
||||
navigator.mediaDevices.enumerateDevices()
|
||||
.then((devices) => log('enumerated devices:', devices))
|
||||
.catch(() => log('mediaDevices error'));
|
||||
log('camera constraints', constraints);
|
||||
try {
|
||||
stream = await navigator.mediaDevices.getUserMedia(constraints);
|
||||
} catch (err) {
|
||||
if (output) output.innerText += `\n${err.name}: ${err.message}`;
|
||||
log('camera error:', err);
|
||||
}
|
||||
if (stream) {
|
||||
const tracks = stream.getVideoTracks();
|
||||
log('enumerated viable tracks:', tracks);
|
||||
const track = stream.getVideoTracks()[0];
|
||||
const settings = track.getSettings();
|
||||
log('selected video source:', track, settings);
|
||||
} else {
|
||||
log('missing video stream');
|
||||
}
|
||||
const promise = !stream || new Promise((resolve) => {
|
||||
video.onloadeddata = () => {
|
||||
canvas.style.height = '100vh';
|
||||
canvas.width = video.videoWidth;
|
||||
canvas.height = video.videoHeight;
|
||||
video.play();
|
||||
resolve(true);
|
||||
};
|
||||
});
|
||||
// attach input to video element
|
||||
if (stream && video) video.srcObject = stream;
|
||||
return promise;
|
||||
}
|
||||
|
||||
async function startWorkers() {
|
||||
if (!workers.face) workers.face = new Worker(workerJS);
|
||||
if (!workers.body) workers.body = new Worker(workerJS);
|
||||
if (!workers.hand) workers.hand = new Worker(workerJS);
|
||||
if (!workers.object) workers.object = new Worker(workerJS);
|
||||
workers.face.onmessage = receiveMessage;
|
||||
workers.body.onmessage = receiveMessage;
|
||||
workers.hand.onmessage = receiveMessage;
|
||||
workers.object.onmessage = receiveMessage;
|
||||
}
|
||||
|
||||
async function main() {
|
||||
if (typeof Worker === 'undefined' || typeof OffscreenCanvas === 'undefined') {
|
||||
return;
|
||||
}
|
||||
|
||||
human = new Human(config.main);
|
||||
const div = document.getElementById('log');
|
||||
if (div) div.innerText = `Human: version ${human.version}`;
|
||||
|
||||
await startWorkers();
|
||||
await setupCamera();
|
||||
runDetection();
|
||||
drawResults();
|
||||
}
|
||||
|
||||
window.onload = main;
|
|
@ -1,17 +1,21 @@
|
|||
// @ts-nocheck
|
||||
/**
|
||||
* Human demo for NodeJS
|
||||
*
|
||||
* Used by node-multiprocess.js as an on-demand started worker process
|
||||
* Receives messages from parent process and sends results
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const log = require('@vladmandic/pilogger');
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
|
||||
// workers actual import tfjs and faceapi modules
|
||||
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
|
||||
const tf = require('@tensorflow/tfjs-node');
|
||||
const Human = require('../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
|
||||
// workers actually import tfjs and human modules
|
||||
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
|
||||
|
||||
let human = null;
|
||||
|
||||
const myConfig = {
|
||||
backend: 'tensorflow',
|
||||
// backend: 'tensorflow',
|
||||
modelBasePath: 'file://models/',
|
||||
debug: false,
|
||||
async: true,
|
||||
|
@ -19,19 +23,19 @@ const myConfig = {
|
|||
enabled: true,
|
||||
detector: { enabled: true, rotation: false },
|
||||
mesh: { enabled: true },
|
||||
iris: { enabled: false },
|
||||
iris: { enabled: true },
|
||||
description: { enabled: true },
|
||||
emotion: { enabled: true },
|
||||
},
|
||||
hand: {
|
||||
enabled: false,
|
||||
enabled: true,
|
||||
},
|
||||
// body: { modelPath: 'blazepose.json', enabled: true },
|
||||
body: { enabled: false },
|
||||
object: { enabled: false },
|
||||
body: { enabled: true },
|
||||
object: { enabled: true },
|
||||
};
|
||||
|
||||
// read image from a file and create tensor to be used by faceapi
|
||||
// read image from a file and create tensor to be used by human
|
||||
// this way we don't need any monkey patches
|
||||
// you can add any pre-proocessing here such as resizing, etc.
|
||||
async function image(img) {
|
||||
|
@ -40,13 +44,15 @@ async function image(img) {
|
|||
return tensor;
|
||||
}
|
||||
|
||||
// actual faceapi detection
|
||||
// actual human detection
|
||||
async function detect(img) {
|
||||
const tensor = await image(img);
|
||||
const result = await human.detect(tensor);
|
||||
process.send({ image: img, detected: result }); // send results back to main
|
||||
process.send({ ready: true }); // send signal back to main that this worker is now idle and ready for next image
|
||||
tensor.dispose();
|
||||
if (process.send) { // check if ipc exists
|
||||
process.send({ image: img, detected: result }); // send results back to main
|
||||
process.send({ ready: true }); // send signal back to main that this worker is now idle and ready for next image
|
||||
}
|
||||
tf.dispose(tensor);
|
||||
}
|
||||
|
||||
async function main() {
|
||||
|
@ -57,8 +63,9 @@ async function main() {
|
|||
|
||||
// on worker start first initialize message handler so we don't miss any messages
|
||||
process.on('message', (msg) => {
|
||||
if (msg.exit) process.exit(); // if main told worker to exit
|
||||
if (msg.test) process.send({ test: true });
|
||||
// if main told worker to exit
|
||||
if (msg.exit && process.exit) process.exit(); // eslint-disable-line no-process-exit
|
||||
if (msg.test && process.send) process.send({ test: true });
|
||||
if (msg.image) detect(msg.image); // if main told worker to process image
|
||||
log.data('Worker received message:', process.pid, msg); // generic log
|
||||
});
|
||||
|
@ -68,11 +75,11 @@ async function main() {
|
|||
// wait until tf is ready
|
||||
await human.tf.ready();
|
||||
// pre-load models
|
||||
log.state('Worker: PID:', process.pid, `TensorFlow/JS ${human.tf.version_core} Human ${human.version} Backend: ${human.tf.getBackend()}`);
|
||||
log.state('Worker: PID:', process.pid, `TensorFlow/JS ${human.tf.version['tfjs-core']} Human ${human.version} Backend: ${human.tf.getBackend()}`);
|
||||
await human.load();
|
||||
|
||||
// now we're ready, so send message back to main that it knows it can use this worker
|
||||
process.send({ ready: true });
|
||||
if (process.send) process.send({ ready: true });
|
||||
}
|
||||
|
||||
main();
|
|
@ -1,13 +1,19 @@
|
|||
// @ts-nocheck
|
||||
/**
|
||||
* Human demo for NodeJS
|
||||
*
|
||||
* Uses NodeJS fork functionality with inter-processing-messaging
|
||||
* Starts a pool of worker processes and dispatches work items to each worker as it becomes available
|
||||
* Uses node-multiprocess-worker.js for actual processing
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
// eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require
|
||||
const log = require('@vladmandic/pilogger'); // this is my simple logger with few extra features
|
||||
const child_process = require('child_process');
|
||||
// note that main process does not import faceapi or tfjs at all
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
const childProcess = require('child_process'); // eslint-disable-line camelcase
|
||||
// note that main process does not import human or tfjs at all, it's all done from worker process
|
||||
|
||||
const imgPathRoot = './demo'; // modify to include your sample images
|
||||
const workerFile = 'demo/multithread/node-multiprocess-worker.js';
|
||||
const imgPathRoot = './assets'; // modify to include your sample images
|
||||
const numWorkers = 4; // how many workers will be started
|
||||
const workers = []; // this holds worker processes
|
||||
const images = []; // this holds queue of enumerated images
|
||||
|
@ -33,14 +39,14 @@ function waitCompletion() {
|
|||
if (activeWorkers > 0) setImmediate(() => waitCompletion());
|
||||
else {
|
||||
t[1] = process.hrtime.bigint();
|
||||
log.info('Processed:', numImages, 'images in', 'total:', Math.trunc(parseInt(t[1] - t[0]) / 1000000), 'ms', 'working:', Math.trunc(parseInt(t[1] - t[2]) / 1000000), 'ms', 'average:', Math.trunc(parseInt(t[1] - t[2]) / numImages / 1000000), 'ms');
|
||||
log.info('Processed:', numImages, 'images in', 'total:', Math.trunc(Number(t[1] - t[0]) / 1000000), 'ms', 'working:', Math.trunc(Number(t[1] - t[2]) / 1000000), 'ms', 'average:', Math.trunc(Number(t[1] - t[2]) / numImages / 1000000), 'ms');
|
||||
}
|
||||
}
|
||||
|
||||
function measureLatency() {
|
||||
t[3] = process.hrtime.bigint();
|
||||
const latencyInitialization = Math.trunc(parseInt(t[2] - t[0]) / 1000 / 1000);
|
||||
const latencyRoundTrip = Math.trunc(parseInt(t[3] - t[2]) / 1000 / 1000);
|
||||
const latencyInitialization = Math.trunc(Number(t[2] - t[0]) / 1000 / 1000);
|
||||
const latencyRoundTrip = Math.trunc(Number(t[3] - t[2]) / 1000 / 1000);
|
||||
log.info('Latency: worker initialization: ', latencyInitialization, 'message round trip:', latencyRoundTrip);
|
||||
}
|
||||
|
||||
|
@ -51,7 +57,7 @@ async function main() {
|
|||
});
|
||||
|
||||
log.header();
|
||||
log.info('FaceAPI multi-process test');
|
||||
log.info('Human multi-process test');
|
||||
|
||||
// enumerate all images into queue
|
||||
const dir = fs.readdirSync(imgPathRoot);
|
||||
|
@ -59,19 +65,22 @@ async function main() {
|
|||
if (imgFile.toLocaleLowerCase().endsWith('.jpg')) images.push(path.join(imgPathRoot, imgFile));
|
||||
}
|
||||
numImages = images.length;
|
||||
log.state('Enumerated images:', imgPathRoot, numImages);
|
||||
|
||||
t[0] = process.hrtime.bigint();
|
||||
t[1] = process.hrtime.bigint();
|
||||
t[2] = process.hrtime.bigint();
|
||||
// manage worker processes
|
||||
for (let i = 0; i < numWorkers; i++) {
|
||||
// create worker process
|
||||
workers[i] = await child_process.fork('demo/node-multiprocess-worker.js', ['special']);
|
||||
workers[i] = await childProcess.fork(workerFile, ['special']);
|
||||
// parse message that worker process sends back to main
|
||||
// if message is ready, dispatch next image in queue
|
||||
// if message is processing result, just print how many faces were detected
|
||||
// otherwise it's an unknown message
|
||||
workers[i].on('message', (msg) => {
|
||||
if (msg.ready) detect(workers[i]);
|
||||
else if (msg.image) log.data('Main: worker finished:', workers[i].pid, 'detected faces:', msg.detected.face?.length);
|
||||
else if (msg.image) log.data('Main: worker finished:', workers[i].pid, 'detected faces:', msg.detected.face?.length, 'bodies:', msg.detected.body?.length, 'hands:', msg.detected.hand?.length, 'objects:', msg.detected.object?.length);
|
||||
else if (msg.test) measureLatency();
|
||||
else log.data('Main: worker message:', workers[i].pid, msg);
|
||||
});
|
|
@ -0,0 +1,18 @@
|
|||
/// <reference lib="webworker" />
|
||||
|
||||
// load Human using IIFE script as Chrome Mobile does not support Modules as Workers
|
||||
self.importScripts('../../dist/human.js'); // eslint-disable-line no-restricted-globals
|
||||
|
||||
let human;
|
||||
|
||||
onmessage = async (msg) => {
|
||||
// received from index.js using:
|
||||
// worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
|
||||
|
||||
// Human is registered as global namespace using IIFE script
|
||||
if (!human) human = new Human.default(msg.data.config); // eslint-disable-line no-undef, new-cap
|
||||
const image = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
|
||||
let result = {};
|
||||
result = await human.detect(image, msg.data.config);
|
||||
postMessage({ result: result[msg.data.type], type: msg.data.type });
|
||||
};
|
|
@ -1,66 +0,0 @@
|
|||
/*
|
||||
Unsupported sample of using the external utility fswebcam to capture screenshots from an attached webcam at regular intervals and process them using Human
|
||||
*/
|
||||
|
||||
const util = require('util');
|
||||
const log = require('@vladmandic/pilogger');
|
||||
// eslint-disable-next-line node/no-missing-require
|
||||
const nodeWebCam = require('node-webcam');
|
||||
// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
|
||||
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
|
||||
const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
|
||||
// load specific version of Human library that matches TensorFlow mode
|
||||
const Human = require('../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
|
||||
|
||||
// options for node-webcam
|
||||
const optionsCamera = {
|
||||
callbackReturn: 'buffer', // return the raw buffer exactly as `fswebcam` writes it to disk, with no additional processing, so it's the fastest option
|
||||
saveShots: false, // don't save processed frame to disk, note that temp file is still created by fswebcam thus recommendation for tmpfs
|
||||
};
|
||||
|
||||
// options for human
|
||||
const optionsHuman = {
|
||||
backend: 'tensorflow',
|
||||
modelBasePath: 'file://node_modules/@vladmandic/human/models/',
|
||||
};
|
||||
|
||||
const camera = nodeWebCam.create(optionsCamera);
|
||||
const capture = util.promisify(camera.capture);
|
||||
const human = new Human(optionsHuman);
|
||||
const results = [];
|
||||
|
||||
const buffer2tensor = (buffer) => human.tf.tidy(() => { // wrap in a function so tidy runs (and cleans up intermediate tensors) on each call
|
||||
const decode = human.tf.node.decodeImage(buffer, 3);
|
||||
let expand;
|
||||
if (decode.shape[2] === 4) { // input is in rgba format, need to convert to rgb
|
||||
const channels = human.tf.split(decode, 4, 2); // tf.split(tensor, 4, 2); // split rgba to channels
|
||||
const rgb = human.tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb and ignore alpha
|
||||
expand = human.tf.reshape(rgb, [1, decode.shape[0], decode.shape[1], 3]); // move extra dim from the end of tensor and use it as batch number instead
|
||||
} else {
|
||||
expand = human.tf.expandDims(decode, 0); // input is rgb so use as-is
|
||||
}
|
||||
const cast = human.tf.cast(expand, 'float32');
|
||||
return cast;
|
||||
});
|
||||
|
||||
async function process() {
|
||||
// trigger next frame every 5 sec
|
||||
// triggered here before actual capture and detection since we assume it will complete in less than 5sec
|
||||
// so it's as close as possible to real 5sec and not 5sec + detection time
|
||||
// if there is a chance of a race scenario where detection takes longer than the loop interval, the trigger should be at the end of the function instead
|
||||
setTimeout(() => process(), 5000);
|
||||
|
||||
const buffer = await capture(); // gets the (default) jpeg data from the webcam
|
||||
const tensor = buffer2tensor(buffer); // create tensor from image buffer
|
||||
const res = await human.detect(tensor); // run detection
|
||||
|
||||
// do whatever here with the res
|
||||
// or just append it to results array that will contain all processed results over time
|
||||
results.push(res);
|
||||
|
||||
// alternatively to triggering every 5 sec, simply trigger the next frame as fast as possible
|
||||
// setImmediate(() => process());
|
||||
}
|
||||
|
||||
log.header();
|
||||
process();
|
|
@ -0,0 +1,120 @@
|
|||
# Human Demos for NodeJS
|
||||
|
||||
- `node`: Process images from files, folders or URLs
|
||||
uses native methods for image loading and decoding without external dependencies (see the sketch after this list)
|
||||
- `node-canvas`: Process image from file or URL and draw results to a new image file using `node-canvas`
|
||||
uses `node-canvas` library to load and decode images from files, draw detection results and write output to a new image file
|
||||
- `node-video`: Processing of video input using `ffmpeg`
|
||||
uses `ffmpeg` to decode video input (can be a file, stream or device such as webcam) and
|
||||
outputs results to a pipe that are captured by the demo app as frames and processed by the `Human` library
|
||||
- `node-webcam`: Processing of webcam screenshots using `fswebcam`
|
||||
uses `fswebcam` to connect to a web cam and take screenshots at regular intervals which are then processed by the `Human` library
|
||||
- `node-event`: Showcases usage of `Human` eventing to get notifications on processing
|
||||
- `node-similarity`: Compares two input images for similarity of detected faces
|
||||
- `process-folder`: Processes all images in an input folder and creates output images
|
||||
internally used to generate the samples gallery
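All of these demos share the same core pattern: load `tfjs-node` before `Human`, decode the input into a tensor, run detection, then dispose the tensor. A minimal sketch, assuming `human` is installed as a module and a local sample image exists:

```js
// minimal sketch of the shared pattern; module and image paths are assumptions
const fs = require('fs');
const tf = require('@tensorflow/tfjs-node'); // in nodejs environments tfjs-node must be loaded before human
const Human = require('@vladmandic/human'); // or require('../../dist/human.node.js') when running from the repo

async function run(file) {
  const human = new Human.Human({ modelBasePath: 'file://models/' });
  console.log('Human:', human.version, 'TF:', tf.version_core); // eslint-disable-line no-console
  const buffer = fs.readFileSync(file); // read image file into a buffer
  const tensor = human.tf.node.decodeImage(buffer, 3); // decode without external dependencies
  const result = await human.detect(tensor); // run all enabled models
  human.tf.dispose(tensor); // release the input tensor when done
  console.log('faces:', result.face.length, 'bodies:', result.body.length); // eslint-disable-line no-console
}

run(process.argv[2] || 'samples/in/ai-body.jpg');
```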
|
||||
|
||||
<br>
|
||||
|
||||
## Main Demo
|
||||
|
||||
`nodejs/node.js`: Demo using NodeJS with CommonJS module
|
||||
Simple demo that can process any input image
|
||||
|
||||
Note that you can run the demo as-is and it will perform detection on the provided sample images,
|
||||
or you can pass a path to an image to analyze, either on the local filesystem or via URL
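For example, passing `samples/in/ai-body.jpg` (one of the sample images used elsewhere in these demos) analyzes just that image.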
|
||||
|
||||
```shell
|
||||
node demo/nodejs/node.js
|
||||
```
|
||||
|
||||
```json
|
||||
2021-06-01 08:52:15 INFO: @vladmandic/human version 2.0.0
|
||||
2021-06-01 08:52:15 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0
|
||||
2021-06-01 08:52:15 INFO: Current folder: /home/vlado/dev/human
|
||||
2021-06-01 08:52:15 INFO: Human: 2.0.0
|
||||
2021-06-01 08:52:15 INFO: Active Configuration {
|
||||
backend: 'tensorflow',
|
||||
modelBasePath: 'file://models/',
|
||||
wasmPath: '../node_modules/@tensorflow/tfjs-backend-wasm/dist/',
|
||||
debug: true,
|
||||
async: false,
|
||||
warmup: 'full',
|
||||
cacheSensitivity: 0.75,
|
||||
filter: {
|
||||
enabled: true,
|
||||
width: 0,
|
||||
height: 0,
|
||||
flip: true,
|
||||
return: true,
|
||||
brightness: 0,
|
||||
contrast: 0,
|
||||
sharpness: 0,
|
||||
blur: 0,
|
||||
saturation: 0,
|
||||
hue: 0,
|
||||
negative: false,
|
||||
sepia: false,
|
||||
vintage: false,
|
||||
kodachrome: false,
|
||||
technicolor: false,
|
||||
polaroid: false,
|
||||
pixelate: 0
|
||||
},
|
||||
gesture: { enabled: true },
|
||||
face: {
|
||||
enabled: true,
|
||||
detector: { modelPath: 'blazeface.json', rotation: false, maxDetected: 10, skipFrames: 15, minConfidence: 0.2, iouThreshold: 0.1, return: false, enabled: true },
|
||||
mesh: { enabled: true, modelPath: 'facemesh.json' },
|
||||
iris: { enabled: true, modelPath: 'iris.json' },
|
||||
description: { enabled: true, modelPath: 'faceres.json', skipFrames: 16, minConfidence: 0.1 },
|
||||
emotion: { enabled: true, minConfidence: 0.1, skipFrames: 17, modelPath: 'emotion.json' }
|
||||
},
|
||||
body: { enabled: true, modelPath: 'movenet-lightning.json', maxDetected: 1, minConfidence: 0.2 },
|
||||
hand: {
|
||||
enabled: true,
|
||||
rotation: true,
|
||||
skipFrames: 18,
|
||||
minConfidence: 0.1,
|
||||
iouThreshold: 0.1,
|
||||
maxDetected: 2,
|
||||
landmarks: true,
|
||||
detector: { modelPath: 'handdetect.json' },
|
||||
skeleton: { modelPath: 'handskeleton.json' }
|
||||
},
|
||||
object: { enabled: true, modelPath: 'mb3-centernet.json', minConfidence: 0.2, iouThreshold: 0.4, maxDetected: 10, skipFrames: 19 }
|
||||
}
|
||||
08:52:15.673 Human: version: 2.0.0
|
||||
08:52:15.674 Human: tfjs version: 3.6.0
|
||||
08:52:15.674 Human: platform: linux x64
|
||||
08:52:15.674 Human: agent: NodeJS v16.0.0
|
||||
08:52:15.674 Human: setting backend: tensorflow
|
||||
08:52:15.710 Human: load model: file://models/blazeface.json
|
||||
08:52:15.743 Human: load model: file://models/facemesh.json
|
||||
08:52:15.744 Human: load model: file://models/iris.json
|
||||
08:52:15.760 Human: load model: file://models/emotion.json
|
||||
08:52:15.847 Human: load model: file://models/handdetect.json
|
||||
08:52:15.847 Human: load model: file://models/handskeleton.json
|
||||
08:52:15.914 Human: load model: file://models/movenet-lightning.json
|
||||
08:52:15.957 Human: load model: file://models/mb3-centernet.json
|
||||
08:52:16.015 Human: load model: file://models/faceres.json
|
||||
08:52:16.015 Human: tf engine state: 50796152 bytes 1318 tensors
|
||||
2021-06-01 08:52:16 INFO: Loaded: [ 'face', 'movenet', 'handpose', 'emotion', 'centernet', 'faceres', [length]: 6 ]
|
||||
2021-06-01 08:52:16 INFO: Memory state: { unreliable: true, numTensors: 1318, numDataBuffers: 1318, numBytes: 50796152 }
|
||||
2021-06-01 08:52:16 INFO: Loading image: private/daz3d/daz3d-kiaria-02.jpg
|
||||
2021-06-01 08:52:16 STATE: Processing: [ 1, 1300, 1000, 3, [length]: 4 ]
|
||||
2021-06-01 08:52:17 DATA: Results:
|
||||
2021-06-01 08:52:17 DATA: Face: #0 boxScore:0.88 faceScore:1 age:16.3 genderScore:0.97 gender:female emotionScore:0.85 emotion:happy iris:61.05
|
||||
2021-06-01 08:52:17 DATA: Body: #0 score:0.82 keypoints:17
|
||||
2021-06-01 08:52:17 DATA: Hand: #0 score:0.89
|
||||
2021-06-01 08:52:17 DATA: Hand: #1 score:0.97
|
||||
2021-06-01 08:52:17 DATA: Gesture: face#0 gesture:facing left
|
||||
2021-06-01 08:52:17 DATA: Gesture: body#0 gesture:leaning right
|
||||
2021-06-01 08:52:17 DATA: Gesture: hand#0 gesture:pinky forward middlefinger up
|
||||
2021-06-01 08:52:17 DATA: Gesture: hand#1 gesture:pinky forward middlefinger up
|
||||
2021-06-01 08:52:17 DATA: Gesture: iris#0 gesture:looking left
|
||||
2021-06-01 08:52:17 DATA: Object: #0 score:0.55 label:person
|
||||
2021-06-01 08:52:17 DATA: Object: #1 score:0.23 label:bottle
|
||||
2021-06-01 08:52:17 DATA: Persons:
|
||||
2021-06-01 08:52:17 DATA: #0: Face:score:1 age:16.3 gender:female iris:61.05 Body:score:0.82 keypoints:17 LeftHand:no RightHand:yes Gestures:4
|
||||
```
|
|
@ -0,0 +1,84 @@
|
|||
/**
|
||||
* Human demo for NodeJS using Canvas library
|
||||
*
|
||||
* Requires [canvas](https://www.npmjs.com/package/canvas) to provide Canvas functionality in NodeJS environment
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const process = require('process');
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
// in nodejs environments tfjs-node is required to be loaded before human
|
||||
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
|
||||
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
|
||||
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
|
||||
|
||||
const config = { // just enable all and leave default settings
|
||||
debug: false,
|
||||
face: { enabled: true }, // includes mesh, iris, emotion, descriptor
|
||||
hand: { enabled: true, maxDetected: 2, minConfidence: 0.5, detector: { modelPath: 'handtrack.json' } }, // use alternative hand model
|
||||
body: { enabled: true },
|
||||
object: { enabled: true },
|
||||
gestures: { enabled: true },
|
||||
};
|
||||
|
||||
async function main() {
|
||||
log.header();
|
||||
|
||||
globalThis.Canvas = canvas.Canvas; // patch global namespace with canvas library
|
||||
globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library
|
||||
// human.env.Canvas = canvas.Canvas; // alternatively monkey-patch human to use external canvas library
|
||||
// human.env.ImageData = canvas.ImageData; // alternatively monkey-patch human to use external canvas library
|
||||
|
||||
// init
|
||||
const human = new Human.Human(config); // create instance of human
|
||||
log.info('Human:', human.version, 'TF:', tf.version_core);
|
||||
|
||||
await human.load(); // pre-load models
|
||||
log.info('Loaded models:', Object.keys(human.models).filter((a) => human.models[a]));
|
||||
log.info('Memory state:', human.tf.engine().memory());
|
||||
|
||||
// parse cmdline
|
||||
const input = process.argv[2];
|
||||
const output = process.argv[3];
|
||||
if (process.argv.length !== 4) log.error('Parameters: <input-image> <output-image> missing');
|
||||
else if (!fs.existsSync(input) && !input.startsWith('http')) log.error(`File not found: ${process.argv[2]}`);
|
||||
else {
|
||||
// everything seems ok
|
||||
const inputImage = await canvas.loadImage(input); // load image using canvas library
|
||||
log.info('Loaded image', input, inputImage.width, inputImage.height);
|
||||
const inputCanvas = new canvas.Canvas(inputImage.width, inputImage.height); // create canvas
|
||||
const inputCtx = inputCanvas.getContext('2d');
|
||||
inputCtx.drawImage(inputImage, 0, 0); // draw input image onto canvas
|
||||
const imageData = inputCtx.getImageData(0, 0, inputCanvas.width, inputCanvas.height);
|
||||
|
||||
// run detection
|
||||
const result = await human.detect(imageData);
|
||||
// run segmentation
|
||||
// const seg = await human.segmentation(inputCanvas);
|
||||
// log.data('Segmentation:', { data: seg.data.length, alpha: typeof seg.alpha, canvas: typeof seg.canvas });
|
||||
|
||||
// print results summary
|
||||
const persons = result.persons; // invoke persons getter, only used to print summary on console
|
||||
for (let i = 0; i < persons.length; i++) {
|
||||
const face = persons[i].face;
|
||||
const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.iris}` : null;
|
||||
const body = persons[i].body;
|
||||
const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints.length}` : null;
|
||||
log.data(`Detected: #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`);
|
||||
}
|
||||
|
||||
// draw detected results onto canvas and save it to a file
|
||||
const outputCanvas = new canvas.Canvas(inputImage.width, inputImage.height); // create canvas
|
||||
const outputCtx = outputCanvas.getContext('2d');
|
||||
outputCtx.drawImage(result.canvas || inputImage, 0, 0); // draw input image onto canvas
|
||||
human.draw.all(outputCanvas, result); // use human built-in method to draw results as overlays on canvas
|
||||
const outFile = fs.createWriteStream(output); // write canvas to new image file
|
||||
outFile.on('finish', () => log.state('Output image:', output, outputCanvas.width, outputCanvas.height));
|
||||
outFile.on('error', (err) => log.error('Output error:', output, err));
|
||||
const stream = outputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
|
||||
stream.pipe(outFile);
|
||||
}
|
||||
}
|
||||
|
||||
main();
|
|
@ -0,0 +1,98 @@
|
|||
/**
|
||||
* Human demo for NodeJS
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const process = require('process');
|
||||
|
||||
let fetch; // fetch is dynamically imported later
|
||||
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
// in nodejs environments tfjs-node is required to be loaded before human
|
||||
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
|
||||
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
|
||||
|
||||
let human = null;
|
||||
|
||||
const myConfig = {
|
||||
modelBasePath: 'file://models/',
|
||||
debug: false,
|
||||
async: true,
|
||||
filter: { enabled: false },
|
||||
face: {
|
||||
enabled: true,
|
||||
detector: { enabled: true },
|
||||
mesh: { enabled: true },
|
||||
iris: { enabled: true },
|
||||
description: { enabled: true },
|
||||
emotion: { enabled: true },
|
||||
},
|
||||
hand: { enabled: true },
|
||||
body: { enabled: true },
|
||||
object: { enabled: true },
|
||||
};
|
||||
|
||||
async function detect(input) {
|
||||
// read input image from file or url into buffer
|
||||
let buffer;
|
||||
log.info('Loading image:', input);
|
||||
if (input.startsWith('http:') || input.startsWith('https:')) {
|
||||
fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-unpublished-import
|
||||
const res = await fetch(input);
|
||||
if (res && res.ok) buffer = await res.buffer();
|
||||
else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type'));
|
||||
} else {
|
||||
buffer = fs.readFileSync(input);
|
||||
}
|
||||
|
||||
// decode image using tfjs-node so we don't need external dependencies
|
||||
if (!buffer) return;
|
||||
const tensor = human.tf.node.decodeImage(buffer, 3);
|
||||
|
||||
// run detection
|
||||
await human.detect(tensor, myConfig);
|
||||
human.tf.dispose(tensor); // dispose image tensor as we no longer need it
|
||||
}
|
||||
|
||||
async function main() {
|
||||
log.header();
|
||||
|
||||
human = new Human.Human(myConfig);
|
||||
log.info('Human:', human.version, 'TF:', tf.version_core);
|
||||
|
||||
if (human.events) {
|
||||
human.events.addEventListener('warmup', () => {
|
||||
log.info('Event Warmup');
|
||||
});
|
||||
|
||||
human.events.addEventListener('load', () => {
|
||||
const loaded = Object.keys(human.models).filter((a) => human.models[a]);
|
||||
log.info('Event Loaded:', loaded, human.tf.engine().memory());
|
||||
});
|
||||
|
||||
human.events.addEventListener('image', () => {
|
||||
log.info('Event Image:', human.process.tensor.shape);
|
||||
});
|
||||
|
||||
human.events.addEventListener('detect', () => {
|
||||
log.data('Event Detected:');
|
||||
const persons = human.result.persons;
|
||||
for (let i = 0; i < persons.length; i++) {
|
||||
const face = persons[i].face;
|
||||
const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.iris}` : null;
|
||||
const body = persons[i].body;
|
||||
const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints?.length}` : null;
|
||||
log.data(` #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
await human.tf.ready(); // wait until tf is ready
|
||||
|
||||
const input = process.argv[2]; // process input
|
||||
if (input) await detect(input);
|
||||
else log.error('Missing <input>');
|
||||
}
|
||||
|
||||
main();
|
|
@ -0,0 +1,31 @@
|
|||
/**
|
||||
* Human demo for NodeJS using http fetch to get image file
|
||||
*
|
||||
* Requires [node-fetch](https://www.npmjs.com/package/node-fetch) to provide `fetch` functionality in NodeJS environment
|
||||
*/
|
||||
const fs = require('fs');
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
|
||||
// in nodejs environments tfjs-node is required to be loaded before human
|
||||
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
|
||||
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
|
||||
|
||||
const humanConfig = {
|
||||
modelBasePath: 'https://vladmandic.github.io/human/models/',
|
||||
};
|
||||
|
||||
async function main(inputFile) {
|
||||
// @ts-ignore
|
||||
global.fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-unpublished-import
|
||||
const human = new Human.Human(humanConfig); // create instance of human using default configuration
|
||||
log.info('Human:', human.version, 'TF:', tf.version_core);
|
||||
await human.load(); // optional as models would be loaded on-demand first time they are required
|
||||
await human.warmup(); // optional as model warmup is performed on-demand first time it's executed
|
||||
const buffer = fs.readFileSync(inputFile); // read file data into buffer
|
||||
const tensor = human.tf.node.decodeImage(buffer); // decode jpg data
|
||||
const result = await human.detect(tensor); // run detection; will initialize backend and on-demand load models
|
||||
log.data(result.gesture);
|
||||
}
|
||||
|
||||
main('samples/in/ai-body.jpg');
|
|
@ -0,0 +1,64 @@
|
|||
/**
|
||||
* Human Person Similarity test for NodeJS
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const process = require('process');
|
||||
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
// in nodejs environments tfjs-node is required to be loaded before human
|
||||
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
|
||||
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
|
||||
|
||||
let human = null;
|
||||
|
||||
const myConfig = {
|
||||
modelBasePath: 'file://models/',
|
||||
debug: true,
|
||||
face: { emotion: { enabled: false } },
|
||||
body: { enabled: false },
|
||||
hand: { enabled: false },
|
||||
gesture: { enabled: false },
|
||||
};
|
||||
|
||||
async function init() {
|
||||
human = new Human.Human(myConfig);
|
||||
await human.tf.ready();
|
||||
log.info('Human:', human.version, 'TF:', tf.version_core);
|
||||
await human.load();
|
||||
const loaded = Object.keys(human.models).filter((a) => human.models[a]);
|
||||
log.info('Loaded:', loaded);
|
||||
log.info('Memory state:', human.tf.engine().memory());
|
||||
}
|
||||
|
||||
async function detect(input) {
|
||||
if (!fs.existsSync(input)) {
|
||||
throw new Error(`Cannot load image: ${input}`);
|
||||
}
|
||||
const buffer = fs.readFileSync(input);
|
||||
const tensor = human.tf.node.decodeImage(buffer, 3);
|
||||
log.state('Loaded image:', input, tensor.shape);
|
||||
const result = await human.detect(tensor, myConfig);
|
||||
human.tf.dispose(tensor);
|
||||
log.state('Detected faces:', result.face.length);
|
||||
return result;
|
||||
}
|
||||
|
||||
async function main() {
|
||||
log.configure({ inspect: { breakLength: 265 } });
|
||||
log.header();
|
||||
if (process.argv.length !== 4) {
|
||||
throw new Error('Parameters: <first image> <second image> missing');
|
||||
}
|
||||
await init();
|
||||
const res1 = await detect(process.argv[2]);
|
||||
const res2 = await detect(process.argv[3]);
|
||||
if (!res1 || !res1.face || res1.face.length === 0 || !res2 || !res2.face || res2.face.length === 0) {
|
||||
throw new Error('Could not detect face descriptors');
|
||||
}
|
||||
const similarity = human.similarity(res1.face[0].embedding, res2.face[0].embedding, { order: 2 });
|
||||
log.data('Similarity: ', similarity);
|
||||
}
|
||||
|
||||
main();
|
|
@ -0,0 +1,30 @@
|
|||
/**
|
||||
* Human simple demo for NodeJS
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const process = require('process');
|
||||
|
||||
// in nodejs environments tfjs-node is required to be loaded before human
|
||||
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
|
||||
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
|
||||
|
||||
const humanConfig = {
|
||||
// add any custom config here
|
||||
};
|
||||
|
||||
async function detect(inputFile) {
|
||||
const human = new Human.Human(humanConfig); // create instance of human using default configuration
|
||||
console.log('Human:', human.version, 'TF:', tf.version_core); // eslint-disable-line no-console
|
||||
await human.load(); // optional as models would be loaded on-demand first time they are required
|
||||
await human.warmup(); // optional as model warmup is performed on-demand first time it's executed
|
||||
const buffer = fs.readFileSync(inputFile); // read file data into buffer
|
||||
const tensor = human.tf.node.decodeImage(buffer); // decode jpg data
|
||||
console.log('loaded input file:', inputFile, 'resolution:', tensor.shape); // eslint-disable-line no-console
|
||||
const result = await human.detect(tensor); // run detection; will initialize backend and on-demand load models
|
||||
console.log(result); // eslint-disable-line no-console
|
||||
}
|
||||
|
||||
if (process.argv.length === 3) detect(process.argv[2]); // if input file is provided as cmdline parameter use it
|
||||
else detect('samples/in/ai-body.jpg'); // else use built-in test inputfile
|
|
@ -1,29 +1,31 @@
|
|||
/*
|
||||
Unsupported sample of using external utility ffmpeg to capture to decode video input and process it using Human
|
||||
|
||||
uses ffmpeg to process video input and output stream of motion jpeg images which are then parsed for frame start/end markers by pipe2jpeg
|
||||
each frame triggers an event with jpeg buffer that then can be decoded and passed to human for processing
|
||||
if you want process at specific intervals, set output fps to some value
|
||||
if you want to process an input stream, set real-time flag and set input as required
|
||||
/**
|
||||
* Human demo for NodeJS
|
||||
* Unsupported sample of using external utility ffmpeg to capture and decode video input and process it using Human
|
||||
*
|
||||
* Uses ffmpeg to process video input and output stream of motion jpeg images which are then parsed for frame start/end markers by pipe2jpeg
|
||||
* Each frame triggers an event with jpeg buffer that then can be decoded and passed to human for processing
|
||||
* If you want to process at specific intervals, set the output fps to some value
|
||||
* If you want to process an input stream, set real-time flag and set input as required
|
||||
*
|
||||
* Note that [pipe2jpeg](https://www.npmjs.com/package/pipe2jpeg) is not part of Human dependencies and should be installed manually
|
||||
* Working version of `ffmpeg` must be present on the system
|
||||
*/
|
||||
|
||||
const spawn = require('child_process').spawn;
|
||||
const log = require('@vladmandic/pilogger');
|
||||
// eslint-disable-next-line node/no-missing-require
|
||||
const Pipe2Jpeg = require('pipe2jpeg');
|
||||
// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
|
||||
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
|
||||
const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
|
||||
// load specific version of Human library that matches TensorFlow mode
|
||||
const Human = require('../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
// in nodejs environments tfjs-node is required to be loaded before human
|
||||
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
|
||||
const Pipe2Jpeg = require('pipe2jpeg'); // eslint-disable-line node/no-missing-require, import/no-unresolved
|
||||
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
|
||||
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
|
||||
|
||||
let count = 0; // counter
|
||||
let busy = false; // busy flag
|
||||
const inputFile = './test.mp4';
|
||||
|
||||
const humanConfig = {
|
||||
backend: 'tensorflow',
|
||||
modelBasePath: 'file://node_modules/@vladmandic/human/models/',
|
||||
modelBasePath: 'file://models/',
|
||||
debug: false,
|
||||
async: true,
|
||||
filter: { enabled: false },
|
||||
|
@ -40,7 +42,7 @@ const humanConfig = {
|
|||
object: { enabled: false },
|
||||
};
|
||||
|
||||
const human = new Human(humanConfig);
|
||||
const human = new Human.Human(humanConfig);
|
||||
const pipe2jpeg = new Pipe2Jpeg();
|
||||
|
||||
const ffmpegParams = [
|
||||
|
@ -60,15 +62,12 @@ const ffmpegParams = [
|
|||
async function process(jpegBuffer) {
|
||||
if (busy) return; // skip processing if busy
|
||||
busy = true;
|
||||
const decoded = tf.node.decodeJpeg(jpegBuffer, 3); // decode jpeg buffer to raw tensor
|
||||
const tensor = tf.expandDims(decoded, 0); // almost all tf models use first dimension as batch number so we add it
|
||||
decoded.dispose();
|
||||
|
||||
const tensor = human.tf.node.decodeJpeg(jpegBuffer, 3); // decode jpeg buffer to raw tensor
|
||||
log.state('input frame:', ++count, 'size:', jpegBuffer.length, 'decoded shape:', tensor.shape);
|
||||
const res = await human.detect(tensor);
|
||||
log.data('gesture', JSON.stringify(res.gesture));
|
||||
// do processing here
|
||||
tensor.dispose(); // must dispose tensor
|
||||
tf.dispose(tensor); // must dispose tensor
|
||||
busy = false;
|
||||
}
|
||||
|
||||
|
@ -76,7 +75,7 @@ async function main() {
|
|||
log.header();
|
||||
await human.tf.ready();
|
||||
// pre-load models
|
||||
log.info('human:', human.version);
|
||||
log.info('human:', human.version, 'tf:', tf.version_core);
|
||||
pipe2jpeg.on('jpeg', (jpegBuffer) => process(jpegBuffer));
|
||||
|
||||
const ffmpeg = spawn('ffmpeg', ffmpegParams, { stdio: ['ignore', 'pipe', 'ignore'] });
|
|
@ -0,0 +1,94 @@
|
|||
/**
|
||||
* Human demo for NodeJS
|
||||
* Unsupported sample of using external utility fswebcam to capture a screenshot from an attached webcam at regular intervals and process it using Human
|
||||
*
|
||||
* Note that [node-webcam](https://www.npmjs.com/package/node-webcam) is not part of Human dependencies and should be installed manually
|
||||
* Working version of `fswebcam` must be present on the system
|
||||
*/
|
||||
|
||||
let initial = true; // remember if this is the first run to print additional details
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
const nodeWebCam = require('node-webcam'); // eslint-disable-line import/no-unresolved, node/no-missing-require
|
||||
|
||||
// in nodejs environments tfjs-node is required to be loaded before human
|
||||
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
|
||||
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
|
||||
|
||||
// options for node-webcam
|
||||
const tempFile = 'webcam-snap'; // node-webcam requires writing a snapshot to a file; recommended to use tmpfs to avoid excessive disk writes
|
||||
const optionsCamera = {
|
||||
callbackReturn: 'buffer', // return the raw buffer that `fswebcam` writes to disk with no additional processing, so it's fastest
|
||||
saveShots: false, // don't save the processed frame to disk; note that a temp file is still created by fswebcam, hence the tmpfs recommendation
|
||||
};
|
||||
const camera = nodeWebCam.create(optionsCamera);
|
||||
|
||||
// options for human
|
||||
const optionsHuman = {
|
||||
modelBasePath: 'file://models/',
|
||||
};
|
||||
|
||||
const human = new Human.Human(optionsHuman);
|
||||
|
||||
function buffer2tensor(buffer) {
|
||||
return human.tf.tidy(() => {
|
||||
if (!buffer) return null;
|
||||
const decode = human.tf.node.decodeImage(buffer, 3);
|
||||
let expand;
|
||||
if (decode.shape[2] === 4) { // input is in rgba format, need to convert to rgb
|
||||
const channels = human.tf.split(decode, 4, 2); // tf.split(tensor, 4, 2); // split rgba to channels
|
||||
const rgb = human.tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb and ignore alpha
|
||||
expand = human.tf.reshape(rgb, [1, decode.shape[0], decode.shape[1], 3]); // move extra dim from the end of tensor and use it as batch number instead
|
||||
} else {
|
||||
expand = human.tf.expandDims(decode, 0); // input is rgb so use as-is
|
||||
}
|
||||
const cast = human.tf.cast(expand, 'float32');
|
||||
return cast;
|
||||
});
|
||||
}
|
||||
|
||||
async function detect() {
|
||||
// trigger next frame every 5 sec
|
||||
// triggered here before actual capture and detection since we assume it will complete in less than 5sec
|
||||
// so it's as close as possible to real 5sec and not 5sec + detection time
|
||||
// if there is a chance of a race scenario where detection takes longer than the loop interval, the trigger should be at the end of the function instead
|
||||
setTimeout(() => detect(), 5000);
|
||||
|
||||
camera.capture(tempFile, (err, data) => { // gets the (default) jpeg data from the webcam
|
||||
if (err) {
|
||||
log.error('error capturing webcam:', err);
|
||||
} else {
|
||||
const tensor = buffer2tensor(data); // create tensor from image buffer
|
||||
if (initial) log.data('input tensor:', tensor.shape);
|
||||
human.detect(tensor) // eslint-disable-line promise/no-promise-in-callback
|
||||
.then((result) => {
|
||||
if (result && result.face && result.face.length > 0) {
|
||||
for (let i = 0; i < result.face.length; i++) {
|
||||
const face = result.face[i];
|
||||
const emotion = face.emotion?.reduce((prev, curr) => (prev.score > curr.score ? prev : curr));
|
||||
log.data(`detected face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion?.score} emotion:${emotion?.emotion} iris:${face.iris}`);
|
||||
}
|
||||
} else {
|
||||
log.data(' Face: N/A');
|
||||
}
|
||||
return result;
|
||||
})
|
||||
.catch(() => log.error('human detect error'));
|
||||
}
|
||||
initial = false;
|
||||
});
|
||||
// alternatively to triggering every 5 sec, simply trigger the next frame as fast as possible
|
||||
// setImmediate(() => process());
|
||||
}
|
||||
|
||||
async function main() {
|
||||
log.info('human:', human.version, 'tf:', tf.version_core);
|
||||
camera.list((list) => {
|
||||
log.data('detected camera:', list);
|
||||
});
|
||||
await human.load();
|
||||
detect();
|
||||
}
|
||||
|
||||
log.header();
|
||||
main();
|
|
@ -1,19 +1,25 @@
|
|||
const log = require('@vladmandic/pilogger');
|
||||
/**
|
||||
* Human demo for NodeJS
|
||||
*
|
||||
* Requires [node-fetch](https://www.npmjs.com/package/node-fetch) to provide `fetch` functionality in NodeJS environment
|
||||
*/
|
||||
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const process = require('process');
|
||||
const fetch = require('node-fetch').default;
|
||||
|
||||
// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
|
||||
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
|
||||
const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
|
||||
let fetch; // fetch is dynamically imported later
|
||||
|
||||
// load specific version of Human library that matches TensorFlow mode
|
||||
const Human = require('../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
|
||||
// in nodejs environments tfjs-node is required to be loaded before human
|
||||
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
|
||||
const Human = require('../../dist/human.node.js'); // use this when using human in dev mode
|
||||
|
||||
let human = null;
|
||||
|
||||
const myConfig = {
|
||||
backend: 'tensorflow',
|
||||
// backend: 'tensorflow',
|
||||
modelBasePath: 'file://models/',
|
||||
debug: true,
|
||||
async: false,
|
||||
|
@ -39,16 +45,18 @@ const myConfig = {
|
|||
|
||||
async function init() {
|
||||
// create instance of human
|
||||
human = new Human(myConfig);
|
||||
human = new Human.Human(myConfig);
|
||||
// wait until tf is ready
|
||||
await human.tf.ready();
|
||||
log.info('human:', human.version, 'tf:', tf.version_core);
|
||||
// pre-load models
|
||||
log.info('Human:', human.version);
|
||||
log.info('Active Configuration', human.config);
|
||||
// log.info('Active Configuration', human.config);
|
||||
await human.load();
|
||||
const loaded = Object.keys(human.models).filter((a) => human.models[a]);
|
||||
log.info('Loaded:', loaded);
|
||||
log.info('Memory state:', human.tf.engine().memory());
|
||||
// log.info('Memory state:', human.tf.engine().memory());
|
||||
log.data(tf.backend().binding ? tf.backend().binding.TF_Version : null);
|
||||
}
|
||||
|
||||
async function detect(input) {
|
||||
|
@ -81,7 +89,7 @@ async function detect(input) {
|
|||
});
|
||||
|
||||
// image shape contains image dimensions and depth
|
||||
log.state('Processing:', tensor['shape']);
|
||||
log.state('Processing:', tensor.shape);
|
||||
|
||||
// run actual detection
|
||||
let result;
|
||||
|
@ -100,7 +108,7 @@ async function detect(input) {
|
|||
for (let i = 0; i < result.face.length; i++) {
|
||||
const face = result.face[i];
|
||||
const emotion = face.emotion.reduce((prev, curr) => (prev.score > curr.score ? prev : curr));
|
||||
log.data(` Face: #${i} boxConfidence:${face.boxConfidence} faceConfidence:${face.boxConfidence} age:${face.age} genderConfidence:${face.genderConfidence} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} iris:${face.iris}`);
|
||||
log.data(` Face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} iris:${face.iris}`);
|
||||
}
|
||||
} else {
|
||||
log.data(' Face: N/A');
|
||||
|
@ -108,7 +116,7 @@ async function detect(input) {
|
|||
if (result && result.body && result.body.length > 0) {
|
||||
for (let i = 0; i < result.body.length; i++) {
|
||||
const body = result.body[i];
|
||||
log.data(` Body: #${i} score:${body.score} landmarks:${body.keypoints?.length || body.landmarks?.length}`);
|
||||
log.data(` Body: #${i} score:${body.score} keypoints:${body.keypoints?.length}`);
|
||||
}
|
||||
} else {
|
||||
log.data(' Body: N/A');
|
||||
|
@ -116,7 +124,7 @@ async function detect(input) {
|
|||
if (result && result.hand && result.hand.length > 0) {
|
||||
for (let i = 0; i < result.hand.length; i++) {
|
||||
const hand = result.hand[i];
|
||||
log.data(` Hand: #${i} confidence:${hand.confidence}`);
|
||||
log.data(` Hand: #${i} score:${hand.score} keypoints:${hand.keypoints?.length}`);
|
||||
}
|
||||
} else {
|
||||
log.data(' Hand: N/A');
|
||||
|
@ -129,6 +137,7 @@ async function detect(input) {
|
|||
} else {
|
||||
log.data(' Gesture: N/A');
|
||||
}
|
||||
|
||||
if (result && result.object && result.object.length > 0) {
|
||||
for (let i = 0; i < result.object.length; i++) {
|
||||
const object = result.object[i];
|
||||
|
@ -137,6 +146,25 @@ async function detect(input) {
|
|||
} else {
|
||||
log.data(' Object: N/A');
|
||||
}
|
||||
|
||||
// print data to console
|
||||
if (result) {
|
||||
// invoke persons getter
|
||||
const persons = result.persons;
|
||||
|
||||
// write result objects to file
|
||||
// fs.writeFileSync('result.json', JSON.stringify(result, null, 2));
|
||||
|
||||
log.data('Persons:');
|
||||
for (let i = 0; i < persons.length; i++) {
|
||||
const face = persons[i].face;
|
||||
const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.iris}` : null;
|
||||
const body = persons[i].body;
|
||||
const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints?.length}` : null;
|
||||
log.data(` #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -161,16 +189,29 @@ async function test() {
|
|||
}
|
||||
|
||||
async function main() {
|
||||
log.configure({ inspect: { breakLength: 265 } });
|
||||
log.header();
|
||||
log.info('Current folder:', process.env.PWD);
|
||||
fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-unpublished-import
|
||||
await init();
|
||||
const f = process.argv[2];
|
||||
if (process.argv.length !== 3) {
|
||||
log.warn('Parameters: <input image> missing');
|
||||
log.warn('Parameters: <input image | folder> missing');
|
||||
await test();
|
||||
} else if (!fs.existsSync(process.argv[2]) && !process.argv[2].startsWith('http')) {
|
||||
} else if (!fs.existsSync(f) && !f.startsWith('http')) {
|
||||
log.error(`File not found: ${process.argv[2]}`);
|
||||
} else if (fs.existsSync(f)) {
|
||||
const stat = fs.statSync(f);
|
||||
if (stat.isDirectory()) {
|
||||
const dir = fs.readdirSync(f);
|
||||
for (const file of dir) {
|
||||
await detect(path.join(f, file));
|
||||
}
|
||||
} else {
|
||||
await detect(f);
|
||||
}
|
||||
} else {
|
||||
await detect(process.argv[2]);
|
||||
await detect(f);
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,86 @@
|
|||
/**
|
||||
* Human demo for NodeJS
|
||||
*
|
||||
* Takes input and output folder names as parameters and processes all images
|
||||
* found in input folder and creates annotated images in output folder
|
||||
*
|
||||
* Requires [canvas](https://www.npmjs.com/package/canvas) to provide Canvas functionality in NodeJS environment
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const process = require('process');
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
|
||||
// for nodejs, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
|
||||
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
const Human = require('../../dist/human.node-gpu.js'); // when using human in dev mode; equivalent of `const Human = require('../dist/human.node-gpu.js').default;`
|
||||
|
||||
const config = { // just enable all and leave default settings
|
||||
debug: true,
|
||||
async: false,
|
||||
cacheSensitivity: 0,
|
||||
face: { enabled: true, detector: { maxDetected: 20 } },
|
||||
object: { enabled: true },
|
||||
gesture: { enabled: true },
|
||||
hand: { enabled: true },
|
||||
body: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/movenet-multipose.json' },
|
||||
};
|
||||
|
||||
async function main() {
|
||||
log.header();
|
||||
|
||||
globalThis.Canvas = canvas.Canvas; // patch global namespace with canvas library
|
||||
globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library
|
||||
|
||||
const human = new Human.Human(config); // create instance of human
|
||||
log.info('Human:', human.version, 'TF:', tf.version_core);
|
||||
const configErrors = await human.validate();
|
||||
if (configErrors.length > 0) log.error('Configuration errors:', configErrors);
|
||||
await human.load(); // pre-load models
|
||||
log.info('Loaded models:', Object.keys(human.models).filter((a) => human.models[a]));
|
||||
|
||||
const inDir = process.argv[2];
|
||||
const outDir = process.argv[3];
|
||||
if (process.argv.length !== 4) {
|
||||
log.error('Parameters: <input-directory> <output-directory> missing');
|
||||
return;
|
||||
}
|
||||
if (!fs.existsSync(inDir) || !fs.statSync(inDir).isDirectory() || !fs.existsSync(outDir) || !fs.statSync(outDir).isDirectory()) {
|
||||
log.error('Invalid directory specified:', 'input:', fs.existsSync(inDir) && fs.statSync(inDir).isDirectory(), 'output:', fs.existsSync(outDir) && fs.statSync(outDir).isDirectory());
|
||||
return;
|
||||
}
|
||||
|
||||
const dir = fs.readdirSync(inDir);
|
||||
const images = dir.filter((f) => fs.statSync(path.join(inDir, f)).isFile() && (f.toLocaleLowerCase().endsWith('.jpg') || f.toLocaleLowerCase().endsWith('.jpeg')));
|
||||
log.info(`Processing folder: ${inDir} entries:`, dir.length, 'images', images.length);
|
||||
for (const image of images) {
|
||||
const inFile = path.join(inDir, image);
|
||||
const buffer = fs.readFileSync(inFile);
|
||||
const tensor = human.tf.tidy(() => {
|
||||
const decode = human.tf.node.decodeImage(buffer, 3);
|
||||
const expand = human.tf.expandDims(decode, 0);
|
||||
const cast = human.tf.cast(expand, 'float32');
|
||||
return cast;
|
||||
});
|
||||
log.state('Loaded image:', inFile, tensor.shape);
|
||||
|
||||
const result = await human.detect(tensor);
|
||||
human.tf.dispose(tensor);
|
||||
log.data(`Detected: ${image}:`, 'Face:', result.face.length, 'Body:', result.body.length, 'Hand:', result.hand.length, 'Objects:', result.object.length, 'Gestures:', result.gesture.length);
|
||||
|
||||
const outputCanvas = new canvas.Canvas(tensor.shape[2], tensor.shape[1]); // create canvas
|
||||
const outputCtx = outputCanvas.getContext('2d');
|
||||
const inputImage = await canvas.loadImage(buffer); // load image using canvas library
|
||||
outputCtx.drawImage(inputImage, 0, 0); // draw input image onto canvas
|
||||
human.draw.all(outputCanvas, result); // use human built-in method to draw results as overlays on canvas
|
||||
const outFile = path.join(outDir, image);
|
||||
const outStream = fs.createWriteStream(outFile); // write canvas to new image file
|
||||
outStream.on('finish', () => log.state('Output image:', outFile, outputCanvas.width, outputCanvas.height));
|
||||
outStream.on('error', (err) => log.error('Output error:', outFile, err));
|
||||
const stream = outputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
|
||||
stream.pipe(outStream);
|
||||
}
|
||||
}
|
||||
|
||||
main();
|
|
@ -0,0 +1,36 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta http-equiv="content-type" content="text/html; charset=utf-8">
|
||||
<title>Human: Offline</title>
|
||||
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
|
||||
<meta name="mobile-web-app-capable" content="yes">
|
||||
<meta name="application-name" content="Human">
|
||||
<meta name="keywords" content="Human">
|
||||
<meta name="description" content="Human; Author: Vladimir Mandic <mandic00@live.com>">
|
||||
<meta name="msapplication-tooltip" content="Human; Author: Vladimir Mandic <mandic00@live.com>">
|
||||
<meta name="theme-color" content="#000000">
|
||||
<link rel="manifest" href="manifest.webmanifest">
|
||||
<link rel="shortcut icon" href="/favicon.ico" type="image/x-icon">
|
||||
<link rel="icon" sizes="256x256" href="../assets/icon.png">
|
||||
<link rel="apple-touch-icon" href="../assets/icon.png">
|
||||
<link rel="apple-touch-startup-image" href="../assets/icon.png">
|
||||
<style>
|
||||
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../assets/lato-light.woff2') }
|
||||
body { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; background: black; color: #ebebeb; }
|
||||
h1 { font-size: 2rem; margin-top: 1.2rem; font-weight: bold; }
|
||||
a { color: white; }
|
||||
a:link { color: lightblue; text-decoration: none; }
|
||||
a:hover { color: lightskyblue; text-decoration: none; }
|
||||
.row { width: 90vw; margin: auto; margin-top: 100px; text-align: center; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="row text-center">
|
||||
<h1>
|
||||
<a href="/">Human: Offline</a><br>
|
||||
<img alt="icon" src="../assets/icon.png">
|
||||
</h1>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
|
@ -0,0 +1,5 @@
|
|||
# Human Demo in TypeScript for Browsers
|
||||
|
||||
Simple demo app that can be used as a quick-start guide for use of `Human` in browser environments
|
||||
|
||||
- `index.ts` is compiled to `index.js` which is loaded from `index.html`
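
For orientation, here is a minimal sketch of the flow implemented in `index.ts`; the element ids (`video`, `canvas`) and relative paths are assumptions matching `index.html` in this folder, and the full demo additionally tracks performance, prints status, and supports pause-on-click:

```ts
// minimal quick-start sketch (assumes the same element ids and dist/models paths as the demo)
import * as H from '../../dist/human.esm.js';

const human = new H.Human({ modelBasePath: '../../models' }); // create instance with user configuration
const video = document.getElementById('video') as HTMLVideoElement;
const canvas = document.getElementById('canvas') as HTMLCanvasElement;

async function loop() {
  await human.detect(video); // run detection on the current video frame
  const interpolated = human.next(human.result); // smoothen results using last-known results
  human.draw.canvas(video, canvas); // draw source frame to output canvas
  await human.draw.all(canvas, interpolated); // overlay detection results
  requestAnimationFrame(loop); // schedule next frame
}

async function start() {
  await human.load(); // optional model pre-load
  await human.warmup(); // optional backend warmup
  const stream = await navigator.mediaDevices.getUserMedia({ video: true });
  video.srcObject = stream;
  await video.play();
  canvas.width = video.videoWidth;
  canvas.height = video.videoHeight;
  loop();
}

window.onload = start;
```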
|
|
@ -0,0 +1,30 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>Human</title>
|
||||
<meta name="viewport" content="width=device-width" id="viewport">
|
||||
<meta name="keywords" content="Human">
|
||||
<meta name="application-name" content="Human">
|
||||
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="theme-color" content="#000000">
|
||||
<link rel="manifest" href="../manifest.webmanifest">
|
||||
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
|
||||
<link rel="apple-touch-icon" href="../../assets/icon.png">
|
||||
<script src="./index.js" type="module"></script>
|
||||
<style>
|
||||
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
|
||||
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
|
||||
body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
|
||||
body::-webkit-scrollbar { display: none; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<canvas id="canvas" style="margin: 0 auto; width: 100%"></canvas>
|
||||
<video id="video" playsinline style="display: none"></video>
|
||||
<pre id="status" style="position: absolute; top: 12px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
|
||||
<pre id="log" style="padding: 8px"></pre>
|
||||
<div id="performance" style="position: absolute; bottom: 0; width: 100%; padding: 8px; font-size: 0.8rem;"></div>
|
||||
</body>
|
||||
</html>
|
|
@ -0,0 +1,9 @@
|
|||
/*
|
||||
Human
|
||||
homepage: <https://github.com/vladmandic/human>
|
||||
author: <https://github.com/vladmandic>'
|
||||
*/
|
||||
|
||||
import*as c from"../../dist/human.esm.js";var w={async:!1,modelBasePath:"../../models",filter:{enabled:!0,equalization:!1,flip:!1},face:{enabled:!0,detector:{rotation:!1},mesh:{enabled:!0},attention:{enabled:!1},iris:{enabled:!0},description:{enabled:!0},emotion:{enabled:!0}},body:{enabled:!0},hand:{enabled:!0},object:{enabled:!1},gesture:{enabled:!0}},e=new c.Human(w);e.env.perfadd=!1;e.draw.options.font='small-caps 18px "Lato"';e.draw.options.lineHeight=20;var t={video:document.getElementById("video"),canvas:document.getElementById("canvas"),log:document.getElementById("log"),fps:document.getElementById("status"),perf:document.getElementById("performance")},n={detect:0,draw:0,tensors:0,start:0},o={detectFPS:0,drawFPS:0,frames:0,averageMs:0},i=(...a)=>{t.log.innerText+=a.join(" ")+`
|
||||
`,console.log(...a)},r=a=>t.fps.innerText=a,b=a=>t.perf.innerText="tensors:"+e.tf.memory().numTensors.toString()+" | performance: "+JSON.stringify(a).replace(/"|{|}/g,"").replace(/,/g," | ");async function h(){r("starting webcam...");let a={audio:!1,video:{facingMode:"user",resizeMode:"none",width:{ideal:document.body.clientWidth},height:{ideal:document.body.clientHeight}}},d=await navigator.mediaDevices.getUserMedia(a),f=new Promise(p=>{t.video.onloadeddata=()=>p(!0)});t.video.srcObject=d,t.video.play(),await f,t.canvas.width=t.video.videoWidth,t.canvas.height=t.video.videoHeight;let s=d.getVideoTracks()[0],v=s.getCapabilities?s.getCapabilities():"",g=s.getSettings?s.getSettings():"",u=s.getConstraints?s.getConstraints():"";i("video:",t.video.videoWidth,t.video.videoHeight,s.label,{stream:d,track:s,settings:g,constraints:u,capabilities:v}),t.canvas.onclick=()=>{t.video.paused?t.video.play():t.video.pause()}}async function l(){if(!t.video.paused){n.start===0&&(n.start=e.now()),await e.detect(t.video);let a=e.tf.memory().numTensors;a-n.tensors!==0&&i("allocated tensors:",a-n.tensors),n.tensors=a,o.detectFPS=Math.round(1e3*1e3/(e.now()-n.detect))/1e3,o.frames++,o.averageMs=Math.round(1e3*(e.now()-n.start)/o.frames)/1e3,o.frames%100===0&&!t.video.paused&&i("performance",{...o,tensors:n.tensors})}n.detect=e.now(),requestAnimationFrame(l)}async function m(){if(!t.video.paused){let d=e.next(e.result);e.config.filter.flip?e.draw.canvas(d.canvas,t.canvas):e.draw.canvas(t.video,t.canvas),await e.draw.all(t.canvas,d),b(d.performance)}let a=e.now();o.drawFPS=Math.round(1e3*1e3/(a-n.draw))/1e3,n.draw=a,r(t.video.paused?"paused":`fps: ${o.detectFPS.toFixed(1).padStart(5," ")} detect | ${o.drawFPS.toFixed(1).padStart(5," ")} draw`),setTimeout(m,30)}async function M(){i("human version:",e.version,"| tfjs version:",e.tf.version["tfjs-core"]),i("platform:",e.env.platform,"| agent:",e.env.agent),r("loading..."),await e.load(),i("backend:",e.tf.getBackend(),"| available:",e.env.backends),i("models stats:",e.getModelStats()),i("models loaded:",Object.values(e.models).filter(a=>a!==null).length),r("initializing..."),await e.warmup(),await h(),await l(),await m()}window.onload=M;
|
||||
//# sourceMappingURL=index.js.map
|
|
@ -0,0 +1,119 @@
|
|||
/**
|
||||
* Human demo for browsers
|
||||
* @default Human Library
|
||||
* @summary <https://github.com/vladmandic/human>
|
||||
* @author <https://github.com/vladmandic>
|
||||
* @copyright <https://github.com/vladmandic>
|
||||
* @license MIT
|
||||
*/
|
||||
|
||||
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/human
|
||||
|
||||
const humanConfig: Partial<H.Config> = { // user configuration for human, used to fine-tune behavior
|
||||
// backend: 'wasm' as const,
|
||||
// wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.20.0/dist/',
|
||||
// cacheSensitivity: 0,
|
||||
async: false,
|
||||
modelBasePath: '../../models',
|
||||
filter: { enabled: true, equalization: false, flip: false },
|
||||
face: { enabled: true, detector: { rotation: false }, mesh: { enabled: true }, attention: { enabled: false }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true } },
|
||||
body: { enabled: true },
|
||||
hand: { enabled: true },
|
||||
object: { enabled: false },
|
||||
gesture: { enabled: true },
|
||||
};
|
||||
|
||||
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
|
||||
|
||||
human.env.perfadd = false; // is performance data showing instant or total values
|
||||
human.draw.options.font = 'small-caps 18px "Lato"'; // set font used to draw labels when using draw methods
|
||||
human.draw.options.lineHeight = 20;
|
||||
// human.draw.options.fillPolygons = true;
|
||||
|
||||
const dom = { // grab instances of dom objects so we don't have to look them up later
|
||||
video: document.getElementById('video') as HTMLVideoElement,
|
||||
canvas: document.getElementById('canvas') as HTMLCanvasElement,
|
||||
log: document.getElementById('log') as HTMLPreElement,
|
||||
fps: document.getElementById('status') as HTMLPreElement,
|
||||
perf: document.getElementById('performance') as HTMLDivElement,
|
||||
};
|
||||
const timestamp = { detect: 0, draw: 0, tensors: 0, start: 0 }; // holds information used to calculate performance and possible memory leaks
|
||||
const fps = { detectFPS: 0, drawFPS: 0, frames: 0, averageMs: 0 }; // holds calculated fps information for both detect and screen refresh
|
||||
|
||||
const log = (...msg) => { // helper method to output messages
|
||||
dom.log.innerText += msg.join(' ') + '\n';
|
||||
console.log(...msg); // eslint-disable-line no-console
|
||||
};
|
||||
const status = (msg) => dom.fps.innerText = msg; // print status element
|
||||
const perf = (msg) => dom.perf.innerText = 'tensors:' + (human.tf.memory().numTensors as number).toString() + ' | performance: ' + JSON.stringify(msg).replace(/"|{|}/g, '').replace(/,/g, ' | '); // print performance element
|
||||
|
||||
async function webCam() { // initialize webcam
|
||||
status('starting webcam...');
|
||||
// @ts-ignore resizeMode is not yet defined in tslib
|
||||
const options: MediaStreamConstraints = { audio: false, video: { facingMode: 'user', resizeMode: 'none', width: { ideal: document.body.clientWidth }, height: { ideal: document.body.clientHeight } } };
|
||||
const stream: MediaStream = await navigator.mediaDevices.getUserMedia(options);
|
||||
const ready = new Promise((resolve) => { dom.video.onloadeddata = () => resolve(true); });
|
||||
dom.video.srcObject = stream;
|
||||
void dom.video.play();
|
||||
await ready;
|
||||
dom.canvas.width = dom.video.videoWidth;
|
||||
dom.canvas.height = dom.video.videoHeight;
|
||||
const track: MediaStreamTrack = stream.getVideoTracks()[0];
|
||||
const capabilities: MediaTrackCapabilities | string = track.getCapabilities ? track.getCapabilities() : '';
|
||||
const settings: MediaTrackSettings | string = track.getSettings ? track.getSettings() : '';
|
||||
const constraints: MediaTrackConstraints | string = track.getConstraints ? track.getConstraints() : '';
|
||||
log('video:', dom.video.videoWidth, dom.video.videoHeight, track.label, { stream, track, settings, constraints, capabilities });
|
||||
dom.canvas.onclick = () => { // pause when clicked on screen and resume on next click
|
||||
if (dom.video.paused) void dom.video.play();
|
||||
else dom.video.pause();
|
||||
};
|
||||
}
|
||||
|
||||
async function detectionLoop() { // main detection loop
|
||||
if (!dom.video.paused) {
|
||||
if (timestamp.start === 0) timestamp.start = human.now();
|
||||
// log('profiling data:', await human.profile(dom.video));
|
||||
await human.detect(dom.video); // actual detection; we're not capturing the output in a local variable as it can also be reached via human.result
|
||||
const tensors = human.tf.memory().numTensors; // check current tensor usage for memory leaks
|
||||
if (tensors - timestamp.tensors !== 0) log('allocated tensors:', tensors - timestamp.tensors); // printed on start and each time there is a tensor leak
|
||||
timestamp.tensors = tensors;
|
||||
fps.detectFPS = Math.round(1000 * 1000 / (human.now() - timestamp.detect)) / 1000;
|
||||
fps.frames++;
|
||||
fps.averageMs = Math.round(1000 * (human.now() - timestamp.start) / fps.frames) / 1000;
|
||||
if (fps.frames % 100 === 0 && !dom.video.paused) log('performance', { ...fps, tensors: timestamp.tensors });
|
||||
}
|
||||
timestamp.detect = human.now();
|
||||
requestAnimationFrame(detectionLoop); // start new frame immediately
|
||||
}
|
||||
|
||||
async function drawLoop() { // main screen refresh loop
|
||||
if (!dom.video.paused) {
|
||||
const interpolated = human.next(human.result); // smoothen result using last-known results
|
||||
if (human.config.filter.flip) human.draw.canvas(interpolated.canvas as HTMLCanvasElement, dom.canvas); // draw processed image to screen canvas
|
||||
else human.draw.canvas(dom.video, dom.canvas); // draw original video to screen canvas; better than using the processed image as this loop happens faster than the processing loop
|
||||
await human.draw.all(dom.canvas, interpolated); // draw labels, boxes, lines, etc.
|
||||
perf(interpolated.performance); // write performance data
|
||||
}
|
||||
const now = human.now();
|
||||
fps.drawFPS = Math.round(1000 * 1000 / (now - timestamp.draw)) / 1000;
|
||||
timestamp.draw = now;
|
||||
status(dom.video.paused ? 'paused' : `fps: ${fps.detectFPS.toFixed(1).padStart(5, ' ')} detect | ${fps.drawFPS.toFixed(1).padStart(5, ' ')} draw`); // write status
|
||||
setTimeout(drawLoop, 30); // use to slow down refresh from max refresh rate to target of 30 fps
|
||||
}
|
||||
|
||||
async function main() { // main entry point
|
||||
log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
|
||||
log('platform:', human.env.platform, '| agent:', human.env.agent);
|
||||
status('loading...');
|
||||
await human.load(); // preload all models
|
||||
log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
|
||||
log('models stats:', human.getModelStats());
|
||||
log('models loaded:', Object.values(human.models).filter((model) => model !== null).length);
|
||||
status('initializing...');
|
||||
await human.warmup(); // warmup function to initialize backend for future faster detection
|
||||
await webCam(); // start webcam
|
||||
await detectionLoop(); // start detection loop
|
||||
await drawLoop(); // start draw loop
|
||||
}
|
||||
|
||||
window.onload = main;
|
|
@ -0,0 +1,7 @@
|
|||
/*
|
||||
Human
|
||||
homepage: <https://github.com/vladmandic/human>
|
||||
author: <https://github.com/vladmandic>'
|
||||
*/
|
||||
|
||||
var e="3.20.0";var s="3.20.0";var t="3.20.0";var i="3.20.0";var n="3.20.0";var r="3.20.0";var l="3.20.0";var V={tfjs:e,"tfjs-core":s,"tfjs-data":t,"tfjs-layers":i,"tfjs-converter":n,"tfjs-backend-webgl":r,"tfjs-backend-wasm":l};export{V as version};
|
|
@ -5,7 +5,7 @@ After=network.target network-online.target
|
|||
[Service]
|
||||
Type=simple
|
||||
Environment="NODE_ENV=production"
|
||||
ExecStart=<path-to-node> server/serve.js
|
||||
ExecStart=<path-to-node> <your-project-folder>/node_modules/@vladmandic/build/src/build.js --profile serve
|
||||
WorkingDirectory=<your-project-folder>
|
||||
StandardOutput=inherit
|
||||
StandardError=inherit
|
||||
|
|
305
models/README.md
|
@ -3,3 +3,308 @@
|
|||
For details see Wiki:
|
||||
|
||||
- [**List of Models & Credits**](https://github.com/vladmandic/human/wiki/Models)
|
||||
|
||||
## Model signatures:
|
||||
|
||||
```js
|
||||
INFO: graph model: /home/vlado/dev/human/models/iris.json
|
||||
INFO: created on: 2020-10-12T18:46:47.060Z
|
||||
INFO: metadata: { generatedBy: 'https://github.com/google/mediapipe', convertedBy: 'https://github.com/vladmandic', version: undefined }
|
||||
INFO: model inputs based on signature
|
||||
{ name: 'input_1:0', dtype: 'DT_FLOAT', shape: [ -1, 64, 64, 3 ] }
|
||||
INFO: model outputs based on signature
|
||||
{ id: 0, name: 'Identity:0', dytpe: 'DT_FLOAT', shape: [ -1, 1, 1, 228 ] }
|
||||
INFO: tensors: 191
|
||||
DATA: weights: {
|
||||
files: [ 'iris.bin' ],
|
||||
size: { disk: 2599092, memory: 2599092 },
|
||||
count: { total: 191, float32: 189, int32: 2 },
|
||||
quantized: { none: 191 },
|
||||
values: { total: 649773, float32: 649764, int32: 9 }
|
||||
}
|
||||
DATA: kernel ops: {
|
||||
graph: [ 'Const', 'Placeholder', 'Identity' ],
|
||||
convolution: [ '_FusedConv2D', 'DepthwiseConv2dNative', 'MaxPool' ],
|
||||
arithmetic: [ 'AddV2' ],
|
||||
basic_math: [ 'Prelu' ],
|
||||
transformation: [ 'Pad' ],
|
||||
slice_join: [ 'ConcatV2' ]
|
||||
}
|
||||
INFO: graph model: /home/vlado/dev/human/models/facemesh.json
|
||||
INFO: created on: 2020-10-12T18:46:46.944Z
|
||||
INFO: metadata: { generatedBy: 'https://github.com/google/mediapipe', convertedBy: 'https://github.com/vladmandic', version: undefined }
|
||||
INFO: model inputs based on signature
|
||||
{ name: 'input_1:0', dtype: 'DT_FLOAT', shape: [ 1, 192, 192, 3 ] }
|
||||
INFO: model outputs based on signature
|
||||
{ id: 0, name: 'Identity_1:0', dytpe: 'DT_FLOAT', shape: [ 1, 266 ] }
|
||||
{ id: 1, name: 'Identity_2:0', dytpe: 'DT_FLOAT', shape: [ 1, 1 ] }
|
||||
{ id: 2, name: 'Identity:0', dytpe: 'DT_FLOAT', shape: [ 1, 1404 ] }
|
||||
INFO: tensors: 118
|
||||
DATA: weights: {
|
||||
files: [ 'facemesh.bin' ],
|
||||
size: { disk: 2955780, memory: 2955780 },
|
||||
count: { total: 118, float32: 114, int32: 4 },
|
||||
quantized: { none: 118 },
|
||||
values: { total: 738945, float32: 738919, int32: 26 }
|
||||
}
|
||||
DATA: kernel ops: {
|
||||
graph: [ 'Placeholder', 'Const', 'NoOp', 'Identity' ],
|
||||
convolution: [ '_FusedConv2D', 'DepthwiseConv2dNative', 'MaxPool' ],
|
||||
arithmetic: [ 'AddV2' ],
|
||||
basic_math: [ 'Prelu', 'Sigmoid' ],
|
||||
transformation: [ 'Pad', 'Reshape' ]
|
||||
}
|
||||
INFO: graph model: /home/vlado/dev/human/models/emotion.json
|
||||
INFO: created on: 2020-11-05T20:11:29.740Z
|
||||
INFO: metadata: { generatedBy: 'https://github.com/oarriaga/face_classification', convertedBy: 'https://github.com/vladmandic', version: undefined }
|
||||
INFO: model inputs based on signature
|
||||
{ name: 'input_1:0', dtype: 'DT_FLOAT', shape: [ -1, 64, 64, 1 ] }
|
||||
INFO: model outputs based on signature
|
||||
{ id: 0, name: 'Identity:0', dytpe: 'DT_FLOAT', shape: [ -1, 7 ] }
|
||||
INFO: tensors: 23
|
||||
DATA: weights: {
|
||||
files: [ 'emotion.bin' ],
|
||||
size: { disk: 820516, memory: 820516 },
|
||||
count: { total: 23, float32: 22, int32: 1 },
|
||||
quantized: { none: 23 },
|
||||
values: { total: 205129, float32: 205127, int32: 2 }
|
||||
}
|
||||
DATA: kernel ops: {
|
||||
graph: [ 'Const', 'Placeholder', 'Identity' ],
|
||||
convolution: [ '_FusedConv2D', 'DepthwiseConv2dNative', 'MaxPool' ],
|
||||
arithmetic: [ 'AddV2' ],
|
||||
basic_math: [ 'Relu' ],
|
||||
reduction: [ 'Mean' ],
|
||||
normalization: [ 'Softmax' ]
|
||||
}
|
||||
INFO: graph model: /home/vlado/dev/human/models/faceres.json
|
||||
INFO: created on: 2021-03-21T14:12:59.863Z
|
||||
INFO: metadata: { generatedBy: 'https://github.com/HSE-asavchenko/HSE_FaceRec_tf', convertedBy: 'https://github.com/vladmandic', version: undefined }
|
||||
INFO: model inputs based on signature
|
||||
{ name: 'input_1', dtype: 'DT_FLOAT', shape: [ -1, 224, 224, 3 ] }
|
||||
INFO: model outputs based on signature
|
||||
{ id: 0, name: 'gender_pred/Sigmoid:0', dytpe: 'DT_FLOAT', shape: [ 1, 1 ] }
|
||||
{ id: 1, name: 'global_pooling/Mean', dytpe: 'DT_FLOAT', shape: [ 1, 1024 ] }
|
||||
{ id: 2, name: 'age_pred/Softmax:0', dytpe: 'DT_FLOAT', shape: [ 1, 100 ] }
|
||||
INFO: tensors: 128
|
||||
DATA: weights: {
|
||||
files: [ 'faceres.bin' ],
|
||||
size: { disk: 6978814, memory: 13957620 },
|
||||
count: { total: 128, float32: 127, int32: 1 },
|
||||
quantized: { float16: 127, none: 1 },
|
||||
values: { total: 3489405, float32: 3489403, int32: 2 }
|
||||
}
|
||||
DATA: kernel ops: {
|
||||
graph: [ 'Const', 'Placeholder' ],
|
||||
convolution: [ 'Conv2D', 'DepthwiseConv2dNative' ],
|
||||
arithmetic: [ 'Add', 'Minimum', 'Maximum', 'Mul' ],
|
||||
basic_math: [ 'Relu', 'Sigmoid' ],
|
||||
reduction: [ 'Mean' ],
|
||||
matrices: [ '_FusedMatMul' ],
|
||||
normalization: [ 'Softmax' ]
|
||||
}
|
||||
INFO: graph model: /home/vlado/dev/human/models/blazeface.json
|
||||
INFO: created on: 2020-10-15T19:57:26.419Z
|
||||
INFO: metadata: { generatedBy: 'https://github.com/google/mediapipe', convertedBy: 'https://github.com/vladmandic', version: undefined }
|
||||
INFO: model inputs based on signature
|
||||
{ name: 'input:0', dtype: 'DT_FLOAT', shape: [ 1, 256, 256, 3 ] }
|
||||
INFO: model outputs based on signature
|
||||
{ id: 0, name: 'Identity_3:0', dytpe: 'DT_FLOAT', shape: [ 1, 384, 16 ] }
|
||||
{ id: 1, name: 'Identity:0', dytpe: 'DT_FLOAT', shape: [ 1, 512, 1 ] }
|
||||
{ id: 2, name: 'Identity_1:0', dytpe: 'DT_FLOAT', shape: [ 1, 384, 1 ] }
|
||||
{ id: 3, name: 'Identity_2:0', dytpe: 'DT_FLOAT', shape: [ 1, 512, 16 ] }
|
||||
INFO: tensors: 112
|
||||
DATA: weights: {
|
||||
files: [ 'blazeface.bin' ],
|
||||
size: { disk: 538928, memory: 538928 },
|
||||
count: { total: 112, float32: 106, int32: 6 },
|
||||
quantized: { none: 112 },
|
||||
values: { total: 134732, float32: 134704, int32: 28 }
|
||||
}
|
||||
DATA: kernel ops: {
|
||||
graph: [ 'Const', 'Placeholder', 'Identity' ],
|
||||
convolution: [ '_FusedConv2D', 'DepthwiseConv2dNative', 'MaxPool' ],
|
||||
arithmetic: [ 'AddV2' ],
|
||||
basic_math: [ 'Relu' ],
|
||||
transformation: [ 'Pad', 'Reshape' ]
|
||||
}
|
||||
INFO: graph model: /home/vlado/dev/human/models/mb3-centernet.json
|
||||
INFO: created on: 2021-05-19T11:50:13.013Z
|
||||
INFO: metadata: { generatedBy: 'https://github.com/610265158/mobilenetv3_centernet', convertedBy: 'https://github.com/vladmandic', version: undefined }
|
||||
INFO: model inputs based on signature
|
||||
{ name: 'tower_0/images', dtype: 'DT_FLOAT', shape: [ 1, 512, 512, 3 ] }
|
||||
INFO: model outputs based on signature
|
||||
{ id: 0, name: 'tower_0/wh', dytpe: 'DT_FLOAT', shape: [ 1, 128, 128, 4 ] }
|
||||
{ id: 1, name: 'tower_0/keypoints', dytpe: 'DT_FLOAT', shape: [ 1, 128, 128, 80 ] }
|
||||
{ id: 2, name: 'tower_0/detections', dytpe: 'DT_FLOAT', shape: [ 1, 100, 6 ] }
|
||||
INFO: tensors: 267
|
||||
DATA: weights: {
|
||||
files: [ 'mb3-centernet.bin' ],
|
||||
size: { disk: 4030290, memory: 8060260 },
|
||||
count: { total: 267, float32: 227, int32: 40 },
|
||||
quantized: { float16: 227, none: 40 },
|
||||
values: { total: 2015065, float32: 2014985, int32: 80 }
|
||||
}
|
||||
DATA: kernel ops: {
|
||||
graph: [ 'Const', 'Placeholder', 'Identity' ],
|
||||
convolution: [ '_FusedConv2D', 'FusedDepthwiseConv2dNative', 'DepthwiseConv2dNative', 'Conv2D', 'MaxPool' ],
|
||||
arithmetic: [ 'Mul', 'Add', 'FloorDiv', 'FloorMod', 'Sub' ],
|
||||
basic_math: [ 'Relu6', 'Relu', 'Sigmoid' ],
|
||||
reduction: [ 'Mean' ],
|
||||
image: [ 'ResizeBilinear' ],
|
||||
slice_join: [ 'ConcatV2', 'GatherV2', 'StridedSlice' ],
|
||||
transformation: [ 'Reshape', 'Cast', 'ExpandDims' ],
|
||||
logical: [ 'Equal' ],
|
||||
evaluation: [ 'TopKV2' ]
|
||||
}
|
||||
INFO: graph model: /home/vlado/dev/human/models/movenet-lightning.json
|
||||
INFO: created on: 2021-05-29T12:26:32.994Z
|
||||
INFO: metadata: { generatedBy: 'https://tfhub.dev/google/movenet/singlepose/lightning/4', convertedBy: 'https://github.com/vladmandic', version: undefined }
|
||||
INFO: model inputs based on signature
|
||||
{ name: 'input:0', dtype: 'DT_INT32', shape: [ 1, 192, 192, 3 ] }
|
||||
INFO: model outputs based on signature
|
||||
{ id: 0, name: 'Identity:0', dytpe: 'DT_FLOAT', shape: [ 1, 1, 17, 3 ] }
|
||||
INFO: tensors: 180
|
||||
DATA: weights: {
|
||||
files: [ 'movenet-lightning.bin' ],
|
||||
size: { disk: 4650216, memory: 9300008 },
|
||||
count: { total: 180, int32: 31, float32: 149 },
|
||||
quantized: { none: 31, float16: 149 },
|
||||
values: { total: 2325002, int32: 106, float32: 2324896 }
|
||||
}
|
||||
DATA: kernel ops: {
|
||||
graph: [ 'Const', 'Placeholder', 'Identity' ],
|
||||
transformation: [ 'Cast', 'ExpandDims', 'Squeeze', 'Reshape' ],
|
||||
slice_join: [ 'Unpack', 'Pack', 'GatherNd', 'ConcatV2' ],
|
||||
arithmetic: [ 'Sub', 'Mul', 'AddV2', 'FloorDiv', 'SquaredDifference', 'RealDiv' ],
|
||||
convolution: [ '_FusedConv2D', 'FusedDepthwiseConv2dNative', 'DepthwiseConv2dNative' ],
|
||||
image: [ 'ResizeBilinear' ],
|
||||
basic_math: [ 'Sigmoid', 'Sqrt' ],
|
||||
reduction: [ 'ArgMax' ]
|
||||
}
|
||||
INFO: graph model: /home/vlado/dev/human/models/selfie.json
|
||||
INFO: created on: 2021-06-04T13:46:56.904Z
|
||||
INFO: metadata: { generatedBy: 'https://github.com/PINTO0309/PINTO_model_zoo/tree/main/109_Selfie_Segmentation', convertedBy: 'https://github.com/vladmandic', version: '561.undefined' }
|
||||
INFO: model inputs based on signature
|
||||
{ name: 'input_1:0', dtype: 'DT_FLOAT', shape: [ 1, 256, 256, 3 ] }
|
||||
INFO: model outputs based on signature
|
||||
{ id: 0, name: 'activation_10:0', dytpe: 'DT_FLOAT', shape: [ 1, 256, 256, 1 ] }
|
||||
INFO: tensors: 136
|
||||
DATA: weights: {
|
||||
files: [ 'selfie.bin' ],
|
||||
size: { disk: 212886, memory: 425732 },
|
||||
count: { total: 136, int32: 4, float32: 132 },
|
||||
quantized: { none: 4, float16: 132 },
|
||||
values: { total: 106433, int32: 10, float32: 106423 }
|
||||
}
|
||||
DATA: kernel ops: {
|
||||
graph: [ 'Const', 'Placeholder' ],
|
||||
convolution: [ 'Conv2D', 'DepthwiseConv2dNative', 'AvgPool', 'Conv2DBackpropInput' ],
|
||||
arithmetic: [ 'Add', 'Mul', 'AddV2', 'AddN' ],
|
||||
basic_math: [ 'Relu6', 'Relu', 'Sigmoid' ],
|
||||
image: [ 'ResizeBilinear' ]
|
||||
}
|
||||
INFO: graph model: /home/vlado/dev/human/models/handtrack.json
|
||||
INFO: created on: 2021-09-21T12:09:47.583Z
|
||||
INFO: metadata: { generatedBy: 'https://github.com/victordibia/handtracking', convertedBy: 'https://github.com/vladmandic', version: '561.undefined' }
|
||||
INFO: model inputs based on signature
|
||||
{ name: 'input_tensor:0', dtype: 'DT_UINT8', shape: [ 1, 320, 320, 3 ] }
|
||||
INFO: model outputs based on signature
|
||||
{ id: 0, name: 'Identity_2:0', dytpe: 'DT_FLOAT', shape: [ 1, 100 ] }
|
||||
{ id: 1, name: 'Identity_4:0', dytpe: 'DT_FLOAT', shape: [ 1, 100 ] }
|
||||
{ id: 2, name: 'Identity_6:0', dytpe: 'DT_FLOAT', shape: [ 1, 12804, 4 ] }
|
||||
{ id: 3, name: 'Identity_1:0', dytpe: 'DT_FLOAT', shape: [ 1, 100, 4 ] }
|
||||
{ id: 4, name: 'Identity_3:0', dytpe: 'DT_FLOAT', shape: [ 1, 100, 8 ] }
|
||||
{ id: 5, name: 'Identity_5:0', dytpe: 'DT_FLOAT', shape: [ 1 ] }
|
||||
{ id: 6, name: 'Identity:0', dytpe: 'DT_FLOAT', shape: [ 1, 100 ] }
{ id: 7, name: 'Identity_7:0', dytpe: 'DT_FLOAT', shape: [ 1, 12804, 8 ] }
INFO: tensors: 619
DATA: weights: {
files: [ 'handtrack.bin' ],
size: { disk: 2964837, memory: 11846016 },
count: { total: 619, int32: 347, float32: 272 },
quantized: { none: 347, uint8: 272 },
values: { total: 2961504, int32: 1111, float32: 2960393 }
}
DATA: kernel ops: {
graph: [ 'Const', 'Placeholder', 'Identity', 'Shape', 'NoOp' ],
control: [ 'TensorListReserve', 'Enter', 'TensorListFromTensor', 'Merge', 'LoopCond', 'Switch', 'Exit', 'TensorListStack', 'NextIteration', 'TensorListSetItem', 'TensorListGetItem' ],
logical: [ 'Less', 'LogicalAnd', 'Select', 'Greater', 'GreaterEqual' ],
convolution: [ '_FusedConv2D', 'FusedDepthwiseConv2dNative', 'DepthwiseConv2dNative' ],
arithmetic: [ 'AddV2', 'Mul', 'Sub', 'Minimum', 'Maximum' ],
transformation: [ 'Cast', 'ExpandDims', 'Squeeze', 'Reshape', 'Pad' ],
slice_join: [ 'Unpack', 'StridedSlice', 'Pack', 'ConcatV2', 'Slice', 'GatherV2', 'Split' ],
image: [ 'ResizeBilinear' ],
basic_math: [ 'Reciprocal', 'Sigmoid', 'Exp' ],
matrices: [ 'Transpose' ],
dynamic: [ 'NonMaxSuppressionV5', 'Where' ],
creation: [ 'Fill', 'Range' ],
evaluation: [ 'TopKV2' ],
reduction: [ 'Sum' ]
}
INFO: graph model: /home/vlado/dev/human/models/antispoof.json
INFO: created on: 2021-10-13T14:20:27.100Z
INFO: metadata: { generatedBy: 'https://www.kaggle.com/anku420/fake-face-detection', convertedBy: 'https://github.com/vladmandic', version: '716.undefined' }
INFO: model inputs based on signature
{ name: 'conv2d_input', dtype: 'DT_FLOAT', shape: [ -1, 128, 128, 3 ] }
INFO: model outputs based on signature
{ id: 0, name: 'activation_4', dytpe: 'DT_FLOAT', shape: [ -1, 1 ] }
INFO: tensors: 11
DATA: weights: {
files: [ 'antispoof.bin' ],
size: { disk: 853098, memory: 1706188 },
count: { total: 11, float32: 10, int32: 1 },
quantized: { float16: 10, none: 1 },
values: { total: 426547, float32: 426545, int32: 2 }
}
DATA: kernel ops: { graph: [ 'Const', 'Placeholder', 'Identity' ], convolution: [ '_FusedConv2D', 'MaxPool' ], basic_math: [ 'Relu', 'Sigmoid' ], transformation: [ 'Reshape' ], matrices: [ '_FusedMatMul' ] }
INFO: graph model: /home/vlado/dev/human/models/handlandmark-full.json
INFO: created on: 2021-10-31T12:27:49.343Z
INFO: metadata: { generatedBy: 'https://github.com/google/mediapipe', convertedBy: 'https://github.com/vladmandic', version: '808.undefined' }
INFO: model inputs based on signature
{ name: 'input_1', dtype: 'DT_FLOAT', shape: [ 1, 224, 224, 3 ] }
INFO: model outputs based on signature
{ id: 0, name: 'Identity_3:0', dytpe: 'DT_FLOAT', shape: [ 1, 63 ] }
{ id: 1, name: 'Identity:0', dytpe: 'DT_FLOAT', shape: [ 1, 63 ] }
{ id: 2, name: 'Identity_1:0', dytpe: 'DT_FLOAT', shape: [ 1, 1 ] }
{ id: 3, name: 'Identity_2:0', dytpe: 'DT_FLOAT', shape: [ 1, 1 ] }
INFO: tensors: 103
DATA: weights: {
files: [ 'handlandmark-full.bin' ],
size: { disk: 5431368, memory: 10862728 },
count: { total: 103, float32: 102, int32: 1 },
quantized: { float16: 102, none: 1 },
values: { total: 2715682, float32: 2715680, int32: 2 }
}
DATA: kernel ops: {
graph: [ 'Const', 'Placeholder', 'Identity' ],
convolution: [ 'Conv2D', 'DepthwiseConv2dNative' ],
arithmetic: [ 'AddV2', 'AddN' ],
basic_math: [ 'Relu6', 'Sigmoid' ],
reduction: [ 'Mean' ],
matrices: [ '_FusedMatMul' ]
}
INFO: graph model: /home/vlado/dev/human/models/liveness.json
INFO: created on: 2021-11-09T12:39:11.760Z
INFO: metadata: { generatedBy: 'https://github.com/leokwu/livenessnet', convertedBy: 'https://github.com/vladmandic', version: '808.undefined' }
INFO: model inputs based on signature
{ name: 'conv2d_1_input', dtype: 'DT_FLOAT', shape: [ -1, 32, 32, 3 ] }
INFO: model outputs based on signature
{ id: 0, name: 'activation_6', dytpe: 'DT_FLOAT', shape: [ -1, 2 ] }
INFO: tensors: 23
DATA: weights: {
files: [ 'liveness.bin' ],
size: { disk: 592976, memory: 592976 },
count: { total: 23, float32: 22, int32: 1 },
quantized: { none: 23 },
values: { total: 148244, float32: 148242, int32: 2 }
}
DATA: kernel ops: {
graph: [ 'Const', 'Placeholder', 'Identity' ],
convolution: [ '_FusedConv2D', 'MaxPool' ],
arithmetic: [ 'Mul', 'Add', 'AddV2' ],
transformation: [ 'Reshape' ],
matrices: [ '_FusedMatMul' ],
normalization: [ 'Softmax' ]
}
```
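The signature dump above can be reproduced directly from the converted graph models. A minimal sketch, assuming `@tensorflow/tfjs-node` is installed; the `inspect` helper is illustrative and not part of the repo, and the output format differs slightly from the INFO/DATA lines above:

```js
// sketch: print model inputs/outputs and tally weight dtypes from the
// weightsManifest embedded in the graph-model json
const fs = require('fs');
const tf = require('@tensorflow/tfjs-node');

async function inspect(path) {
  const model = await tf.loadGraphModel(`file://${path}`);
  for (const t of model.inputs) console.log('input:', { name: t.name, dtype: t.dtype, shape: t.shape });
  for (const t of model.outputs) console.log('output:', { name: t.name, dtype: t.dtype, shape: t.shape });
  // weight counts come straight from the manifest inside the model json
  const manifest = JSON.parse(fs.readFileSync(path, 'utf8')).weightsManifest;
  const byDtype = {};
  for (const group of manifest) for (const w of group.weights) byDtype[w.dtype] = (byDtype[w.dtype] || 0) + 1;
  console.log('tensors by dtype:', byDtype);
}

inspect('/home/vlado/dev/human/models/antispoof.json').catch((err) => console.error(err));
```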
@ -0,0 +1,60 @@
{
"format": "graph-model",
"generatedBy": "https://www.kaggle.com/anku420/fake-face-detection",
"convertedBy": "https://github.com/vladmandic",
"signature":
{
"inputs":
{
"conv2d_input": {"name":"conv2d_input:0","dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"-1"},{"size":"128"},{"size":"128"},{"size":"3"}]}}
},
"outputs":
{
"activation_4": {"name":"Identity:0","dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"-1"},{"size":"1"}]}}
}
},
"modelTopology":
{
"node":
[
{"name":"unknown","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"3"},{"size":"64"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"unknown_0","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"unknown_1","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"64"},{"size":"32"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"unknown_2","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"32"}]}}}}},
{"name":"unknown_3","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"32"},{"size":"16"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"unknown_4","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"16"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential/flatten/Const","op":"Const","attr":{"dtype":{"type":"DT_INT32"},"value":{"tensor":{"dtype":"DT_INT32","tensorShape":{"dim":[{"size":"2"}]}}}}},
{"name":"unknown_5","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"3136"},{"size":"128"}]}}}}},
{"name":"unknown_6","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"128"}]}}}}},
{"name":"unknown_7","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"128"},{"size":"1"}]}}}}},
{"name":"unknown_8","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"1"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"conv2d_input","op":"Placeholder","attr":{"dtype":{"type":"DT_FLOAT"},"shape":{"shape":{"dim":[{"size":"-1"},{"size":"128"},{"size":"128"},{"size":"3"}]}}}},
{"name":"StatefulPartitionedCall/sequential/conv2d/BiasAdd","op":"_FusedConv2D","input":["conv2d_input","unknown","unknown_0"],"device":"/device:CPU:0","attr":{"padding":{"s":"VkFMSUQ="},"num_args":{"i":"1"},"explicit_paddings":{"list":{}},"use_cudnn_on_gpu":{"b":true},"strides":{"list":{"i":["1","1","1","1"]}},"T":{"type":"DT_FLOAT"},"data_format":{"s":"TkhXQw=="},"fused_ops":{"list":{"s":["Qmlhc0FkZA=="]}},"epsilon":{"f":0},"dilations":{"list":{"i":["1","1","1","1"]}}}},
{"name":"StatefulPartitionedCall/sequential/max_pooling2d/MaxPool","op":"MaxPool","input":["StatefulPartitionedCall/sequential/conv2d/BiasAdd"],"attr":{"ksize":{"list":{"i":["1","2","2","1"]}},"T":{"type":"DT_FLOAT"},"data_format":{"s":"TkhXQw=="},"strides":{"list":{"i":["1","2","2","1"]}},"padding":{"s":"VkFMSUQ="},"explicit_paddings":{"list":{}}}},
{"name":"StatefulPartitionedCall/sequential/activation/Relu","op":"Relu","input":["StatefulPartitionedCall/sequential/max_pooling2d/MaxPool"],"attr":{"T":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential/conv2d_1/BiasAdd","op":"_FusedConv2D","input":["StatefulPartitionedCall/sequential/activation/Relu","unknown_1","unknown_2"],"device":"/device:CPU:0","attr":{"epsilon":{"f":0},"strides":{"list":{"i":["1","1","1","1"]}},"fused_ops":{"list":{"s":["Qmlhc0FkZA=="]}},"T":{"type":"DT_FLOAT"},"dilations":{"list":{"i":["1","1","1","1"]}},"num_args":{"i":"1"},"use_cudnn_on_gpu":{"b":true},"padding":{"s":"VkFMSUQ="},"data_format":{"s":"TkhXQw=="},"explicit_paddings":{"list":{}}}},
{"name":"StatefulPartitionedCall/sequential/max_pooling2d_1/MaxPool","op":"MaxPool","input":["StatefulPartitionedCall/sequential/conv2d_1/BiasAdd"],"attr":{"T":{"type":"DT_FLOAT"},"strides":{"list":{"i":["1","2","2","1"]}},"padding":{"s":"VkFMSUQ="},"data_format":{"s":"TkhXQw=="},"ksize":{"list":{"i":["1","2","2","1"]}},"explicit_paddings":{"list":{}}}},
{"name":"StatefulPartitionedCall/sequential/activation_1/Relu","op":"Relu","input":["StatefulPartitionedCall/sequential/max_pooling2d_1/MaxPool"],"attr":{"T":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential/conv2d_2/BiasAdd","op":"_FusedConv2D","input":["StatefulPartitionedCall/sequential/activation_1/Relu","unknown_3","unknown_4"],"device":"/device:CPU:0","attr":{"strides":{"list":{"i":["1","1","1","1"]}},"data_format":{"s":"TkhXQw=="},"use_cudnn_on_gpu":{"b":true},"T":{"type":"DT_FLOAT"},"fused_ops":{"list":{"s":["Qmlhc0FkZA=="]}},"num_args":{"i":"1"},"explicit_paddings":{"list":{}},"epsilon":{"f":0},"padding":{"s":"VkFMSUQ="},"dilations":{"list":{"i":["1","1","1","1"]}}}},
{"name":"StatefulPartitionedCall/sequential/max_pooling2d_2/MaxPool","op":"MaxPool","input":["StatefulPartitionedCall/sequential/conv2d_2/BiasAdd"],"attr":{"data_format":{"s":"TkhXQw=="},"explicit_paddings":{"list":{}},"padding":{"s":"VkFMSUQ="},"strides":{"list":{"i":["1","2","2","1"]}},"ksize":{"list":{"i":["1","2","2","1"]}},"T":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential/activation_2/Relu","op":"Relu","input":["StatefulPartitionedCall/sequential/max_pooling2d_2/MaxPool"],"attr":{"T":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential/flatten/Reshape","op":"Reshape","input":["StatefulPartitionedCall/sequential/activation_2/Relu","StatefulPartitionedCall/sequential/flatten/Const"],"attr":{"T":{"type":"DT_FLOAT"},"Tshape":{"type":"DT_INT32"}}},
{"name":"StatefulPartitionedCall/sequential/activation_3/Relu","op":"_FusedMatMul","input":["StatefulPartitionedCall/sequential/flatten/Reshape","unknown_5","unknown_6"],"device":"/device:CPU:0","attr":{"num_args":{"i":"1"},"transpose_b":{"b":false},"fused_ops":{"list":{"s":["Qmlhc0FkZA==","UmVsdQ=="]}},"epsilon":{"f":0},"T":{"type":"DT_FLOAT"},"transpose_a":{"b":false}}},
{"name":"StatefulPartitionedCall/sequential/dense_1/BiasAdd","op":"_FusedMatMul","input":["StatefulPartitionedCall/sequential/activation_3/Relu","unknown_7","unknown_8"],"device":"/device:CPU:0","attr":{"transpose_b":{"b":false},"fused_ops":{"list":{"s":["Qmlhc0FkZA=="]}},"num_args":{"i":"1"},"T":{"type":"DT_FLOAT"},"epsilon":{"f":0},"transpose_a":{"b":false}}},
{"name":"StatefulPartitionedCall/sequential/activation_4/Sigmoid","op":"Sigmoid","input":["StatefulPartitionedCall/sequential/dense_1/BiasAdd"],"attr":{"T":{"type":"DT_FLOAT"}}},
{"name":"Identity","op":"Identity","input":["StatefulPartitionedCall/sequential/activation_4/Sigmoid"],"attr":{"T":{"type":"DT_FLOAT"}}}
],
"library": {},
"versions":
{
"producer": 716
}
},
"weightsManifest":
[
{
"paths": ["antispoof.bin"],
"weights": [{"name":"unknown","shape":[3,3,3,64],"dtype":"float32","quantization":{"dtype":"float16","original_dtype":"float32"}},{"name":"unknown_0","shape":[64],"dtype":"float32","quantization":{"dtype":"float16","original_dtype":"float32"}},{"name":"unknown_1","shape":[3,3,64,32],"dtype":"float32","quantization":{"dtype":"float16","original_dtype":"float32"}},{"name":"unknown_2","shape":[32],"dtype":"float32","quantization":{"dtype":"float16","original_dtype":"float32"}},{"name":"unknown_3","shape":[3,3,32,16],"dtype":"float32","quantization":{"dtype":"float16","original_dtype":"float32"}},{"name":"unknown_4","shape":[16],"dtype":"float32","quantization":{"dtype":"float16","original_dtype":"float32"}},{"name":"StatefulPartitionedCall/sequential/flatten/Const","shape":[2],"dtype":"int32"},{"name":"unknown_5","shape":[3136,128],"dtype":"float32","quantization":{"dtype":"float16","original_dtype":"float32"}},{"name":"unknown_6","shape":[128],"dtype":"float32","quantization":{"dtype":"float16","original_dtype":"float32"}},{"name":"unknown_7","shape":[128,1],"dtype":"float32","quantization":{"dtype":"float16","original_dtype":"float32"}},{"name":"unknown_8","shape":[1],"dtype":"float32","quantization":{"dtype":"float16","original_dtype":"float32"}}]
}
]
}
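Most weights in the manifest above carry `"quantization":{"dtype":"float16"}`, which is what the earlier `DATA: weights` block for antispoof reflects: roughly half the bytes on disk versus in memory. A quick back-of-the-envelope check using the value counts from that block (plain Node, no dependencies):

```js
// float16-quantized values take 2 bytes on disk but are expanded to float32
// (4 bytes) in memory; the lone int32 shape constant stays 4 bytes in both
const float32Values = 426545; // values.float32 from the antispoof dump
const int32Values = 2;        // values.int32 from the antispoof dump

const disk = float32Values * 2 + int32Values * 4;  // 853098
const memory = (float32Values + int32Values) * 4;  // 1706188
console.log({ disk, memory }); // matches size: { disk: 853098, memory: 1706188 }
```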
@ -0,0 +1,79 @@
{
"format": "graph-model",
"generatedBy": "https://github.com/leokwu/livenessnet",
"convertedBy": "https://github.com/vladmandic",
"signature":
{
"inputs":
{
"conv2d_1_input": {"name":"conv2d_1_input:0","dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"-1"},{"size":"32"},{"size":"32"},{"size":"3"}]}}
},
"outputs":
{
"activation_6": {"name":"Identity:0","dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"-1"},{"size":"2"}]}}
}
},
"modelTopology":
{
"node":
[
{"name":"StatefulPartitionedCall/sequential_1/conv2d_1/Conv2D/ReadVariableOp","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"3"},{"size":"16"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/conv2d_1/BiasAdd/ReadVariableOp","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"16"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/conv2d_2/Conv2D/ReadVariableOp","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"16"},{"size":"16"}]}}}}},
{"name":"StatefulPartitionedCall/sequential_1/conv2d_2/BiasAdd/ReadVariableOp","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"16"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/conv2d_3/Conv2D/ReadVariableOp","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"16"},{"size":"32"}]}}}}},
{"name":"StatefulPartitionedCall/sequential_1/conv2d_3/BiasAdd/ReadVariableOp","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"32"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/conv2d_4/Conv2D/ReadVariableOp","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"32"},{"size":"32"}]}}}}},
{"name":"StatefulPartitionedCall/sequential_1/conv2d_4/BiasAdd/ReadVariableOp","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"32"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/flatten_1/Const","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_INT32","tensorShape":{"dim":[{"size":"2"}]}}},"dtype":{"type":"DT_INT32"}}},
{"name":"StatefulPartitionedCall/sequential_1/dense_1/MatMul/ReadVariableOp","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"2048"},{"size":"64"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/dense_1/BiasAdd/ReadVariableOp","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/mul","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/sub","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"64"}]}}}}},
{"name":"StatefulPartitionedCall/sequential_1/dense_2/MatMul/ReadVariableOp","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"64"},{"size":"2"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/dense_2/BiasAdd/ReadVariableOp","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"2"}]}}}}},
{"name":"conv2d_1_input","op":"Placeholder","attr":{"shape":{"shape":{"dim":[{"size":"-1"},{"size":"32"},{"size":"32"},{"size":"3"}]}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3/Scaled","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"16"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3/Offset","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"16"}]}}}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3/Scaled","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"16"}]}}}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3/Offset","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"16"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3/Scaled","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"32"}]}}}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3/Offset","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"32"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3/Scaled","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"32"}]}}},"dtype":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3/Offset","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"32"}]}}}}},
{"name":"StatefulPartitionedCall/sequential_1/activation_1/Relu","op":"_FusedConv2D","input":["conv2d_1_input","StatefulPartitionedCall/sequential_1/conv2d_1/Conv2D/ReadVariableOp","StatefulPartitionedCall/sequential_1/conv2d_1/BiasAdd/ReadVariableOp"],"device":"/device:CPU:0","attr":{"dilations":{"list":{"i":["1","1","1","1"]}},"T":{"type":"DT_FLOAT"},"explicit_paddings":{"list":{}},"use_cudnn_on_gpu":{"b":true},"data_format":{"s":"TkhXQw=="},"num_args":{"i":"1"},"fused_ops":{"list":{"s":["Qmlhc0FkZA==","UmVsdQ=="]}},"strides":{"list":{"i":["1","1","1","1"]}},"epsilon":{"f":0},"padding":{"s":"U0FNRQ=="}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3/Mul","op":"Mul","input":["StatefulPartitionedCall/sequential_1/activation_1/Relu","StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3/Scaled"],"attr":{"T":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3","op":"Add","input":["StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3/Mul","StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3/Offset"],"attr":{"T":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/activation_2/Relu","op":"_FusedConv2D","input":["StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3","StatefulPartitionedCall/sequential_1/conv2d_2/Conv2D/ReadVariableOp","StatefulPartitionedCall/sequential_1/conv2d_2/BiasAdd/ReadVariableOp"],"device":"/device:CPU:0","attr":{"epsilon":{"f":0},"explicit_paddings":{"list":{}},"T":{"type":"DT_FLOAT"},"dilations":{"list":{"i":["1","1","1","1"]}},"strides":{"list":{"i":["1","1","1","1"]}},"fused_ops":{"list":{"s":["Qmlhc0FkZA==","UmVsdQ=="]}},"data_format":{"s":"TkhXQw=="},"num_args":{"i":"1"},"padding":{"s":"U0FNRQ=="},"use_cudnn_on_gpu":{"b":true}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3/Mul","op":"Mul","input":["StatefulPartitionedCall/sequential_1/activation_2/Relu","StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3/Scaled"],"attr":{"T":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3","op":"Add","input":["StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3/Mul","StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3/Offset"],"attr":{"T":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/max_pooling2d_1/MaxPool","op":"MaxPool","input":["StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3"],"attr":{"T":{"type":"DT_FLOAT"},"data_format":{"s":"TkhXQw=="},"padding":{"s":"VkFMSUQ="},"ksize":{"list":{"i":["1","2","2","1"]}},"explicit_paddings":{"list":{}},"strides":{"list":{"i":["1","2","2","1"]}}}},
{"name":"StatefulPartitionedCall/sequential_1/activation_3/Relu","op":"_FusedConv2D","input":["StatefulPartitionedCall/sequential_1/max_pooling2d_1/MaxPool","StatefulPartitionedCall/sequential_1/conv2d_3/Conv2D/ReadVariableOp","StatefulPartitionedCall/sequential_1/conv2d_3/BiasAdd/ReadVariableOp"],"device":"/device:CPU:0","attr":{"use_cudnn_on_gpu":{"b":true},"padding":{"s":"U0FNRQ=="},"T":{"type":"DT_FLOAT"},"explicit_paddings":{"list":{}},"strides":{"list":{"i":["1","1","1","1"]}},"data_format":{"s":"TkhXQw=="},"num_args":{"i":"1"},"fused_ops":{"list":{"s":["Qmlhc0FkZA==","UmVsdQ=="]}},"dilations":{"list":{"i":["1","1","1","1"]}},"epsilon":{"f":0}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3/Mul","op":"Mul","input":["StatefulPartitionedCall/sequential_1/activation_3/Relu","StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3/Scaled"],"attr":{"T":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3","op":"Add","input":["StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3/Mul","StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3/Offset"],"attr":{"T":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/activation_4/Relu","op":"_FusedConv2D","input":["StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3","StatefulPartitionedCall/sequential_1/conv2d_4/Conv2D/ReadVariableOp","StatefulPartitionedCall/sequential_1/conv2d_4/BiasAdd/ReadVariableOp"],"device":"/device:CPU:0","attr":{"num_args":{"i":"1"},"padding":{"s":"U0FNRQ=="},"use_cudnn_on_gpu":{"b":true},"data_format":{"s":"TkhXQw=="},"T":{"type":"DT_FLOAT"},"strides":{"list":{"i":["1","1","1","1"]}},"epsilon":{"f":0},"explicit_paddings":{"list":{}},"fused_ops":{"list":{"s":["Qmlhc0FkZA==","UmVsdQ=="]}},"dilations":{"list":{"i":["1","1","1","1"]}}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3/Mul","op":"Mul","input":["StatefulPartitionedCall/sequential_1/activation_4/Relu","StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3/Scaled"],"attr":{"T":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3","op":"Add","input":["StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3/Mul","StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3/Offset"],"attr":{"T":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/max_pooling2d_2/MaxPool","op":"MaxPool","input":["StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3"],"attr":{"explicit_paddings":{"list":{}},"T":{"type":"DT_FLOAT"},"data_format":{"s":"TkhXQw=="},"padding":{"s":"VkFMSUQ="},"ksize":{"list":{"i":["1","2","2","1"]}},"strides":{"list":{"i":["1","2","2","1"]}}}},
{"name":"StatefulPartitionedCall/sequential_1/flatten_1/Reshape","op":"Reshape","input":["StatefulPartitionedCall/sequential_1/max_pooling2d_2/MaxPool","StatefulPartitionedCall/sequential_1/flatten_1/Const"],"attr":{"T":{"type":"DT_FLOAT"},"Tshape":{"type":"DT_INT32"}}},
{"name":"StatefulPartitionedCall/sequential_1/activation_5/Relu","op":"_FusedMatMul","input":["StatefulPartitionedCall/sequential_1/flatten_1/Reshape","StatefulPartitionedCall/sequential_1/dense_1/MatMul/ReadVariableOp","StatefulPartitionedCall/sequential_1/dense_1/BiasAdd/ReadVariableOp"],"device":"/device:CPU:0","attr":{"fused_ops":{"list":{"s":["Qmlhc0FkZA==","UmVsdQ=="]}},"T":{"type":"DT_FLOAT"},"epsilon":{"f":0},"transpose_b":{"b":false},"num_args":{"i":"1"},"transpose_a":{"b":false}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/mul_1","op":"Mul","input":["StatefulPartitionedCall/sequential_1/activation_5/Relu","StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/mul"],"attr":{"T":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/add_1","op":"AddV2","input":["StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/mul_1","StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/sub"],"attr":{"T":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/dense_2/BiasAdd","op":"_FusedMatMul","input":["StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/add_1","StatefulPartitionedCall/sequential_1/dense_2/MatMul/ReadVariableOp","StatefulPartitionedCall/sequential_1/dense_2/BiasAdd/ReadVariableOp"],"device":"/device:CPU:0","attr":{"fused_ops":{"list":{"s":["Qmlhc0FkZA=="]}},"transpose_a":{"b":false},"epsilon":{"f":0},"transpose_b":{"b":false},"num_args":{"i":"1"},"T":{"type":"DT_FLOAT"}}},
{"name":"StatefulPartitionedCall/sequential_1/activation_6/Softmax","op":"Softmax","input":["StatefulPartitionedCall/sequential_1/dense_2/BiasAdd"],"attr":{"T":{"type":"DT_FLOAT"}}},
{"name":"Identity","op":"Identity","input":["StatefulPartitionedCall/sequential_1/activation_6/Softmax"],"attr":{"T":{"type":"DT_FLOAT"}}}
],
"library": {},
"versions":
{
"producer": 808
}
},
"weightsManifest":
[
{
"paths": ["liveness.bin"],
"weights": [{"name":"StatefulPartitionedCall/sequential_1/conv2d_1/Conv2D/ReadVariableOp","shape":[3,3,3,16],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/conv2d_1/BiasAdd/ReadVariableOp","shape":[16],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/conv2d_2/Conv2D/ReadVariableOp","shape":[3,3,16,16],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/conv2d_2/BiasAdd/ReadVariableOp","shape":[16],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/conv2d_3/Conv2D/ReadVariableOp","shape":[3,3,16,32],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/conv2d_3/BiasAdd/ReadVariableOp","shape":[32],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/conv2d_4/Conv2D/ReadVariableOp","shape":[3,3,32,32],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/conv2d_4/BiasAdd/ReadVariableOp","shape":[32],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/flatten_1/Const","shape":[2],"dtype":"int32"},{"name":"StatefulPartitionedCall/sequential_1/dense_1/MatMul/ReadVariableOp","shape":[2048,64],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/dense_1/BiasAdd/ReadVariableOp","shape":[64],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/mul","shape":[64],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/sub","shape":[64],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/dense_2/MatMul/ReadVariableOp","shape":[64,2],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/dense_2/BiasAdd/ReadVariableOp","shape":[2],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3/Scaled","shape":[16],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3/Offset","shape":[16],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3/Scaled","shape":[16],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3/Offset","shape":[16],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3/Scaled","shape":[32],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3/Offset","shape":[32],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3/Scaled","shape":[32],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3/Offset","shape":[32],"dtype":"float32"}]
}
]
}
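Note that the converter has folded each Keras BatchNormalization layer of livenessnet into a pair of per-channel constants (`.../FusedBatchNormV3/Scaled` and `.../FusedBatchNormV3/Offset`) applied with plain `Mul`/`Add` nodes. A minimal sketch of that fold; the parameter values below are made up for illustration, not read from the model:

```js
// y = gamma * (x - mean) / sqrt(variance + eps) + beta  ==  scale * x + offset,
// so the graph only needs Mul(x, scale) followed by Add(..., offset)
function foldBatchNorm(gamma, beta, movingMean, movingVariance, epsilon = 1e-3) {
  const scale = gamma.map((g, i) => g / Math.sqrt(movingVariance[i] + epsilon));
  const offset = beta.map((b, i) => b - scale[i] * movingMean[i]);
  return { scale, offset };
}

// example with two hypothetical channels
console.log(foldBatchNorm([1.0, 0.9], [0.1, -0.2], [0.5, 0.0], [1.0, 4.0]));
```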