Compare commits
520 commits in compared range: `a6bd39f166` … `6b736ebb0d`

.api-extractor.json
@@ -1,8 +1,9 @@
{
"$schema": "https://developer.microsoft.com/json-schemas/api-extractor/v7/api-extractor.schema.json",
"mainEntryPointFilePath": "types/lib/src/human.d.ts",
"bundledPackages": ["@tensorflow/tfjs-core", "@tensorflow/tfjs-converter", "@tensorflow/tfjs-data", "@tensorflow/tfjs-layers"],
"compiler": {
"skipLibCheck": true
"skipLibCheck": false
},
"newlineKind": "lf",
"dtsRollup": {

38 .build.json
@ -12,10 +12,10 @@
|
|||
"clean": ["clean"]
|
||||
},
|
||||
"clean": {
|
||||
"locations": ["dist/*", "types/*", "typedoc/*"]
|
||||
"locations": ["dist/*", "types/lib/*", "typedoc/*"]
|
||||
},
|
||||
"lint": {
|
||||
"locations": [ "**/*.json", "src/**/*.ts", "test/**/*.js", "demo/**/*.js", "**/*.md" ],
|
||||
"locations": [ "*.json", "src/**/*.ts", "test/**/*.js", "demo/**/*.js" ],
|
||||
"rules": { }
|
||||
},
|
||||
"changelog": {
|
||||
|
@ -24,8 +24,8 @@
|
|||
"serve": {
|
||||
"sslKey": "node_modules/@vladmandic/build/cert/https.key",
|
||||
"sslCrt": "node_modules/@vladmandic/build/cert/https.crt",
|
||||
"httpPort": 8000,
|
||||
"httpsPort": 8001,
|
||||
"httpPort": 10030,
|
||||
"httpsPort": 10031,
|
||||
"documentRoot": ".",
|
||||
"defaultFolder": "demo",
|
||||
"defaultFile": "index.html"
|
||||
|
@ -39,13 +39,6 @@
|
|||
"banner": { "js": "/*\n Human\n homepage: <https://github.com/vladmandic/human>\n author: <https://github.com/vladmandic>'\n*/\n" }
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"name": "tfjs/browser/version",
|
||||
"platform": "browser",
|
||||
"format": "esm",
|
||||
"input": "tfjs/tf-version.ts",
|
||||
"output": "dist/tfjs.version.js"
|
||||
},
|
||||
{
|
||||
"name": "tfjs/nodejs/cpu",
|
||||
"platform": "node",
|
||||
|
@ -84,7 +77,6 @@
|
|||
"format": "cjs",
|
||||
"input": "tfjs/tf-node-wasm.ts",
|
||||
"output": "dist/tfjs.esm.js",
|
||||
"minify": false,
|
||||
"external": ["@tensorflow"]
|
||||
},
|
||||
{
|
||||
|
@ -95,6 +87,13 @@
|
|||
"output": "dist/human.node-wasm.js",
|
||||
"external": ["@tensorflow"]
|
||||
},
|
||||
{
|
||||
"name": "tfjs/browser/version",
|
||||
"platform": "browser",
|
||||
"format": "esm",
|
||||
"input": "tfjs/tf-version.ts",
|
||||
"output": "dist/tfjs.version.js"
|
||||
},
|
||||
{
|
||||
"name": "tfjs/browser/esm/nobundle",
|
||||
"platform": "browser",
|
||||
|
@ -113,13 +112,13 @@
|
|||
"external": ["@tensorflow"]
|
||||
},
|
||||
{
|
||||
"name": "tfjs/browser/esm/bundle",
|
||||
"name": "tfjs/browser/esm/custom",
|
||||
"platform": "browser",
|
||||
"format": "esm",
|
||||
"input": "tfjs/tf-browser.ts",
|
||||
"input": "tfjs/tf-custom.ts",
|
||||
"output": "dist/tfjs.esm.js",
|
||||
"sourcemap": false,
|
||||
"minify": true
|
||||
"minify": false
|
||||
},
|
||||
{
|
||||
"name": "human/browser/iife/bundle",
|
||||
|
@ -160,15 +159,6 @@
|
|||
"output": "demo/faceid/index.js",
|
||||
"sourcemap": true,
|
||||
"external": ["*/human.esm.js"]
|
||||
},
|
||||
{
|
||||
"name": "demo/tracker",
|
||||
"platform": "browser",
|
||||
"format": "esm",
|
||||
"input": "demo/tracker/index.ts",
|
||||
"output": "demo/tracker/index.js",
|
||||
"sourcemap": true,
|
||||
"external": ["*/human.esm.js"]
|
||||
}
|
||||
]
|
||||
},
|
||||
|
|
|
@ -1,28 +1,29 @@
|
|||
{
|
||||
"globals": {
|
||||
},
|
||||
"rules": {
|
||||
"@typescript-eslint/no-require-imports":"off"
|
||||
},
|
||||
"globals": {},
|
||||
"overrides": [
|
||||
{
|
||||
"files": ["**/*.ts"],
|
||||
"parser": "@typescript-eslint/parser",
|
||||
"parserOptions": { "ecmaVersion": "latest", "project": ["./tsconfig.json"] },
|
||||
"plugins": ["@typescript-eslint"],
|
||||
"env": {
|
||||
"browser": true,
|
||||
"commonjs": false,
|
||||
"node": false,
|
||||
"es2021": true
|
||||
},
|
||||
"parser": "@typescript-eslint/parser",
|
||||
"parserOptions": {
|
||||
"ecmaVersion": "latest",
|
||||
"project": ["./tsconfig.json"]
|
||||
},
|
||||
"plugins": [
|
||||
"@typescript-eslint"
|
||||
],
|
||||
"extends": [
|
||||
"airbnb-base",
|
||||
"eslint:recommended",
|
||||
"plugin:@typescript-eslint/strict",
|
||||
"plugin:@typescript-eslint/eslint-recommended",
|
||||
"plugin:@typescript-eslint/recommended",
|
||||
"plugin:@typescript-eslint/recommended-requiring-type-checking",
|
||||
"plugin:@typescript-eslint/eslint-recommended",
|
||||
"plugin:@typescript-eslint/strict",
|
||||
"plugin:import/recommended",
|
||||
"plugin:promise/recommended"
|
||||
],
|
||||
|
@ -30,12 +31,9 @@
|
|||
"@typescript-eslint/ban-ts-comment":"off",
|
||||
"@typescript-eslint/dot-notation":"off",
|
||||
"@typescript-eslint/no-empty-interface":"off",
|
||||
"@typescript-eslint/no-empty-object-type":"off",
|
||||
"@typescript-eslint/no-inferrable-types":"off",
|
||||
"@typescript-eslint/no-misused-promises":"off",
|
||||
"@typescript-eslint/no-require-imports":"off",
|
||||
"@typescript-eslint/no-unnecessary-condition":"off",
|
||||
"@typescript-eslint/no-unnecessary-type-assertion":"off",
|
||||
"@typescript-eslint/no-unsafe-argument":"off",
|
||||
"@typescript-eslint/no-unsafe-assignment":"off",
|
||||
"@typescript-eslint/no-unsafe-call":"off",
|
||||
|
@ -74,15 +72,20 @@
|
|||
},
|
||||
{
|
||||
"files": ["**/*.d.ts"],
|
||||
"parser": "@typescript-eslint/parser",
|
||||
"parserOptions": { "ecmaVersion": "latest", "project": ["./tsconfig.json"] },
|
||||
"plugins": ["@typescript-eslint"],
|
||||
"env": {
|
||||
"browser": true,
|
||||
"commonjs": false,
|
||||
"node": false,
|
||||
"es2021": true
|
||||
},
|
||||
"parser": "@typescript-eslint/parser",
|
||||
"parserOptions": {
|
||||
"ecmaVersion": "latest",
|
||||
"project": ["./tsconfig.json"]
|
||||
},
|
||||
"plugins": [
|
||||
"@typescript-eslint"
|
||||
],
|
||||
"extends": [
|
||||
"airbnb-base",
|
||||
"eslint:recommended",
|
||||
|
@ -117,14 +120,18 @@
|
|||
},
|
||||
{
|
||||
"files": ["**/*.js"],
|
||||
"parserOptions": { "sourceType": "module", "ecmaVersion": "latest" },
|
||||
"plugins": [],
|
||||
"env": {
|
||||
"browser": true,
|
||||
"commonjs": true,
|
||||
"node": true,
|
||||
"es2021": true
|
||||
},
|
||||
"parserOptions": {
|
||||
"sourceType": "module",
|
||||
"ecmaVersion": "latest"
|
||||
},
|
||||
"plugins": [
|
||||
],
|
||||
"extends": [
|
||||
"airbnb-base",
|
||||
"eslint:recommended",
|
||||
|
@ -154,40 +161,42 @@
|
|||
},
|
||||
{
|
||||
"files": ["**/*.json"],
|
||||
"parserOptions": { "ecmaVersion": "latest" },
|
||||
"plugins": ["json"],
|
||||
"env": {
|
||||
"browser": false,
|
||||
"commonjs": false,
|
||||
"node": false,
|
||||
"es2021": false
|
||||
},
|
||||
"extends": []
|
||||
"parserOptions": {
|
||||
"ecmaVersion": "latest"
|
||||
},
|
||||
"plugins": [
|
||||
"json"
|
||||
],
|
||||
"extends": [
|
||||
"plugin:json/recommended"
|
||||
]
|
||||
},
|
||||
{
|
||||
"files": ["**/*.md"],
|
||||
"plugins": ["markdown"],
|
||||
"processor": "markdown/markdown",
|
||||
"rules": {
|
||||
"no-undef":"off"
|
||||
}
|
||||
"files": ["**/*.html"],
|
||||
"env": {
|
||||
"browser": true,
|
||||
"commonjs": false,
|
||||
"node": false,
|
||||
"es2021": false
|
||||
},
|
||||
{
|
||||
"files": ["**/*.md/*.js"],
|
||||
"parserOptions": {
|
||||
"sourceType": "module",
|
||||
"ecmaVersion": "latest"
|
||||
},
|
||||
"parser": "@html-eslint/parser",
|
||||
"extends": ["plugin:@html-eslint/recommended"],
|
||||
"plugins": [
|
||||
"html", "@html-eslint"
|
||||
],
|
||||
"rules": {
|
||||
"@typescript-eslint/no-unused-vars":"off",
|
||||
"@typescript-eslint/triple-slash-reference":"off",
|
||||
"import/newline-after-import":"off",
|
||||
"import/no-unresolved":"off",
|
||||
"no-console":"off",
|
||||
"no-global-assign":"off",
|
||||
"no-multi-spaces":"off",
|
||||
"no-restricted-globals":"off",
|
||||
"no-undef":"off",
|
||||
"no-unused-vars":"off",
|
||||
"node/no-missing-import":"off",
|
||||
"node/no-missing-require":"off",
|
||||
"promise/catch-or-return":"off"
|
||||
"@html-eslint/element-newline":"off",
|
||||
"@html-eslint/indent": ["error", 2]
|
||||
}
|
||||
}
|
||||
],
|
||||
|
@ -198,7 +207,6 @@
|
|||
"demo/helpers/*.js",
|
||||
"demo/typescript/*.js",
|
||||
"demo/faceid/*.js",
|
||||
"demo/tracker/*.js",
|
||||
"typedoc"
|
||||
]
|
||||
}
|
||||
|
|
|
@@ -1,9 +1,4 @@
node_modules/
types/lib
pnpm-lock.yaml
package-lock.json
*.swp
samples/**/*.mp4
samples/**/*.webm
temp
tmp

@@ -1,7 +1,6 @@
{
"MD012": false,
"MD013": false,
"MD029": false,
"MD033": false,
"MD036": false,
"MD041": false

3 .npmrc
@@ -1,5 +1,4 @@
force=true
omit=dev
production=true
legacy-peer-deps=true
strict-peer-dependencies=false
node-options='--no-deprecation'

137 CHANGELOG.md
@ -1,6 +1,6 @@
|
|||
# @vladmandic/human
|
||||
|
||||
Version: **3.3.6**
|
||||
Version: **2.11.1**
|
||||
Description: **Human: AI-powered 3D Face Detection & Rotation Tracking, Face Description & Recognition, Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction, Gesture Recognition**
|
||||
|
||||
Author: **Vladimir Mandic <mandic00@live.com>**
|
||||
|
@ -9,132 +9,11 @@
|
|||
|
||||
## Changelog
|
||||
|
||||
### **3.3.6** 2025/08/26 mandic00@live.com
|
||||
### **HEAD -> main** 2022/10/09 mandic00@live.com
|
||||
|
||||
|
||||
### **origin/main** 2025/02/05 mandic00@live.com
|
||||
### **origin/main** 2022/10/02 mandic00@live.com
|
||||
|
||||
- full rebuild
|
||||
|
||||
### **3.3.5** 2025/02/05 mandic00@live.com
|
||||
|
||||
- rebuild
|
||||
- add human.draw.tensor method
|
||||
|
||||
### **3.3.4** 2024/10/24 mandic00@live.com
|
||||
|
||||
|
||||
### **3.3.3** 2024/10/14 mandic00@live.com
|
||||
|
||||
- add loaded property to model stats and mark models not loaded correctly.
|
||||
- release build
|
||||
|
||||
### **3.3.2** 2024/09/11 mandic00@live.com
|
||||
|
||||
- full rebuild
|
||||
|
||||
### **3.3.1** 2024/09/11 mandic00@live.com
|
||||
|
||||
- add config.face.detector.square option
|
||||
- human 3.3 alpha test run
|
||||
- human 3.3 alpha with new build environment
|
||||
- release rebuild
|
||||
- fix flazeface tensor scale and update build platform
|
||||
|
||||
### **3.2.2** 2024/04/17 mandic00@live.com
|
||||
|
||||
|
||||
### **release: 3.2.1** 2024/02/15 mandic00@live.com
|
||||
|
||||
|
||||
### **3.2.1** 2024/02/15 mandic00@live.com
|
||||
|
||||
|
||||
### **3.2.0** 2023/12/06 mandic00@live.com
|
||||
|
||||
- set browser false when navigator object is empty
|
||||
- https://github.com/vladmandic/human/issues/402
|
||||
|
||||
### **release: 3.1.2** 2023/09/18 mandic00@live.com
|
||||
|
||||
- full rebuild
|
||||
|
||||
### **3.1.2** 2023/09/18 mandic00@live.com
|
||||
|
||||
- major toolkit upgrade
|
||||
- full rebuild
|
||||
- major toolkit upgrade
|
||||
|
||||
### **3.1.1** 2023/08/05 mandic00@live.com
|
||||
|
||||
- fixes plus tfjs upgrade for new release
|
||||
|
||||
### **3.0.7** 2023/06/12 mandic00@live.com
|
||||
|
||||
- full rebuild
|
||||
- fix memory leak in histogramequalization
|
||||
- initial work on tracker
|
||||
|
||||
### **3.0.6** 2023/03/21 mandic00@live.com
|
||||
|
||||
- add optional crop to multiple models
|
||||
- fix movenet-multipose
|
||||
- add electron detection
|
||||
- fix gender-ssrnet-imdb
|
||||
- add movenet-multipose workaround
|
||||
- rebuild and publish
|
||||
- add face.detector.minsize configurable setting
|
||||
- add affectnet
|
||||
|
||||
### **3.0.5** 2023/02/02 mandic00@live.com
|
||||
|
||||
- add gear-e models
|
||||
- detect react-native
|
||||
- redo blazeface annotations
|
||||
|
||||
### **3.0.4** 2023/01/29 mandic00@live.com
|
||||
|
||||
- make naviator calls safe
|
||||
- fix facedetector-only configs
|
||||
|
||||
### **3.0.3** 2023/01/07 mandic00@live.com
|
||||
|
||||
- full rebuild
|
||||
|
||||
### **3.0.2** 2023/01/06 mandic00@live.com
|
||||
|
||||
- default face.rotation disabled
|
||||
|
||||
### **release: 3.0.1** 2022/11/22 mandic00@live.com
|
||||
|
||||
|
||||
### **3.0.1** 2022/11/22 mandic00@live.com
|
||||
|
||||
- support dynamic loads
|
||||
- polish demos
|
||||
- add facedetect demo and fix model async load
|
||||
- enforce markdown linting
|
||||
- cleanup git history
|
||||
- default empty result
|
||||
- refactor draw and models namespaces
|
||||
- refactor distance
|
||||
- add basic anthropometry
|
||||
- added webcam id specification
|
||||
- include external typedefs
|
||||
- prepare external typedefs
|
||||
- rebuild all
|
||||
- include project files for types
|
||||
- architectural improvements
|
||||
- refresh dependencies
|
||||
- add named exports
|
||||
- add draw label templates
|
||||
- reduce dev dependencies
|
||||
- tensor rank strong typechecks
|
||||
- rebuild dependencies
|
||||
|
||||
### **2.11.1** 2022/10/09 mandic00@live.com
|
||||
|
||||
- add rvm segmentation model
|
||||
- add human.webcam methods
|
||||
- create funding.yml
|
||||
- fix rotation interpolation
|
||||
|
@ -146,7 +25,9 @@
|
|||
### **2.10.2** 2022/09/11 mandic00@live.com
|
||||
|
||||
- add node.js esm compatibility (#292)
|
||||
- release
|
||||
|
||||
### **release: 2.10.1** 2022/09/07 mandic00@live.com
|
||||
|
||||
|
||||
### **2.10.1** 2022/09/07 mandic00@live.com
|
||||
|
||||
|
@ -182,7 +63,9 @@
|
|||
|
||||
### **2.9.2** 2022/08/08 mandic00@live.com
|
||||
|
||||
- release rebuild
|
||||
|
||||
### **release: 2.9.1** 2022/07/25 mandic00@live.com
|
||||
|
||||
|
||||
### **2.9.1** 2022/07/25 mandic00@live.com
|
||||
|
||||
|
@ -229,6 +112,7 @@
|
|||
- enable precompile as part of warmup
|
||||
- prepare release beta
|
||||
- change default face crop
|
||||
- face attention model is available in human-models
|
||||
- beta release 2.7
|
||||
- refactor draw methods
|
||||
- implement face attention model
|
||||
|
@ -540,6 +424,7 @@
|
|||
- implemented human.next global interpolation method
|
||||
- finished draw buffering and smoothing and enabled by default
|
||||
- implemented service worker
|
||||
- quantized centernet
|
||||
- release candidate
|
||||
- added usage restrictions
|
||||
- quantize handdetect model
|
||||
|
|
168 README.md
@ -4,6 +4,7 @@
|
|||

|
||||

|
||||

|
||||

|
||||
|
||||
# Human Library
|
||||
|
||||
|
@ -11,8 +12,7 @@
|
|||
**Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis,**
|
||||
**Age & Gender & Emotion Prediction, Gaze Tracking, Gesture Recognition, Body Segmentation**
|
||||
|
||||
<br>
|
||||
|
||||
JavaScript module using TensorFlow/JS Machine Learning library
|
||||
## Highlights
|
||||
|
||||
- Compatible with most server-side and client-side environments and frameworks
|
||||
|
@ -24,26 +24,19 @@
|
|||
- Simple unified API
|
||||
- Built-in Image, Video and WebCam handling
|
||||
|
||||
[*Jump to Quick Start*](#quick-start)
|
||||
|
||||
<br>
|
||||
|
||||
## Compatibility
|
||||
|
||||
**Browser**:
|
||||
- Compatible with both desktop and mobile platforms
|
||||
- Compatible with *WebGPU*, *WebGL*, *WASM*, *CPU* backends
|
||||
- Compatible with *WebWorker* execution
|
||||
- Compatible with *WebView*
|
||||
- Primary platform: *Chromium*-based browsers
|
||||
- Secondary platform: *Firefox*, *Safari*
|
||||
|
||||
**NodeJS**:
|
||||
Compatible with *WASM* backend for execution on architectures where *tensorflow* binaries are not available
|
||||
- Compatible with *tfjs-node* using software execution via *tensorflow* shared libraries
|
||||
- Compatible with *tfjs-node* using GPU-accelerated execution via *tensorflow* shared libraries and nVidia CUDA
|
||||
- Supported versions are from **14.x** to **22.x**
|
||||
- NodeJS version **23.x** is not supported due to breaking changes and issues with `@tensorflow/tfjs`
|
||||
- **Browser**:
|
||||
Compatible with both desktop and mobile platforms
|
||||
Compatible with *CPU*, *WebGL*, *WASM* backends
|
||||
Compatible with *WebWorker* execution
|
||||
Compatible with *WebView*
|
||||
- **NodeJS**:
|
||||
Compatible with *WASM* backend for execution on architectures where *tensorflow* binaries are not available
|
||||
Compatible with *tfjs-node* using software execution via *tensorflow* shared libraries
|
||||
Compatible with *tfjs-node* using GPU-accelerated execution via *tensorflow* shared libraries and nVidia CUDA
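As an illustration of the compatibility matrix above, a minimal sketch of selecting a backend per environment; the `backend` values follow the library configuration, while the model paths shown are hypothetical placeholders:

```js
// Minimal sketch: pick a backend matching the environments listed above
const isBrowser = typeof window !== 'undefined';
const config = {
  backend: isBrowser ? 'webgl' : 'tensorflow', // 'wasm' is an alternative in both environments
  modelBasePath: isBrowser ? '../models' : 'file://models/', // hypothetical model locations
};
const human = new Human.Human(config); // IIFE bundle exposes the class as Human.Human
```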
|
||||
|
||||
<br>
|
||||
|
||||
|
@ -73,9 +66,8 @@
|
|||
|
||||
- **Full** [[*Live*]](https://vladmandic.github.io/human/demo/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo): Main browser demo app that showcases all Human capabilities
|
||||
- **Simple** [[*Live*]](https://vladmandic.github.io/human/demo/typescript/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/typescript): Simple WebCam processing demo in TypeScript
|
||||
- **Embedded** [[*Live*]](https://vladmandic.github.io/human/demo/video/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/video/index.html): Even simpler demo with tiny code embedded in HTML file
|
||||
- **Face Detect** [[*Live*]](https://vladmandic.github.io/human/demo/facedetect/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facedetect): Extracts faces from images and processes details
|
||||
- **Face Match** [[*Live*]](https://vladmandic.github.io/human/demo/facematch/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Extracts faces from images, calculates face descriptors and similarities, and matches them to a known database
|
||||
- **Embedded** [[*Live*]](https://vladmandic.github.io/human/demo/video/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/video/index.html): Even simpler demo with tiny code embedded in HTML file
|
||||
- **Face Match** [[*Live*]](https://vladmandic.github.io/human/demo/facematch/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Extracts faces from images, calculates face descriptors and similarities, and matches them to a known database
|
||||
- **Face ID** [[*Live*]](https://vladmandic.github.io/human/demo/faceid/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/faceid): Runs multiple checks to validate webcam input before performing face match to faces in IndexedDB
|
||||
- **Multi-thread** [[*Live*]](https://vladmandic.github.io/human/demo/multithread/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread): Runs each Human module in a separate web worker for highest possible performance
|
||||
- **NextJS** [[*Live*]](https://vladmandic.github.io/human-next/out/index.html) [[*Details*]](https://github.com/vladmandic/human-next): Use Human with TypeScript, NextJS and ReactJS
|
||||
|
@ -89,15 +81,14 @@
|
|||
*NodeJS demos may require extra dependencies which are used to decode inputs*
|
||||
*See header of each demo to see its dependencies as they are not automatically installed with `Human`*
|
||||
|
||||
- **Main** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node.js): Process images from files, folders or URLs using native methods
|
||||
- **Canvas** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-canvas.js): Process image from file or URL and draw results to a new image file using `node-canvas`
|
||||
- **Video** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-video.js): Processing of video input using `ffmpeg`
|
||||
- **WebCam** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-webcam.js): Processing of webcam screenshots using `fswebcam`
|
||||
- **Events** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-event.js): Showcases usage of `Human` eventing to get notifications on processing
|
||||
- **Similarity** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-similarity.js): Compares two input images for similarity of detected faces
|
||||
- **Face Match** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch/node-match.js): Parallel processing of face **match** in multiple child worker threads
|
||||
- **Multiple Workers** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread/node-multiprocess.js): Runs multiple parallel `human` instances by dispatching them to a pool of pre-created worker processes
|
||||
- **Dynamic Load** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Loads Human dynamically with multiple different desired backends
|
||||
- **Main** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Process images from files, folders or URLs using native methods
|
||||
- **Canvas** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Process image from file or URL and draw results to a new image file using `node-canvas`
|
||||
- **Video** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Processing of video input using `ffmpeg`
|
||||
- **WebCam** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Processing of webcam screenshots using `fswebcam`
|
||||
- **Events** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Showcases usage of `Human` eventing to get notifications on processing
|
||||
- **Similarity** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Compares two input images for similarity of detected faces
|
||||
- **Face Match** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Parallel processing of face **match** in multiple child worker threads
|
||||
- **Multiple Workers** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Runs multiple parallel `human` instances by dispatching them to a pool of pre-created worker processes
|
||||
|
||||
## Project pages
|
||||
|
||||
|
@ -116,7 +107,6 @@
|
|||
- [**Usage & Functions**](https://github.com/vladmandic/human/wiki/Usage)
|
||||
- [**Configuration Details**](https://github.com/vladmandic/human/wiki/Config)
|
||||
- [**Result Details**](https://github.com/vladmandic/human/wiki/Result)
|
||||
- [**Customizing Draw Methods**](https://github.com/vladmandic/human/wiki/Draw)
|
||||
- [**Caching & Smoothing**](https://github.com/vladmandic/human/wiki/Caching)
|
||||
- [**Input Processing**](https://github.com/vladmandic/human/wiki/Image)
|
||||
- [**Face Recognition & Face Description**](https://github.com/vladmandic/human/wiki/Embedding)
|
||||
|
@ -151,21 +141,24 @@
|
|||
## App Examples
|
||||
|
||||
Visit [Examples gallery](https://vladmandic.github.io/human/samples/index.html) for more examples
|
||||
[<img src="assets/samples.jpg" width="640"/>](assets/samples.jpg)
|
||||
<https://vladmandic.github.io/human/samples/index.html>
|
||||
|
||||

|
||||
|
||||
<br>
|
||||
|
||||
## Options
|
||||
|
||||
All options as presented in the demo application...
|
||||
[demo/index.html](demo/index.html)
|
||||
[<img src="assets/screenshot-menu.png"/>](assets/screenshot-menu.png)
|
||||
> [demo/index.html](demo/index.html)
|
||||
|
||||

|
||||
|
||||
<br>
|
||||
|
||||
**Results Browser:**
|
||||
[ *Demo -> Display -> Show Results* ]<br>
|
||||
[<img src="assets/screenshot-results.png"/>](assets/screenshot-results.png)
|
||||

|
||||
|
||||
<br>
|
||||
|
||||
|
@ -177,39 +170,33 @@ sorts them by similarity to selected face
|
|||
and optionally matches the detected face with a database of known people to guess their names
|
||||
> [demo/facematch](demo/facematch/index.html)
|
||||
|
||||
[<img src="assets/screenshot-facematch.jpg" width="640"/>](assets/screenshot-facematch.jpg)
|
||||

|
||||
|
||||
2. **Face Detect:**
|
||||
Extracts all detected faces from loaded images on-demand and highlights face details on a selected face
|
||||
> [demo/facedetect](demo/facedetect/index.html)
|
||||
|
||||
[<img src="assets/screenshot-facedetect.jpg" width="640"/>](assets/screenshot-facedetect.jpg)
|
||||
|
||||
3. **Face ID:**
|
||||
2. **Face ID:**
|
||||
Performs validation check on a webcam input to detect a real face and matches it to known faces stored in database
|
||||
> [demo/faceid](demo/faceid/index.html)
|
||||
|
||||
[<img src="assets/screenshot-faceid.jpg" width="640"/>](assets/screenshot-faceid.jpg)
|
||||

|
||||
|
||||
<br>
|
||||
|
||||
4. **3D Rendering:**
|
||||
3. **3D Rendering:**
|
||||
> [human-motion](https://github.com/vladmandic/human-motion)
|
||||
|
||||
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-face.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-face.jpg)
|
||||
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-body.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-body.jpg)
|
||||
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-hand.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-hand.jpg)
|
||||

|
||||

|
||||

|
||||
|
||||
<br>
|
||||
|
||||
5. **VR Model Tracking:**
|
||||
4. **VR Model Tracking:**
|
||||
> [human-three-vrm](https://github.com/vladmandic/human-three-vrm)
|
||||
> [human-bjs-vrm](https://github.com/vladmandic/human-bjs-vrm)
|
||||
|
||||
[<img src="https://github.com/vladmandic/human-three-vrm/raw/main/assets/human-vrm-screenshot.jpg" width="640"/>](https://github.com/vladmandic/human-three-vrm/raw/main/assets/human-vrm-screenshot.jpg)
|
||||

|
||||
|
||||
|
||||
6. **Human as OS native application:**
|
||||
5. **Human as OS native application:**
|
||||
> [human-electron](https://github.com/vladmandic/human-electron)
|
||||
|
||||
<br>
|
||||
|
@ -217,7 +204,7 @@ Performs validation check on a webcam input to detect a real face and matches it
|
|||
**468-Point Face Mesh Details:**
|
||||
(view in full resolution to see keypoints)
|
||||
|
||||
[<img src="assets/facemesh.png" width="400"/>](assets/facemesh.png)
|
||||

|
||||
|
||||
<br><hr><br>
|
||||
|
||||
|
@ -227,16 +214,33 @@ Simply load `Human` (*IIFE version*) directly from a cloud CDN in your HTML file
|
|||
(pick one: `jsdelivr`, `unpkg` or `cdnjs`)
|
||||
|
||||
```html
|
||||
<!DOCTYPE HTML>
|
||||
<script src="https://cdn.jsdelivr.net/npm/@vladmandic/human/dist/human.js"></script>
|
||||
<script src="https://unpkg.dev/@vladmandic/human/dist/human.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/human/3.0.0/human.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/human/2.1.5/human.js"></script>
|
||||
```
|
||||
|
||||
For details, including how to use `Browser ESM` version or `NodeJS` version of `Human`, see [**Installation**](https://github.com/vladmandic/human/wiki/Install)
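As a rough sketch of the `Browser ESM` and `NodeJS` variants mentioned above (install and import details are covered in the Installation wiki page; the import paths assume the published `@vladmandic/human` package and the exact export shape may differ between bundles):

```js
// NodeJS (CommonJS) sketch — assumes `npm install @vladmandic/human` on a tfjs-node capable system
const { Human } = require('@vladmandic/human');
const human = new Human({ backend: 'tensorflow' });

// Browser ESM / bundler sketch
// import { Human } from '@vladmandic/human';
// const human = new Human({ backend: 'webgl' });
```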
|
||||
|
||||
<br>
|
||||
|
||||
## Inputs
|
||||
|
||||
`Human` library can process all known input types:
|
||||
|
||||
- `Image`, `ImageData`, `ImageBitmap`, `Canvas`, `OffscreenCanvas`, `Tensor`,
|
||||
- `HTMLImageElement`, `HTMLCanvasElement`, `HTMLVideoElement`, `HTMLMediaElement`
|
||||
|
||||
Additionally, `HTMLVideoElement`, `HTMLMediaElement` can be a standard `<video>` tag that links to:
|
||||
|
||||
- WebCam on user's system
|
||||
- Any supported video type
|
||||
e.g. `.mp4`, `.avi`, etc.
|
||||
- Additional video types supported via *HTML5 Media Source Extensions*
|
||||
e.g.: **HLS** (*HTTP Live Streaming*) using `hls.js` or **DASH** (*Dynamic Adaptive Streaming over HTTP*) using `dash.js`
|
||||
- **WebRTC** media track using built-in support
|
||||
|
||||
<br>
|
||||
|
||||
## Code Examples
|
||||
|
||||
Simple app that uses Human to process video input and
|
||||
|
@ -245,7 +249,7 @@ draw output on screen using internal draw helper functions
|
|||
```js
|
||||
// create instance of human with simple configuration using default values
|
||||
const config = { backend: 'webgl' };
|
||||
const human = new Human.Human(config);
|
||||
const human = new Human(config);
|
||||
// select input HTMLVideoElement and output HTMLCanvasElement from page
|
||||
const inputVideo = document.getElementById('video-id');
|
||||
const outputCanvas = document.getElementById('canvas-id');
|
||||
|
@ -264,7 +268,6 @@ function detectVideo() {
|
|||
human.draw.gesture(outputCanvas, result.gesture);
|
||||
// and loop immediate to the next frame
|
||||
requestAnimationFrame(detectVideo);
|
||||
return result;
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -372,53 +375,6 @@ drawResults(); // start draw loop
|
|||
|
||||
And for even better results, you can run detection in a separate web worker thread
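A rough sketch of that worker split; the file name, message shape and backend choice are illustrative, and the official multithread demo is the reference implementation:

```js
// worker.js — detection runs off the main thread (illustrative)
importScripts('https://cdn.jsdelivr.net/npm/@vladmandic/human/dist/human.js'); // IIFE bundle
const human = new Human.Human({ backend: 'wasm' }); // wasm backend works inside web workers
onmessage = async (msg) => {
  const result = await human.detect(msg.data.image); // ImageBitmap posted by the main thread
  result.canvas = null; // strip canvas since it cannot be posted back from a worker
  postMessage({ result });
};

// main thread — post frames, draw whatever results come back (illustrative)
const worker = new Worker('worker.js');
worker.onmessage = (msg) => human.draw.all(outputCanvas, msg.data.result); // human/outputCanvas as in the example above
async function sendFrame() {
  const bitmap = await createImageBitmap(inputVideo); // inputVideo as in the example above
  worker.postMessage({ image: bitmap }, [bitmap]); // transfer the bitmap to the worker
  requestAnimationFrame(sendFrame);
}
sendFrame();
```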
|
||||
|
||||
<br><hr><br>
|
||||
|
||||
## Inputs
|
||||
|
||||
`Human` library can process all known input types:
|
||||
|
||||
- `Image`, `ImageData`, `ImageBitmap`, `Canvas`, `OffscreenCanvas`, `Tensor`,
|
||||
- `HTMLImageElement`, `HTMLCanvasElement`, `HTMLVideoElement`, `HTMLMediaElement`
|
||||
|
||||
Additionally, `HTMLVideoElement`, `HTMLMediaElement` can be a standard `<video>` tag that links to:
|
||||
|
||||
- WebCam on user's system
|
||||
- Any supported video type
|
||||
e.g. `.mp4`, `.avi`, etc.
|
||||
- Additional video types supported via *HTML5 Media Source Extensions*
|
||||
e.g.: **HLS** (*HTTP Live Streaming*) using `hls.js` or **DASH** (*Dynamic Adaptive Streaming over HTTP*) using `dash.js`
|
||||
- **WebRTC** media track using built-in support
|
||||
|
||||
<br><hr><br>
|
||||
|
||||
## Detailed Usage
|
||||
|
||||
- [**Wiki Home**](https://github.com/vladmandic/human/wiki)
|
||||
- [**List of all available methods, properties and namespaces**](https://github.com/vladmandic/human/wiki/Usage)
|
||||
- [**TypeDoc API Specification - Main class**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
|
||||
- [**TypeDoc API Specification - Full**](https://vladmandic.github.io/human/typedoc/)
|
||||
|
||||

|
||||
|
||||
<br><hr><br>
|
||||
|
||||
## TypeDefs
|
||||
|
||||
`Human` is written using TypeScript strong typing and ships with full **TypeDefs** for all classes defined by the library bundled in `types/human.d.ts` and enabled by default
|
||||
|
||||
*Note*: This does not include embedded `tfjs`
|
||||
If you want to use embedded `tfjs` inside `Human` (`human.tf` namespace) and still have full **typedefs**, add this code:
|
||||
|
||||
> import type * as tfjs from '@vladmandic/human/dist/tfjs.esm';
|
||||
> const tf = human.tf as typeof tfjs;
|
||||
|
||||
This is not enabled by default as `Human` does not ship with full **TFJS TypeDefs** due to size considerations
|
||||
Enabling `tfjs` TypeDefs as above creates additional project (dev-only as only types are required) dependencies as defined in `@vladmandic/human/dist/tfjs.esm.d.ts`:
|
||||
|
||||
> @tensorflow/tfjs-core, @tensorflow/tfjs-converter, @tensorflow/tfjs-backend-wasm, @tensorflow/tfjs-backend-webgl
|
||||
|
||||
|
||||
<br><hr><br>
|
||||
|
||||
## Default models
|
||||
|
@ -448,9 +404,9 @@ For more info, see [**Configuration Details**](https://github.com/vladmandic/hum
|
|||
|
||||
<br><hr><br>
|
||||
|
||||
`Human` library is written in [TypeScript](https://www.typescriptlang.org/docs/handbook/intro.html) **5.1** using [TensorFlow/JS](https://www.tensorflow.org/js/) **4.10** and conforming to latest `JavaScript` [ECMAScript version 2022](https://262.ecma-international.org/) standard
|
||||
|
||||
Build target for distributables is `JavaScript` [ECMAScript version 2018](https://262.ecma-international.org/9.0/)
|
||||
`Human` library is written in `TypeScript` [4.8](https://www.typescriptlang.org/docs/handbook/intro.html)
|
||||
Conforming to latest `JavaScript` [ECMAScript version 2022](https://262.ecma-international.org/) standard
|
||||
Build target is `JavaScript` [ECMAScript version 2018](https://262.ecma-international.org/11.0/)
|
||||
|
||||
<br>
|
||||
|
||||
|
|
51 TODO.md
@ -2,6 +2,21 @@
|
|||
|
||||
## Work-in-Progress
|
||||
|
||||
|
||||
<hr><br>
|
||||
|
||||
## Exploring
|
||||
|
||||
- **Optical flow** for intelligent temporal interpolation
|
||||
<https://docs.opencv.org/3.3.1/db/d7f/tutorial_js_lucas_kanade.html>
|
||||
- **CLAHE** advanced histogram equalization for optimization of badly lit scenes
|
||||
- **TFLite** models
|
||||
<https://js.tensorflow.org/api_tflite/0.0.1-alpha.4/>
|
||||
- **Principal Components Analysis** for reduction of descriptor complexity
|
||||
<https://github.com/vladmandic/human-match/blob/main/src/pca.js>
|
||||
- **Temporal guidance** for face/body segmentation
|
||||
<https://github.com/PeterL1n/RobustVideoMatting>
|
||||
|
||||
<hr><br>
|
||||
|
||||
## Known Issues & Limitations
|
||||
|
@ -16,9 +31,10 @@ No issues with default model `FaceMesh`
|
|||
`NanoDet` model is not supported when using `WASM` backend due to missing kernel op in **TFJS**
|
||||
No issues with default model `MB3-CenterNet`
|
||||
|
||||
## Body Detection using MoveNet-MultiPose
|
||||
### WebGPU
|
||||
|
||||
Model does not return valid detection scores (all other functionality is not impacted)
|
||||
Experimental support only until support is officially added in Chromium
|
||||
Enable via <chrome://flags/#enable-unsafe-webgpu>
|
||||
|
||||
### Firefox
|
||||
|
||||
|
@ -31,8 +47,31 @@ Enable via `about:config` -> `gfx.offscreencanvas.enabled`
|
|||
No support for running in **web workers** as Safari still does not support `OffscreenCanvas`
|
||||
[Details](https://developer.mozilla.org/en-US/docs/Web/API/OffscreenCanvas#browser_compatibility)
|
||||
|
||||
## React-Native
|
||||
|
||||
`Human` support for **React-Native** is best-effort, but not part of the main development focus
|
||||
|
||||
<hr><br>
|
||||
|
||||
## Pending Release Changes
|
||||
|
||||
- New methods [`human.webcam.*`](https://vladmandic.github.io/human/typedoc/classes/WebCam.html)
|
||||
Enables built-in configuration and control of **WebCam** streams
|
||||
- New method [`human.video()`](https://vladmandic.github.io/human/typedoc/classes/Human.html#video)
|
||||
Runs continuous detection of an input **video**
|
||||
instead of processing each frame manually using `human.detect()` (see the sketch after this list)
|
||||
- New demo for **webcam** and **video** methods [*Live*](https://vladmandic.github.io/human/demo/video/index.html) | [*Code*](https://github.com/vladmandic/human/blob/main/demo/video/index.html)
|
||||
*Full HTML and JavaScript code in less than a screen*
|
||||
- Redesigned [`human.segmentation`](https://vladmandic.github.io/human/typedoc/classes/Human.html#segmentation)
|
||||
*Breaking changes*
|
||||
- New model `rvm` for high-quality body segmentation in real-time
|
||||
*Not part of default deployment, download from [human-models](https://github.com/vladmandic/human-models/tree/main/models)*
|
||||
- New demo for **segmentation** methods [*Live*](https://vladmandic.github.io/human/demo/segmentation/index.html) | [*Code*](https://github.com/vladmandic/human/blob/main/demo/segmentation/index.html)
|
||||
*Full HTML and JavaScript code in less than a screen*
|
||||
- New advanced demo using **BabylonJS and VRM** [*Live*](https://vladmandic.github.io/human-bjs-vrm) | [*Code*](https://github.com/vladmandic/human-bjs-vrm)
|
||||
- Update **TypeDoc** generation [*Link*](https://vladmandic.github.io/human/typedoc)
|
||||
- Update **TypeDefs** bundle generation [*Link*](https://github.com/vladmandic/human/blob/main/types/human.d.ts)
|
||||
No external dependencies
|
||||
- Fix model caching when using web workers
|
||||
- Fix `face.rotation` when using interpolation
|
||||
- Improve NodeJS resolver when using ESM
|
||||
- Update demo `demo/typescript`
|
||||
- Update demo `demo/faceid`
|
||||
- Update demo `demo/nodejs/process-folder.js`
|
||||
and re-process `/samples` [*Link*](https://vladmandic.github.io/human/samples)
|
||||
|
|
Image assets changed: 70 KiB (before only) | 22 KiB → 41 KiB | 14 KiB → 34 KiB | 56 KiB (after only) | 38 KiB (before only)
69 build.js
@ -31,42 +31,25 @@ const apiExtractorIgnoreList = [ // eslint-disable-line no-unused-vars
|
|||
'tsdoc-unnecessary-backslash',
|
||||
];
|
||||
|
||||
const regEx = [
|
||||
{ search: 'types="@webgpu/types/dist"', replace: 'path="../src/types/webgpu.d.ts"' },
|
||||
{ search: 'types="offscreencanvas"', replace: 'path="../src/types/offscreencanvas.d.ts"' },
|
||||
];
|
||||
|
||||
function copyFile(src, dst) {
|
||||
if (!fs.existsSync(src)) {
|
||||
log.warn('Copy:', { input: src, output: dst });
|
||||
return;
|
||||
}
|
||||
log.state('Copy:', { input: src, output: dst });
|
||||
function copy(src, dst) {
|
||||
if (!fs.existsSync(src)) return;
|
||||
const buffer = fs.readFileSync(src);
|
||||
fs.writeFileSync(dst, buffer);
|
||||
}
|
||||
|
||||
function writeFile(str, dst) {
|
||||
log.state('Write:', { output: dst });
|
||||
function write(str, dst) {
|
||||
fs.writeFileSync(dst, str);
|
||||
}
|
||||
|
||||
function regExFile(src, entries) {
|
||||
if (!fs.existsSync(src)) {
|
||||
log.warn('Filter:', { src });
|
||||
return;
|
||||
}
|
||||
log.state('Filter:', { input: src });
|
||||
for (const entry of entries) {
|
||||
function filter(str, src) {
|
||||
if (!fs.existsSync(src)) return;
|
||||
const buffer = fs.readFileSync(src, 'UTF-8');
|
||||
const lines = buffer.split(/\r?\n/);
|
||||
const out = [];
|
||||
for (const line of lines) {
|
||||
if (line.includes(entry.search)) out.push(line.replace(entry.search, entry.replace));
|
||||
else out.push(line);
|
||||
if (!line.includes(str)) out.push(line);
|
||||
}
|
||||
fs.writeFileSync(src, out.join('\n'));
|
||||
}
|
||||
}
|
||||
|
||||
async function analyzeModels() {
|
||||
|
@ -106,22 +89,14 @@ async function analyzeModels() {
|
|||
async function main() {
|
||||
log.logFile(logFile);
|
||||
log.data('Build', { name: packageJSON.name, version: packageJSON.version });
|
||||
|
||||
// run production build
|
||||
const build = new Build();
|
||||
await build.run('production');
|
||||
|
||||
// patch tfjs typedefs
|
||||
copyFile('node_modules/@vladmandic/tfjs/types/tfjs-core.d.ts', 'types/tfjs-core.d.ts');
|
||||
copyFile('node_modules/@vladmandic/tfjs/types/tfjs.d.ts', 'types/tfjs.esm.d.ts');
|
||||
copyFile('src/types/tsconfig.json', 'types/tsconfig.json');
|
||||
copyFile('src/types/eslint.json', 'types/.eslintrc.json');
|
||||
copyFile('src/types/tfjs.esm.d.ts', 'dist/tfjs.esm.d.ts');
|
||||
regExFile('types/tfjs-core.d.ts', regEx);
|
||||
|
||||
log.state('Copy:', { input: 'tfjs/tfjs.esm.d.ts' });
|
||||
copy('tfjs/tfjs.esm.d.ts', 'types/lib/dist/tfjs.esm.d.ts');
|
||||
// run api-extractor to create typedef rollup
|
||||
const extractorConfig = APIExtractor.ExtractorConfig.loadFileAndPrepare('.api-extractor.json');
|
||||
try {
|
||||
const extractorResult = APIExtractor.Extractor.invoke(extractorConfig, {
|
||||
localBuild: true,
|
||||
showVerboseMessages: false,
|
||||
|
@ -134,16 +109,24 @@ async function main() {
|
|||
},
|
||||
});
|
||||
log.state('API-Extractor:', { succeeeded: extractorResult.succeeded, errors: extractorResult.errorCount, warnings: extractorResult.warningCount });
|
||||
} catch (err) {
|
||||
log.error('API-Extractor:', err);
|
||||
}
|
||||
regExFile('types/human.d.ts', regEx);
|
||||
writeFile('export * from \'../types/human\';', 'dist/human.esm-nobundle.d.ts');
|
||||
writeFile('export * from \'../types/human\';', 'dist/human.esm.d.ts');
|
||||
writeFile('export * from \'../types/human\';', 'dist/human.d.ts');
|
||||
writeFile('export * from \'../types/human\';', 'dist/human.node-gpu.d.ts');
|
||||
writeFile('export * from \'../types/human\';', 'dist/human.node.d.ts');
|
||||
writeFile('export * from \'../types/human\';', 'dist/human.node-wasm.d.ts');
|
||||
// distribute typedefs
|
||||
// log.state('Copy:', { input: 'types/human.d.ts' });
|
||||
// copy('types/human.d.ts', 'dist/human.esm-nobundle.d.ts');
|
||||
// copy('types/human.d.ts', 'dist/human.esm.d.ts');
|
||||
// copy('types/human.d.ts', 'dist/human.d.ts');
|
||||
// copy('types/human.d.ts', 'dist/human.node-gpu.d.ts');
|
||||
// copy('types/human.d.ts', 'dist/human.node.d.ts');
|
||||
// copy('types/human.d.ts', 'dist/human.node-wasm.d.ts');
|
||||
log.state('Filter:', { input: 'types/human.d.ts' });
|
||||
filter('reference types', 'types/human.d.ts');
|
||||
log.state('Link:', { input: 'types/human.d.ts' });
|
||||
write('export * from \'../types/human\';', 'dist/human.esm-nobundle.d.ts');
|
||||
write('export * from \'../types/human\';', 'dist/human.esm.d.ts');
|
||||
write('export * from \'../types/human\';', 'dist/human.d.ts');
|
||||
write('export * from \'../types/human\';', 'dist/human.node-gpu.d.ts');
|
||||
write('export * from \'../types/human\';', 'dist/human.node.d.ts');
|
||||
write('export * from \'../types/human\';', 'dist/human.node-wasm.d.ts');
|
||||
// export * from '../types/human';
|
||||
|
||||
// generate model signature
|
||||
await analyzeModels();
|
||||
|
|
|
@ -8,7 +8,6 @@ For details on other demos see Wiki: [**Demos**](https://github.com/vladmandic/h
|
|||
`index.html`: Full demo using `Human` ESM module running in Browsers,
|
||||
|
||||
Includes:
|
||||
|
||||
- Selectable inputs:
|
||||
- Sample images
|
||||
- Image via drag & drop
|
||||
|
@ -38,14 +37,12 @@ Includes:
|
|||
in `index.js:ui`
|
||||
|
||||
```js
|
||||
const ui = {
|
||||
console: true, // log messages to browser console
|
||||
useWorker: true, // use web workers for processing
|
||||
buffered: true, // should output be buffered between frames
|
||||
interpolated: true, // should output be interpolated for smoothness between frames
|
||||
results: false, // show results tree
|
||||
useWebRTC: false, // use webrtc as camera source instead of local webcam
|
||||
};
|
||||
```
|
||||
|
||||
Demo implements several ways to use `Human` library,
|
||||
|
|
|
@ -1,160 +0,0 @@
|
|||
/**
|
||||
* Human demo for browsers
|
||||
*
|
||||
* Demo for face detection
|
||||
*/
|
||||
|
||||
/** @type {Human} */
|
||||
import { Human } from '../../dist/human.esm.js';
|
||||
|
||||
let loader;
|
||||
|
||||
const humanConfig = { // user configuration for human, used to fine-tune behavior
|
||||
cacheSensitivity: 0,
|
||||
debug: true,
|
||||
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
|
||||
filter: { enabled: true, equalization: false, flip: false },
|
||||
face: {
|
||||
enabled: true,
|
||||
detector: { rotation: false, maxDetected: 100, minConfidence: 0.2, return: true, square: false },
|
||||
iris: { enabled: true },
|
||||
description: { enabled: true },
|
||||
emotion: { enabled: true },
|
||||
antispoof: { enabled: true },
|
||||
liveness: { enabled: true },
|
||||
},
|
||||
body: { enabled: false },
|
||||
hand: { enabled: false },
|
||||
object: { enabled: false },
|
||||
gesture: { enabled: false },
|
||||
segmentation: { enabled: false },
|
||||
};
|
||||
|
||||
const human = new Human(humanConfig); // new instance of human
|
||||
|
||||
export const showLoader = (msg) => { loader.setAttribute('msg', msg); loader.style.display = 'block'; };
|
||||
export const hideLoader = () => loader.style.display = 'none';
|
||||
|
||||
class ComponentLoader extends HTMLElement { // watch for attributes
|
||||
message = document.createElement('div');
|
||||
|
||||
static get observedAttributes() { return ['msg']; }
|
||||
|
||||
attributeChangedCallback(_name, _prevVal, currVal) {
|
||||
this.message.innerHTML = currVal;
|
||||
}
|
||||
|
||||
connectedCallback() { // triggered on insert
|
||||
this.attachShadow({ mode: 'open' });
|
||||
const css = document.createElement('style');
|
||||
css.innerHTML = `
|
||||
.loader-container { top: 450px; justify-content: center; position: fixed; width: 100%; }
|
||||
.loader-message { font-size: 1.5rem; padding: 1rem; }
|
||||
.loader { width: 300px; height: 300px; border: 3px solid transparent; border-radius: 50%; border-top: 4px solid #f15e41; animation: spin 4s linear infinite; position: relative; }
|
||||
.loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: 4px solid transparent; }
|
||||
.loader::before { border-top-color: #bad375; animation: 3s spin linear infinite; }
|
||||
.loader::after { border-top-color: #26a9e0; animation: spin 1.5s linear infinite; }
|
||||
@keyframes spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } }
|
||||
`;
|
||||
const container = document.createElement('div');
|
||||
container.id = 'loader-container';
|
||||
container.className = 'loader-container';
|
||||
loader = document.createElement('div');
|
||||
loader.id = 'loader';
|
||||
loader.className = 'loader';
|
||||
this.message.id = 'loader-message';
|
||||
this.message.className = 'loader-message';
|
||||
this.message.innerHTML = '';
|
||||
container.appendChild(this.message);
|
||||
container.appendChild(loader);
|
||||
this.shadowRoot?.append(css, container);
|
||||
loader = this; // eslint-disable-line @typescript-eslint/no-this-alias
|
||||
}
|
||||
}
|
||||
|
||||
customElements.define('component-loader', ComponentLoader);
|
||||
|
||||
function addFace(face, source) {
|
||||
const deg = (rad) => Math.round((rad || 0) * 180 / Math.PI);
|
||||
const canvas = document.createElement('canvas');
|
||||
const emotion = face.emotion?.map((e) => `${Math.round(100 * e.score)}% ${e.emotion}`) || [];
|
||||
const rotation = `pitch ${deg(face.rotation?.angle.pitch)}° | roll ${deg(face.rotation?.angle.roll)}° | yaw ${deg(face.rotation?.angle.yaw)}°`;
|
||||
const gaze = `direction ${deg(face.rotation?.gaze.bearing)}° strength ${Math.round(100 * (face.rotation.gaze.strength || 0))}%`;
|
||||
canvas.title = `
|
||||
source: ${source}
|
||||
score: ${Math.round(100 * face.boxScore)}% detection ${Math.round(100 * face.faceScore)}% analysis
|
||||
age: ${face.age} years | gender: ${face.gender} score ${Math.round(100 * face.genderScore)}%
|
||||
emotion: ${emotion.join(' | ')}
|
||||
head rotation: ${rotation}
|
||||
eyes gaze: ${gaze}
|
||||
camera distance: ${face.distance}m | ${Math.round(100 * face.distance / 2.54)}in
|
||||
check: ${Math.round(100 * face.real)}% real ${Math.round(100 * face.live)}% live
|
||||
`.replace(/ /g, ' ');
|
||||
canvas.onclick = (e) => {
|
||||
e.preventDefault();
|
||||
document.getElementById('description').innerHTML = canvas.title;
|
||||
};
|
||||
human.draw.tensor(face.tensor, canvas);
|
||||
human.tf.dispose(face.tensor);
|
||||
return canvas;
|
||||
}
|
||||
|
||||
async function addFaces(imgEl) {
|
||||
showLoader('human: busy');
|
||||
const faceEl = document.getElementById('faces');
|
||||
faceEl.innerHTML = '';
|
||||
const res = await human.detect(imgEl);
|
||||
console.log(res); // eslint-disable-line no-console
|
||||
document.getElementById('description').innerHTML = `detected ${res.face.length} faces`;
|
||||
for (const face of res.face) {
|
||||
const canvas = addFace(face, imgEl.src.substring(0, 64));
|
||||
faceEl.appendChild(canvas);
|
||||
}
|
||||
hideLoader();
|
||||
}
|
||||
|
||||
function addImage(imageUri) {
|
||||
const imgEl = new Image(256, 256);
|
||||
imgEl.onload = () => {
|
||||
const images = document.getElementById('images');
|
||||
images.appendChild(imgEl); // add image if loaded ok
|
||||
images.scroll(images?.offsetWidth, 0);
|
||||
};
|
||||
imgEl.onerror = () => console.error('addImage', { imageUri }); // eslint-disable-line no-console
|
||||
imgEl.onclick = () => addFaces(imgEl);
|
||||
imgEl.title = imageUri.substring(0, 64);
|
||||
imgEl.src = encodeURI(imageUri);
|
||||
}
|
||||
|
||||
async function initDragAndDrop() {
|
||||
const reader = new FileReader();
|
||||
reader.onload = async (e) => {
|
||||
if (e.target.result.startsWith('data:image')) await addImage(e.target.result);
|
||||
};
|
||||
document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
|
||||
document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
|
||||
document.body.addEventListener('dragover', (evt) => evt.preventDefault());
|
||||
document.body.addEventListener('drop', async (evt) => {
|
||||
evt.preventDefault();
|
||||
evt.dataTransfer.dropEffect = 'copy';
|
||||
for (const f of evt.dataTransfer.files) reader.readAsDataURL(f);
|
||||
});
|
||||
document.body.onclick = (e) => {
|
||||
if (e.target.localName !== 'canvas') document.getElementById('description').innerHTML = '';
|
||||
};
|
||||
}
|
||||
|
||||
async function main() {
|
||||
showLoader('loading models');
|
||||
await human.load();
|
||||
showLoader('compiling models');
|
||||
await human.warmup();
|
||||
showLoader('loading images');
|
||||
const images = ['group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg', 'solvay1927.jpg', 'stock-group-1.jpg', 'stock-group-2.jpg', 'stock-models-6.jpg', 'stock-models-7.jpg'];
|
||||
const imageUris = images.map((a) => `../../samples/in/${a}`);
|
||||
for (let i = 0; i < imageUris.length; i++) addImage(imageUris[i]);
|
||||
initDragAndDrop();
|
||||
hideLoader();
|
||||
}
|
||||
|
||||
window.onload = main;
|
|
@ -1,43 +0,0 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>Human</title>
|
||||
<!-- <meta http-equiv="content-type" content="text/html; charset=utf-8"> -->
|
||||
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
|
||||
<meta name="keywords" content="Human">
|
||||
<meta name="application-name" content="Human">
|
||||
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="theme-color" content="#000000">
|
||||
<link rel="manifest" href="../manifest.webmanifest">
|
||||
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
|
||||
<link rel="apple-touch-icon" href="../../assets/icon.png">
|
||||
<script src="./facedetect.js" type="module"></script>
|
||||
<style>
|
||||
img { object-fit: contain; }
|
||||
img:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
|
||||
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
|
||||
html { font-family: 'Lato', 'Segoe UI'; font-size: 24px; font-variant: small-caps; }
|
||||
body { margin: 24px; background: black; color: white; overflow: hidden; text-align: -webkit-center; width: 100vw; height: 100vh; }
|
||||
::-webkit-scrollbar { height: 8px; border: 0; border-radius: 0; }
|
||||
::-webkit-scrollbar-thumb { background: grey }
|
||||
::-webkit-scrollbar-track { margin: 3px; }
|
||||
canvas { width: 192px; height: 192px; margin: 2px; padding: 2px; cursor: grab; transform: scale(1.00); transition : all 0.3s ease; }
|
||||
canvas:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<component-loader></component-loader>
|
||||
<div style="display: flex">
|
||||
<div>
|
||||
<div style="margin: 24px">select image to show detected faces<br>drag & drop to add your images</div>
|
||||
<div id="images" style="display: flex; width: 98vw; overflow-x: auto; overflow-y: hidden; scroll-behavior: smooth"></div>
|
||||
</div>
|
||||
</div>
|
||||
<div id="list" style="height: 10px"></div>
|
||||
<div style="margin: 24px">hover or click on face to show details</div>
|
||||
<div id="faces" style="overflow-y: auto"></div>
|
||||
<div id="description" style="white-space: pre;"></div>
|
||||
</body>
|
||||
</html>
|
|
@ -11,13 +11,12 @@ import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
|
|||
import * as indexDb from './indexdb'; // methods to deal with indexdb
|
||||
|
||||
const humanConfig = { // user configuration for human, used to fine-tune behavior
|
||||
cacheSensitivity: 0.01,
|
||||
cacheSensitivity: 0,
|
||||
modelBasePath: '../../models',
|
||||
filter: { enabled: true, equalization: true }, // let's run with histogram equalizer
|
||||
debug: true,
|
||||
filter: { equalization: true }, // let's run with histogram equalizer
|
||||
face: {
|
||||
enabled: true,
|
||||
detector: { rotation: true, return: true, mask: false }, // return tensor is used to get detected face image
|
||||
detector: { rotation: true, return: true, cropFactor: 1.6, mask: false }, // return tensor is used to get detected face image
|
||||
description: { enabled: true }, // default model for face descriptor extraction is faceres
|
||||
// mobilefacenet: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/mobilefacenet.json' }, // alternative model
|
||||
// insightface: { enabled: true, modelPath: 'https://vladmandic.github.io/insightface/models/insightface-mobilenet-swish.json' }, // alternative model
|
||||
|
@ -42,10 +41,9 @@ const options = {
|
|||
blinkMin: 10, // minimum duration of a valid blink
|
||||
blinkMax: 800, // maximum duration of a valid blink
|
||||
threshold: 0.5, // minimum similarity
|
||||
distanceMin: 0.4, // closest that face is allowed to be to the camera in cm
|
||||
distanceMax: 1.0, // farthest that face is allowed to be to the camera in cm
|
||||
mask: humanConfig.face.detector.mask,
|
||||
rotation: humanConfig.face.detector.rotation,
|
||||
cropFactor: humanConfig.face.detector.cropFactor,
|
||||
...matchOptions,
|
||||
};
|
||||
|
||||
|
@ -58,7 +56,6 @@ const ok: Record<string, { status: boolean | undefined, val: number }> = { // mu
|
|||
faceSize: { status: false, val: 0 },
|
||||
antispoofCheck: { status: false, val: 0 },
|
||||
livenessCheck: { status: false, val: 0 },
|
||||
distance: { status: false, val: 0 },
|
||||
age: { status: false, val: 0 },
|
||||
gender: { status: false, val: 0 },
|
||||
timeout: { status: true, val: 0 },
|
||||
|
@ -76,7 +73,6 @@ const allOk = () => ok.faceCount.status
|
|||
&& ok.faceConfidence.status
|
||||
&& ok.antispoofCheck.status
|
||||
&& ok.livenessCheck.status
|
||||
&& ok.distance.status
|
||||
&& ok.descriptor.status
|
||||
&& ok.age.status
|
||||
&& ok.gender.status;
|
||||
|
@ -191,8 +187,6 @@ async function validationLoop(): Promise<H.FaceResult> { // main screen refresh
|
|||
ok.livenessCheck.status = ok.livenessCheck.val >= options.minConfidence;
|
||||
ok.faceSize.val = Math.min(human.result.face[0].box[2], human.result.face[0].box[3]);
|
||||
ok.faceSize.status = ok.faceSize.val >= options.minSize;
|
||||
ok.distance.val = human.result.face[0].distance || 0;
|
||||
ok.distance.status = (ok.distance.val >= options.distanceMin) && (ok.distance.val <= options.distanceMax);
|
||||
ok.descriptor.val = human.result.face[0].embedding?.length || 0;
|
||||
ok.descriptor.status = ok.descriptor.val > 0;
|
||||
ok.age.val = human.result.face[0].age || 0;
|
||||
|
@ -239,8 +233,8 @@ async function detectFace() {
|
|||
dom.canvas.getContext('2d')?.clearRect(0, 0, options.minSize, options.minSize);
|
||||
if (!current?.face?.tensor || !current?.face?.embedding) return false;
|
||||
console.log('face record:', current.face); // eslint-disable-line no-console
|
||||
log(`detected face: ${current.face.gender} ${current.face.age || 0}y distance ${100 * (current.face.distance || 0)}cm/${Math.round(100 * (current.face.distance || 0) / 2.54)}in`);
|
||||
await human.draw.tensor(current.face.tensor, dom.canvas);
|
||||
log(`detected face: ${current.face.gender} ${current.face.age || 0}y distance ${current.face.iris || 0}cm/${Math.round(100 * (current.face.iris || 0) / 2.54) / 100}in`);
|
||||
human.tf.browser.toPixels(current.face.tensor as unknown as H.TensorLike, dom.canvas);
|
||||
if (await indexDb.count() === 0) {
|
||||
log('face database is empty: nothing to compare face with');
|
||||
document.body.style.background = 'black';
|
||||
|
@ -249,7 +243,7 @@ async function detectFace() {
|
|||
}
|
||||
const db = await indexDb.load();
|
||||
const descriptors = db.map((rec) => rec.descriptor).filter((desc) => desc.length > 0);
|
||||
const res = human.match.find(current.face.embedding, descriptors, matchOptions);
|
||||
const res = human.match(current.face.embedding, descriptors, matchOptions);
|
||||
current.record = db[res.index] || null;
|
||||
if (current.record) {
|
||||
log(`best match: ${current.record.name} | id: ${current.record.id} | similarity: ${Math.round(1000 * res.similarity) / 10}%`);
|
||||
|
@ -281,8 +275,8 @@ async function main() { // main entry point
|
|||
await detectionLoop(); // start detection loop
|
||||
startTime = human.now();
|
||||
current.face = await validationLoop(); // start validation loop
|
||||
dom.canvas.width = current.face?.tensor?.shape[1] || options.minSize;
|
||||
dom.canvas.height = current.face?.tensor?.shape[0] || options.minSize;
|
||||
dom.canvas.width = current.face.tensor?.shape[1] || options.minSize;
|
||||
dom.canvas.height = current.face.tensor?.shape[0] || options.minSize;
|
||||
dom.source.width = dom.canvas.width;
|
||||
dom.source.height = dom.canvas.height;
|
||||
dom.canvas.style.width = '';
|
||||
|
|
|
@ -11,7 +11,7 @@

## Browser Face Recognition Demo

- `demo/facematch`: Demo for Browsers that uses all face description and embedding features to
  detect, extract and identify all faces plus calculate similarity between them
  detect, extract and identify all faces plus calculate simmilarity between them

It highlights functionality such as:

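To make that workflow concrete, here is a minimal, hedged sketch of comparing two faces with the `human.match` API that the diffs below migrate to (the image inputs are placeholders; this is not the demo code itself):

```js
// sketch: compute similarity between the first detected face in two inputs
import { Human } from '../../dist/human.esm.js';

const human = new Human({ face: { enabled: true, description: { enabled: true } } });

async function compareFaces(imageA, imageB) { // inputs: img/canvas/video elements or tensors
  const resA = await human.detect(imageA);
  const resB = await human.detect(imageB);
  const embA = resA.face[0]?.embedding;
  const embB = resB.face[0]?.embedding;
  if (!embA || !embB) return 0; // no usable face descriptor in one of the inputs
  return human.match.similarity(embA, embB); // 0..1, higher means more similar
}
```
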
@ -72,13 +72,12 @@ Non-linear performance that increases with number of worker threads due to communication overhead

> node node-match

<!-- eslint-skip -->
```js
INFO: options: { dbFile: './faces.json', dbMax: 10000, threadPoolSize: 6, workerSrc: './node-match-worker.js', debug: false, minThreshold: 0.9, descLength: 1024 }
DATA: created shared buffer: { maxDescriptors: 10000, totalBytes: 40960000, totalElements: 10240000 }
DATA: db loaded: { existingRecords: 0, newRecords: 5700 }
INFO: starting worker thread pool: { totalWorkers: 6, alreadyActive: 0 }
STATE: submitted: { matchJobs: 100, poolSize: 6, activeWorkers: 6 }
STATE: { matchJobsFinished: 100, totalTimeMs: 1769, averageTimeMs: 17.69 }
INFO: closing workers: { poolSize: 6, activeWorkers: 6 }
2021-10-13 07:53:36 INFO: options: { dbFile: './faces.json', dbMax: 10000, threadPoolSize: 6, workerSrc: './node-match-worker.js', debug: false, minThreshold: 0.9, descLength: 1024 }
2021-10-13 07:53:36 DATA: created shared buffer: { maxDescriptors: 10000, totalBytes: 40960000, totalElements: 10240000 }
2021-10-13 07:53:36 DATA: db loaded: { existingRecords: 0, newRecords: 5700 }
2021-10-13 07:53:36 INFO: starting worker thread pool: { totalWorkers: 6, alreadyActive: 0 }
2021-10-13 07:53:36 STATE: submitted: { matchJobs: 100, poolSize: 6, activeWorkers: 6 }
2021-10-13 07:53:38 STATE: { matchJobsFinished: 100, totalTimeMs: 1769, averageTimeMs: 17.69 }
2021-10-13 07:53:38 INFO: closing workers: { poolSize: 6, activeWorkers: 6 }
```

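The shared-buffer figures in the log above follow directly from the options: 10,000 descriptors × 1,024 elements × 4 bytes per `Float32` element = 40,960,000 bytes (10,240,000 elements). A hedged sketch of how such a buffer could be sized and handed to worker threads (variable names are illustrative, not the demo's actual code):

```js
// sketch: size one SharedArrayBuffer to hold the whole descriptor database
const options = { dbMax: 10000, descLength: 1024 }; // values match the log output above
const totalBytes = options.dbMax * options.descLength * Float32Array.BYTES_PER_ELEMENT; // 40,960,000 bytes
const shared = new SharedArrayBuffer(totalBytes);
const view = new Float32Array(shared); // 10,240,000 elements, shared by reference with every worker
view.fill(0); // descriptors are written into this view as records are appended

// workers created via worker_threads receive the same buffer, so descriptors are never copied per job:
// new Worker('./node-match-worker.js', { workerData: { shared, descLength: options.descLength } });
```
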
@ -1,7 +1,7 @@
|
|||
/**
|
||||
* Human demo for browsers
|
||||
*
|
||||
* Demo for face descriptor analysis and face similarity analysis
|
||||
* Demo for face descriptor analysis and face simmilarity analysis
|
||||
*/
|
||||
|
||||
/** @type {Human} */
|
||||
|
@ -11,7 +11,7 @@ const userConfig = {
|
|||
backend: 'humangl',
|
||||
async: true,
|
||||
warmup: 'none',
|
||||
cacheSensitivity: 0.01,
|
||||
cacheSensitivity: 0,
|
||||
debug: true,
|
||||
modelBasePath: '../../models/',
|
||||
deallocate: true,
|
||||
|
@ -22,6 +22,7 @@ const userConfig = {
|
|||
},
|
||||
face: {
|
||||
enabled: true,
|
||||
// detector: { rotation: false, return: true, maxDetected: 50, iouThreshold: 0.206, minConfidence: 0.122 },
|
||||
detector: { return: true, rotation: true, maxDetected: 50, iouThreshold: 0.01, minConfidence: 0.2 },
|
||||
mesh: { enabled: true },
|
||||
iris: { enabled: false },
|
||||
|
@ -63,16 +64,25 @@ async function loadFaceMatchDB() {
|
|||
}
|
||||
}
|
||||
|
||||
async function selectFaceCanvas(face) {
|
||||
async function SelectFaceCanvas(face) {
|
||||
// if we have face image tensor, enhance it and display it
|
||||
let embedding;
|
||||
document.getElementById('orig').style.filter = 'blur(16px)';
|
||||
if (face.tensor) {
|
||||
title('Sorting Faces by Similarity');
|
||||
const enhanced = human.enhance(face);
|
||||
if (enhanced) {
|
||||
const c = document.getElementById('orig');
|
||||
await human.draw.tensor(face.tensor, c);
|
||||
const squeeze = human.tf.squeeze(enhanced);
|
||||
const normalize = human.tf.div(squeeze, 255);
|
||||
await human.tf.browser.toPixels(normalize, c);
|
||||
human.tf.dispose([enhanced, squeeze, normalize]);
|
||||
const ctx = c.getContext('2d');
|
||||
ctx.font = 'small-caps 0.4rem "Lato"';
|
||||
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
|
||||
}
|
||||
const arr = db.map((rec) => rec.embedding);
|
||||
const res = await human.match.find(face.embedding, arr);
|
||||
const res = await human.match(face.embedding, arr);
|
||||
log('Match:', db[res.index].name);
|
||||
const emotion = face.emotion[0] ? `${Math.round(100 * face.emotion[0].score)}% ${face.emotion[0].emotion}` : 'N/A';
|
||||
document.getElementById('desc').innerHTML = `
|
||||
|
@ -93,11 +103,11 @@ async function selectFaceCanvas(face) {
|
|||
for (const canvas of canvases) {
|
||||
// calculate similarity from selected face to current one in the loop
|
||||
const current = all[canvas.tag.sample][canvas.tag.face];
|
||||
const similarity = human.match.similarity(face.embedding, current.embedding);
|
||||
const similarity = human.similarity(face.embedding, current.embedding);
|
||||
canvas.tag.similarity = similarity;
|
||||
// get best match
|
||||
// draw the canvas
|
||||
await human.draw.tensor(current.tensor, canvas);
|
||||
await human.tf.browser.toPixels(current.tensor, canvas);
|
||||
const ctx = canvas.getContext('2d');
|
||||
ctx.font = 'small-caps 1rem "Lato"';
|
||||
ctx.fillStyle = 'rgba(0, 0, 0, 1)';
|
||||
|
@ -110,7 +120,7 @@ async function selectFaceCanvas(face) {
|
|||
ctx.font = 'small-caps 1rem "Lato"';
|
||||
const start = human.now();
|
||||
const arr = db.map((rec) => rec.embedding);
|
||||
const res = await human.match.find(current.embedding, arr);
|
||||
const res = await human.match(current.embedding, arr);
|
||||
time += (human.now() - start);
|
||||
if (res.similarity > minScore) ctx.fillText(`DB: ${(100 * res.similarity).toFixed(1)}% ${db[res.index].name}`, 4, canvas.height - 30);
|
||||
}
|
||||
|
@ -125,7 +135,7 @@ async function selectFaceCanvas(face) {
|
|||
title('Selected Face');
|
||||
}
|
||||
|
||||
async function addFaceCanvas(index, res, fileName) {
|
||||
async function AddFaceCanvas(index, res, fileName) {
|
||||
all[index] = res.face;
|
||||
for (const i in res.face) {
|
||||
if (!res.face[i].tensor) continue; // did not get valid results
|
||||
|
@ -144,25 +154,25 @@ async function addFaceCanvas(index, res, fileName) {
|
|||
gender: ${Math.round(100 * res.face[i].genderScore)}% ${res.face[i].gender}
|
||||
emotion: ${emotion}
|
||||
`.replace(/ /g, ' ');
|
||||
await human.draw.tensor(res.face[i].tensor, canvas);
|
||||
await human.tf.browser.toPixels(res.face[i].tensor, canvas);
|
||||
const ctx = canvas.getContext('2d');
|
||||
if (!ctx) return;
|
||||
if (!ctx) return false;
|
||||
ctx.font = 'small-caps 0.8rem "Lato"';
|
||||
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
|
||||
ctx.fillText(`${res.face[i].age}y ${(100 * (res.face[i].genderScore || 0)).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6);
|
||||
const arr = db.map((rec) => rec.embedding);
|
||||
const result = human.match.find(res.face[i].embedding, arr);
|
||||
const result = human.match(res.face[i].embedding, arr);
|
||||
ctx.font = 'small-caps 1rem "Lato"';
|
||||
if (result.similarity && res.similarity > minScore) ctx.fillText(`${(100 * result.similarity).toFixed(1)}% ${db[result.index].name}`, 4, canvas.height - 30);
|
||||
document.getElementById('faces').appendChild(canvas);
|
||||
canvas.addEventListener('click', (evt) => {
|
||||
log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, 'Source:', evt.target.tag.source, all[evt.target.tag.sample][evt.target.tag.face]);
|
||||
selectFaceCanvas(all[evt.target.tag.sample][evt.target.tag.face]);
|
||||
SelectFaceCanvas(all[evt.target.tag.sample][evt.target.tag.face]);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
async function addImageElement(index, image, length) {
|
||||
async function AddImageElement(index, image, length) {
|
||||
const faces = all.reduce((prev, curr) => prev += curr.length, 0);
|
||||
title(`Analyzing Input Images<br> ${Math.round(100 * index / length)}% [${index} / ${length}]<br>Found ${faces} Faces`);
|
||||
return new Promise((resolve) => {
|
||||
|
@ -171,7 +181,7 @@ async function addImageElement(index, image, length) {
|
|||
document.getElementById('images').appendChild(img); // and finally we can add it
|
||||
human.detect(img, userConfig)
|
||||
.then((res) => { // eslint-disable-line promise/always-return
|
||||
addFaceCanvas(index, res, image); // then wait until image is analyzed
|
||||
AddFaceCanvas(index, res, image); // then wait until image is analyzed
|
||||
resolve(true);
|
||||
})
|
||||
.catch(() => log('human detect error'));
|
||||
|
@ -212,23 +222,18 @@ async function main() {
|
|||
// could not dynamically enumerate images so using static list
|
||||
if (images.length === 0) {
|
||||
images = [
|
||||
'ai-face.jpg', 'ai-upper.jpg', 'ai-body.jpg', 'solvay1927.jpg',
|
||||
'ai-body.jpg', 'solvay1927.jpg', 'ai-upper.jpg',
|
||||
'person-carolina.jpg', 'person-celeste.jpg', 'person-leila1.jpg', 'person-leila2.jpg', 'person-lexi.jpg', 'person-linda.jpg', 'person-nicole.jpg', 'person-tasia.jpg',
|
||||
'person-tetiana.jpg', 'person-vlado1.jpg', 'person-vlado5.jpg', 'person-vlado.jpg', 'person-christina.jpg', 'person-lauren.jpg',
|
||||
'group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg',
|
||||
'person-celeste.jpg', 'person-christina.jpg', 'person-lauren.jpg', 'person-lexi.jpg', 'person-linda.jpg', 'person-nicole.jpg', 'person-tasia.jpg', 'person-tetiana.jpg', 'person-vlado.jpg', 'person-vlado1.jpg', 'person-vlado5.jpg',
|
||||
'stock-group-1.jpg', 'stock-group-2.jpg',
|
||||
'stock-models-1.jpg', 'stock-models-2.jpg', 'stock-models-3.jpg', 'stock-models-4.jpg', 'stock-models-5.jpg', 'stock-models-6.jpg', 'stock-models-7.jpg', 'stock-models-8.jpg', 'stock-models-9.jpg',
|
||||
'stock-teen-1.jpg', 'stock-teen-2.jpg', 'stock-teen-3.jpg', 'stock-teen-4.jpg', 'stock-teen-5.jpg', 'stock-teen-6.jpg', 'stock-teen-7.jpg', 'stock-teen-8.jpg',
|
||||
'stock-models-10.jpg', 'stock-models-11.jpg', 'stock-models-12.jpg', 'stock-models-13.jpg', 'stock-models-14.jpg', 'stock-models-15.jpg', 'stock-models-16.jpg',
|
||||
'cgi-model-1.jpg', 'cgi-model-2.jpg', 'cgi-model-3.jpg', 'cgi-model-4.jpg', 'cgi-model-5.jpg', 'cgi-model-6.jpg', 'cgi-model-7.jpg', 'cgi-model-8.jpg', 'cgi-model-9.jpg',
|
||||
'cgi-model-10.jpg', 'cgi-model-11.jpg', 'cgi-model-12.jpg', 'cgi-model-13.jpg', 'cgi-model-14.jpg', 'cgi-model-15.jpg', 'cgi-model-18.jpg', 'cgi-model-19.jpg',
|
||||
'cgi-model-20.jpg', 'cgi-model-21.jpg', 'cgi-model-22.jpg', 'cgi-model-23.jpg', 'cgi-model-24.jpg', 'cgi-model-25.jpg', 'cgi-model-26.jpg', 'cgi-model-27.jpg', 'cgi-model-28.jpg', 'cgi-model-29.jpg',
|
||||
'cgi-model-30.jpg', 'cgi-model-31.jpg', 'cgi-model-33.jpg', 'cgi-model-34.jpg',
|
||||
'cgi-multiangle-1.jpg', 'cgi-multiangle-2.jpg', 'cgi-multiangle-3.jpg', 'cgi-multiangle-4.jpg', 'cgi-multiangle-6.jpg', 'cgi-multiangle-7.jpg', 'cgi-multiangle-8.jpg', 'cgi-multiangle-9.jpg', 'cgi-multiangle-10.jpg', 'cgi-multiangle-11.jpg',
|
||||
'stock-emotions-a-1.jpg', 'stock-emotions-a-2.jpg', 'stock-emotions-a-3.jpg', 'stock-emotions-a-4.jpg', 'stock-emotions-a-5.jpg', 'stock-emotions-a-6.jpg', 'stock-emotions-a-7.jpg', 'stock-emotions-a-8.jpg',
|
||||
'stock-emotions-b-1.jpg', 'stock-emotions-b-2.jpg', 'stock-emotions-b-3.jpg', 'stock-emotions-b-4.jpg', 'stock-emotions-b-5.jpg', 'stock-emotions-b-6.jpg', 'stock-emotions-b-7.jpg', 'stock-emotions-b-8.jpg',
|
||||
'daz3d-brianna.jpg', 'daz3d-chiyo.jpg', 'daz3d-cody.jpg', 'daz3d-drew-01.jpg', 'daz3d-drew-02.jpg', 'daz3d-ella-01.jpg', 'daz3d-ella-02.jpg', 'daz3d-gillian.jpg',
|
||||
'daz3d-hye-01.jpg', 'daz3d-hye-02.jpg', 'daz3d-kaia.jpg', 'daz3d-karen.jpg', 'daz3d-kiaria-01.jpg', 'daz3d-kiaria-02.jpg', 'daz3d-lilah-01.jpg', 'daz3d-lilah-02.jpg',
|
||||
'daz3d-lilah-03.jpg', 'daz3d-lila.jpg', 'daz3d-lindsey.jpg', 'daz3d-megah.jpg', 'daz3d-selina-01.jpg', 'daz3d-selina-02.jpg', 'daz3d-snow.jpg',
|
||||
'daz3d-sunshine.jpg', 'daz3d-taia.jpg', 'daz3d-tuesday-01.jpg', 'daz3d-tuesday-02.jpg', 'daz3d-tuesday-03.jpg', 'daz3d-zoe.jpg', 'daz3d-ginnifer.jpg',
|
||||
'daz3d-_emotions01.jpg', 'daz3d-_emotions02.jpg', 'daz3d-_emotions03.jpg', 'daz3d-_emotions04.jpg', 'daz3d-_emotions05.jpg',
|
||||
];
|
||||
// add prefix for gitpages
|
||||
images = images.map((a) => `../../samples/in/${a}`);
|
||||
images = images.map((a) => `/human/samples/in/${a}`);
|
||||
log('Adding static image list:', images);
|
||||
} else {
|
||||
log('Discovered images:', images);
|
||||
|
@ -237,7 +242,7 @@ async function main() {
|
|||
// images = ['/samples/in/person-lexi.jpg', '/samples/in/person-carolina.jpg', '/samples/in/solvay1927.jpg'];
|
||||
|
||||
const t0 = human.now();
|
||||
for (let i = 0; i < images.length; i++) await addImageElement(i, images[i], images.length);
|
||||
for (let i = 0; i < images.length; i++) await AddImageElement(i, images[i], images.length);
|
||||
const t1 = human.now();
|
||||
|
||||
// print stats
|
||||
|
@ -251,7 +256,7 @@ async function main() {
|
|||
title('');
|
||||
log('Ready');
|
||||
human.validate(userConfig);
|
||||
human.match.similarity([], []);
|
||||
human.similarity([], []);
|
||||
}
|
||||
|
||||
window.onload = main;
|
||||
|
|
|
@ -38,8 +38,7 @@ function match(descBuffer, options = { order: 2, multiplier: 20 }) {
|
|||
if (best < threshold || best === 0) break; // short circuit
|
||||
}
|
||||
best = (options.order === 2) ? Math.sqrt(best) : best ** (1 / options.order);
|
||||
const similarity = Math.round(100 * Math.max(0, 100 - best) / 100.0) / 100;
|
||||
return { index, distance: best, similarity };
|
||||
return { index, distance: best, similarity: Math.max(0, 100 - best) / 100.0 };
|
||||
}
|
||||
|
||||
threads.parentPort?.on('message', (msg) => {
|
||||
|
@ -61,11 +60,11 @@ threads.parentPort?.on('message', (msg) => {
|
|||
}
|
||||
if (typeof msg.debug !== 'undefined') { // set verbose logging
|
||||
debug = msg.debug;
|
||||
// if (debug) threads.parentPort?.postMessage(`debug: ${debug}`);
|
||||
if (debug) threads.parentPort?.postMessage(`debug: ${debug}`);
|
||||
}
|
||||
if (typeof msg.threshold !== 'undefined') { // set minimum similarity threshold
|
||||
threshold = msg.threshold;
|
||||
// if (debug) threads.parentPort?.postMessage(`threshold: ${threshold}`);
|
||||
if (debug) threads.parentPort?.postMessage(`threshold: ${threshold}`);
|
||||
}
|
||||
if (typeof msg.shutdown !== 'undefined') { // got message to close worker
|
||||
if (debug) threads.parentPort?.postMessage('shutting down');
|
||||
|
|
|
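For readability, the tail of the worker's `match()` shown above maps an accumulated distance to a similarity score; a small worked example of the order-2 branch (the numbers are made up):

```js
// worked example of the order-2 branch shown above
let best = 36.5; // pretend accumulated squared distance for the best candidate
best = Math.sqrt(best); // order === 2, so take the square root -> ~6.04
const similarity = Math.round(100 * Math.max(0, 100 - best) / 100.0) / 100; // -> 0.94, clamped at 0 and rounded to 2 decimals
```
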
@ -15,7 +15,7 @@ const options = {
|
|||
dbMax: 10000, // maximum number of records to hold in memory
|
||||
threadPoolSize: 12, // number of worker threads to create in thread pool
|
||||
workerSrc: './node-match-worker.js', // code that executes in the worker thread
|
||||
debug: true, // verbose messages
|
||||
debug: false, // verbose messages
|
||||
minThreshold: 0.5, // match returns first record that meets the similarity threshold, set to 0 to always scan all records
|
||||
descLength: 1024, // descriptor length
|
||||
};
|
||||
|
@ -176,7 +176,7 @@ async function main() {
|
|||
data.requestID++; // increase request id
|
||||
if (testOptions.fuzDescriptors) match(fuzDescriptor(descriptor)); // fuz descriptor for harder match
|
||||
else match(descriptor);
|
||||
if (options.debug) log.debug('submitted job', data.requestID); // we already know what we're searching for so we can compare results
|
||||
if (options.debug) log.info('submitted job', data.requestID); // we already know what we're searching for so we can compare results
|
||||
}
|
||||
log.state('submitted:', { matchJobs: testOptions.maxJobs, poolSize: data.workers.length, activeWorkers: data.workers.filter((worker) => !!worker).length });
|
||||
}
|
||||
|
|
|
@ -28,10 +28,10 @@ async function updateCached(req) {
|
|||
.then((update) => {
|
||||
// update cache if request is ok
|
||||
if (update.ok) {
|
||||
caches
|
||||
caches // eslint-disable-line promise/no-nesting
|
||||
.open(cacheName)
|
||||
.then((cache) => cache.put(req, update))
|
||||
.catch((err) => log('cache update error', err)); // eslint-disable-line promise/no-nesting
|
||||
.catch((err) => log('cache update error', err));
|
||||
}
|
||||
return true;
|
||||
})
|
||||
|
@ -76,8 +76,8 @@ async function getCached(evt) {
|
|||
|
||||
function cacheInit() {
|
||||
caches.open(cacheName)
|
||||
.then((cache) => cache.addAll(cacheFiles)
|
||||
.then( // eslint-disable-line promise/no-nesting
|
||||
.then((cache) => cache.addAll(cacheFiles) // eslint-disable-line promise/no-nesting
|
||||
.then(
|
||||
() => log('cache refresh:', cacheFiles.length, 'files'),
|
||||
(err) => log('cache error', err),
|
||||
))
|
||||
|
|
|
@ -18,10 +18,7 @@
|
|||
* ui={}: contains all variables exposed in the UI
|
||||
*/
|
||||
|
||||
// WARNING!!!
|
||||
// This demo is written using older code style and a lot of manual setup
|
||||
// Newer versions of Human have richer functionality allowing for much cleaner & easier usage
|
||||
// It is recommended to use other demos such as `demo/typescript` for usage examples
|
||||
// test url <https://human.local/?worker=false&async=false&bench=false&draw=true&warmup=full&backend=humangl>
|
||||
|
||||
import { Human } from '../dist/human.esm.js'; // equivalent of @vladmandic/human
|
||||
import Menu from './helpers/menu.js';
|
||||
|
@ -86,7 +83,7 @@ const ui = {
|
|||
facing: true, // camera facing front or back
|
||||
baseBackground: 'rgba(50, 50, 50, 1)', // 'grey'
|
||||
columns: 2, // when processing sample images create this many columns
|
||||
useWorker: false, // use web workers for processing
|
||||
useWorker: true, // use web workers for processing
|
||||
worker: 'index-worker.js',
|
||||
maxFPSframes: 10, // keep fps history for how many frames
|
||||
modelsPreload: false, // preload human models on startup
|
||||
|
@ -185,7 +182,7 @@ function status(msg) {
|
|||
prevStatus = msg;
|
||||
} else {
|
||||
const video = document.getElementById('video');
|
||||
const playing = isLive(video) && !video.paused; // eslint-disable-line no-use-before-define
|
||||
const playing = (video.srcObject !== null) && !video.paused;
|
||||
document.getElementById('play').style.display = playing ? 'none' : 'block';
|
||||
document.getElementById('loader').style.display = 'none';
|
||||
div.innerText = '';
|
||||
|
@ -195,6 +192,7 @@ function status(msg) {
|
|||
async function videoPlay(videoElement = document.getElementById('video')) {
|
||||
document.getElementById('btnStartText').innerHTML = 'pause video';
|
||||
await videoElement.play();
|
||||
// status();
|
||||
}
|
||||
|
||||
async function videoPause() {
|
||||
|
@ -222,13 +220,21 @@ async function calcSimmilarity(result) {
|
|||
compare.original = result;
|
||||
log('setting face compare baseline:', result.face[0]);
|
||||
if (result.face[0].tensor) {
|
||||
const enhanced = human.enhance(result.face[0]);
|
||||
if (enhanced) {
|
||||
const c = document.getElementById('orig');
|
||||
human.draw.tensor(result.face[0].tensor, c);
|
||||
const squeeze = human.tf.squeeze(enhanced);
|
||||
const norm = human.tf.div(squeeze, 255);
|
||||
human.tf.browser.toPixels(norm, c);
|
||||
human.tf.dispose(enhanced);
|
||||
human.tf.dispose(squeeze);
|
||||
human.tf.dispose(norm);
|
||||
}
|
||||
} else {
|
||||
document.getElementById('compare-canvas').getContext('2d').drawImage(compare.original.canvas, 0, 0, 200, 200);
|
||||
}
|
||||
}
|
||||
const similarity = human.match.similarity(compare.original.face[0].embedding, result.face[0].embedding);
|
||||
const similarity = human.similarity(compare.original.face[0].embedding, result.face[0].embedding);
|
||||
document.getElementById('similarity').innerText = `similarity: ${Math.trunc(1000 * similarity) / 10}%`;
|
||||
}
|
||||
|
||||
|
@ -513,6 +519,17 @@ function runHumanDetect(input, canvas, timestamp) {
|
|||
human.detect(input, userConfig)
|
||||
.then((result) => {
|
||||
status();
|
||||
/*
|
||||
setTimeout(async () => { // simulate gl context lost 2sec after initial detection
|
||||
const ext = human.gl && human.gl.gl ? human.gl.gl.getExtension('WEBGL_lose_context') : {};
|
||||
if (ext && ext.loseContext) {
|
||||
log('simulate context lost:', human.env.webgl, human.gl, ext);
|
||||
human.gl.gl.getExtension('WEBGL_lose_context').loseContext();
|
||||
await videoPause();
|
||||
status('Exception: WebGL');
|
||||
}
|
||||
}, 2000);
|
||||
*/
|
||||
if (result.performance && result.performance.total) ui.detectFPS.push(1000 / result.performance.total);
|
||||
if (ui.detectFPS.length > ui.maxFPSframes) ui.detectFPS.shift();
|
||||
if (ui.bench) {
|
||||
|
@ -606,15 +623,21 @@ async function processImage(input, title) {
|
|||
|
||||
async function processVideo(input, title) {
|
||||
status(`processing video: ${title}`);
|
||||
// const video = document.getElementById('video-file') || document.createElement('video');
|
||||
const video = document.getElementById('video');
|
||||
const canvas = document.getElementById('canvas');
|
||||
// video.id = 'video-file';
|
||||
// video.controls = true;
|
||||
// video.loop = true;
|
||||
// video.style.display = 'none';
|
||||
// document.body.appendChild(video);
|
||||
video.addEventListener('error', () => status(`video loading error: ${video.error.message}`));
|
||||
video.addEventListener('canplay', async () => {
|
||||
for (const m of Object.values(menu)) m.hide();
|
||||
document.getElementById('samples-container').style.display = 'none';
|
||||
canvas.style.display = 'block';
|
||||
await videoPlay();
|
||||
runHumanDetect(video, canvas);
|
||||
if (!ui.detectThread) runHumanDetect(video, canvas);
|
||||
});
|
||||
video.srcObject = null;
|
||||
video.src = input;
|
||||
|
@ -627,8 +650,9 @@ async function detectVideo() {
|
|||
const canvas = document.getElementById('canvas');
|
||||
canvas.style.display = 'block';
|
||||
cancelAnimationFrame(ui.detectThread);
|
||||
if (isLive(video) && !video.paused) {
|
||||
if ((video.srcObject !== null) && !video.paused) {
|
||||
await videoPause();
|
||||
// if (ui.drawThread) cancelAnimationFrame(ui.drawThread);
|
||||
} else {
|
||||
const cameraError = await setupCamera();
|
||||
if (!cameraError) {
|
||||
|
@ -770,7 +794,6 @@ function setupMenu() {
|
|||
|
||||
async function resize() {
|
||||
window.onresize = null;
|
||||
log('resize');
|
||||
// best setting for mobile, ignored for desktop
|
||||
// can set dynamic value such as Math.min(1, Math.round(100 * window.innerWidth / 960) / 100);
|
||||
const viewportScale = 0.7;
|
||||
|
@ -977,7 +1000,8 @@ async function main() {
|
|||
if (ui.modelsPreload && !ui.useWorker) {
|
||||
status('loading');
|
||||
await human.load(userConfig); // this is not required, just pre-loads all models
|
||||
log('demo loaded models:', human.models.loaded());
|
||||
const loaded = Object.keys(human.models).filter((a) => human.models[a]);
|
||||
log('demo loaded models:', loaded);
|
||||
} else {
|
||||
await human.init();
|
||||
}
|
||||
|
|
|
@ -16,7 +16,6 @@
|
|||
node demo/nodejs/node-multiprocess.js
|
||||
```
|
||||
|
||||
<!-- eslint-skip -->
|
||||
```json
|
||||
2021-06-01 08:54:19 INFO: @vladmandic/human version 2.0.0
|
||||
2021-06-01 08:54:19 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0
|
||||
|
|
|
@ -9,10 +9,10 @@
|
|||
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="theme-color" content="#000000">
|
||||
<link rel="manifest" href="../../manifest.webmanifest">
|
||||
<link rel="manifest" href="../manifest.webmanifest">
|
||||
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
|
||||
<link rel="apple-touch-icon" href="../../assets/icon.png">
|
||||
<script src="../multithread/index.js" type="module"></script>
|
||||
<script src="./index.js" type="module"></script>
|
||||
<style>
|
||||
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
|
||||
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
import { Human } from '../../dist/human.esm.js'; // equivalent of @vladmandic/human
|
||||
import GLBench from '../helpers/gl-bench.js';
|
||||
|
||||
const workerJS = '../multithread/worker.js';
|
||||
const workerJS = './worker.js';
|
||||
|
||||
const config = {
|
||||
main: { // processes input and runs gesture analysis
|
||||
|
|
|
@ -8,8 +8,8 @@
|
|||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const childProcess = require('child_process'); // eslint-disable-line camelcase
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
const childProcess = require('child_process'); // eslint-disable-line camelcase
|
||||
// note that main process does not import human or tfjs at all, it's all done from worker process
|
||||
|
||||
const workerFile = 'demo/multithread/node-multiprocess-worker.js';
|
||||
|
|
|
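Since the main process never imports `human` or `tfjs` itself, all detection happens in forked worker processes; a hedged sketch of that dispatch pattern (the message protocol here is illustrative, not the demo's actual one):

```js
// sketch: fork one worker per CPU and hand out images over IPC
const childProcess = require('child_process');
const os = require('os');

function runPool(images, workerFile) {
  const queue = [...images];
  const numWorkers = Math.min(os.cpus().length, queue.length);
  for (let i = 0; i < numWorkers; i++) {
    const worker = childProcess.fork(workerFile);
    worker.send({ image: queue.shift() }); // seed each worker with its first image
    worker.on('message', () => { // assumed protocol: worker reports when it finished an image
      if (queue.length > 0) worker.send({ image: queue.shift() });
      else worker.kill(); // no work left for this worker
    });
  }
}
```
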
@ -28,8 +28,7 @@ or you can pass a path to image to analyze, either on local filesystem or using
|
|||
node demo/nodejs/node.js
|
||||
```
|
||||
|
||||
<!-- eslint-skip -->
|
||||
```js
|
||||
```json
|
||||
2021-06-01 08:52:15 INFO: @vladmandic/human version 2.0.0
|
||||
2021-06-01 08:52:15 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0
|
||||
2021-06-01 08:52:15 INFO: Current folder: /home/vlado/dev/human
|
||||
|
@ -83,7 +82,7 @@ node demo/nodejs/node.js
|
|||
detector: { modelPath: 'handdetect.json' },
|
||||
skeleton: { modelPath: 'handskeleton.json' }
|
||||
},
|
||||
object: { enabled: true, modelPath: 'centernet.json', minConfidence: 0.2, iouThreshold: 0.4, maxDetected: 10, skipFrames: 19 }
|
||||
object: { enabled: true, modelPath: 'mb3-centernet.json', minConfidence: 0.2, iouThreshold: 0.4, maxDetected: 10, skipFrames: 19 }
|
||||
}
|
||||
08:52:15.673 Human: version: 2.0.0
|
||||
08:52:15.674 Human: tfjs version: 3.6.0
|
||||
|
@ -97,7 +96,7 @@ node demo/nodejs/node.js
|
|||
08:52:15.847 Human: load model: file://models/handdetect.json
|
||||
08:52:15.847 Human: load model: file://models/handskeleton.json
|
||||
08:52:15.914 Human: load model: file://models/movenet-lightning.json
|
||||
08:52:15.957 Human: load model: file://models/centernet.json
|
||||
08:52:15.957 Human: load model: file://models/mb3-centernet.json
|
||||
08:52:16.015 Human: load model: file://models/faceres.json
|
||||
08:52:16.015 Human: tf engine state: 50796152 bytes 1318 tensors
|
||||
2021-06-01 08:52:16 INFO: Loaded: [ 'face', 'movenet', 'handpose', 'emotion', 'centernet', 'faceres', [length]: 6 ]
|
||||
|
|
|
@ -1,70 +0,0 @@
|
|||
const fs = require('fs');
|
||||
const process = require('process');
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
const Human = require('../../dist/human.node.js');
|
||||
|
||||
const humanConfig = {
|
||||
debug: false,
|
||||
face: {
|
||||
enabled: true,
|
||||
detector: { enabled: true, modelPath: 'blazeface.json' },
|
||||
description: { enabled: true, modelPath: 'faceres.json' },
|
||||
// gear: { enabled: true, modelPath: '/home/vlado/dev/human-models/models/gear.json' },
|
||||
// ssrnet: { enabled: true, modelPathAge: '/home/vlado/dev/human-models/models/age.json', modelPathGender: '/home/vlado/dev/human-models/models/gender.json' },
|
||||
emotion: { enabled: false },
|
||||
mesh: { enabled: false },
|
||||
iris: { enabled: false },
|
||||
antispoof: { enabled: false },
|
||||
liveness: { enabled: false },
|
||||
},
|
||||
body: { enabled: false },
|
||||
hand: { enabled: false },
|
||||
gesture: { enabled: false },
|
||||
};
|
||||
const human = new Human.Human(humanConfig);
|
||||
const ageThreshold = 18;
|
||||
|
||||
async function detect(inputFile) {
|
||||
try {
|
||||
const buffer = fs.readFileSync(inputFile);
|
||||
const tensor = human.tf.node.decodeImage(buffer);
|
||||
const result = await human.detect(tensor);
|
||||
human.tf.dispose(tensor);
|
||||
if (!result || !result.face || result.face.length === 0) return false;
|
||||
let msg = ` file=${inputFile} resolution=${tensor.shape}`;
|
||||
for (const face of result.face) {
|
||||
msg = ` file=${inputFile} resolution=${tensor.shape} age=${face.age} gender=${face.gender} confidence=${face.genderScore}`;
|
||||
if (face.age < ageThreshold) {
|
||||
log.warn('fail:' + msg);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
log.info('pass: ' + msg);
|
||||
return false;
|
||||
} catch (err) {
|
||||
log.error(`error: file=${inputFile}: ${err}`);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
async function main() {
|
||||
log.info(`Human: version=${human.version} tf=${tf.version_core}`);
|
||||
process.noDeprecation = true;
|
||||
if (process.argv.length < 3) return;
|
||||
await human.load();
|
||||
await human.warmup();
|
||||
const t0 = performance.now();
|
||||
const args = process.argv.slice(2);
|
||||
let pass = 0;
|
||||
let fail = 0;
|
||||
for (const arg of args) {
|
||||
const ok = await detect(arg);
|
||||
if (ok) pass++;
|
||||
else fail++;
|
||||
}
|
||||
const t1 = performance.now();
|
||||
log.info(`Human: files=${args.length} pass=${pass} fail=${fail} time=${Math.round(t1 - t0)} fps=${Math.round(10000 * args.length / (t1 - t0)) / 10}`);
|
||||
}
|
||||
|
||||
main();
|
|
@ -1,66 +0,0 @@
|
|||
/**
|
||||
* Human simple demo for NodeJS
|
||||
*/
|
||||
|
||||
const childProcess = require('child_process'); // eslint-disable-line camelcase
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
|
||||
|
||||
const config = {
|
||||
cacheSensitivity: 0.01,
|
||||
wasmPlatformFetch: true,
|
||||
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
|
||||
};
|
||||
const count = 10;
|
||||
|
||||
async function loadImage(input) {
|
||||
const inputImage = await canvas.loadImage(input);
|
||||
const inputCanvas = new canvas.Canvas(inputImage.width, inputImage.height);
|
||||
const inputCtx = inputCanvas.getContext('2d');
|
||||
inputCtx.drawImage(inputImage, 0, 0);
|
||||
const imageData = inputCtx.getImageData(0, 0, inputCanvas.width, inputCanvas.height);
|
||||
process.send({ input, resolution: [inputImage.width, inputImage.height] });
|
||||
return imageData;
|
||||
}
|
||||
|
||||
async function runHuman(module, backend) {
|
||||
if (backend === 'wasm') require('@tensorflow/tfjs-backend-wasm'); // eslint-disable-line node/no-unpublished-require, global-require
|
||||
const Human = require('../../dist/' + module); // eslint-disable-line global-require, import/no-dynamic-require
|
||||
config.backend = backend;
|
||||
const human = new Human.Human(config);
|
||||
human.env.Canvas = canvas.Canvas;
|
||||
human.env.Image = canvas.Image;
|
||||
human.env.ImageData = canvas.ImageData;
|
||||
process.send({ human: human.version, module });
|
||||
await human.init();
|
||||
process.send({ desired: human.config.backend, wasm: human.env.wasm, tfjs: human.tf.version.tfjs, tensorflow: human.env.tensorflow });
|
||||
const imageData = await loadImage('samples/in/ai-body.jpg');
|
||||
const t0 = human.now();
|
||||
await human.load();
|
||||
const t1 = human.now();
|
||||
await human.warmup();
|
||||
const t2 = human.now();
|
||||
for (let i = 0; i < count; i++) await human.detect(imageData);
|
||||
const t3 = human.now();
|
||||
process.send({ backend: human.tf.getBackend(), load: Math.round(t1 - t0), warmup: Math.round(t2 - t1), detect: Math.round(t3 - t2), count, memory: human.tf.memory().numBytes });
|
||||
}
|
||||
|
||||
async function executeWorker(args) {
|
||||
return new Promise((resolve) => {
|
||||
const worker = childProcess.fork(process.argv[1], args);
|
||||
worker.on('message', (msg) => log.data(msg));
|
||||
worker.on('exit', () => resolve(true));
|
||||
});
|
||||
}
|
||||
|
||||
async function main() {
|
||||
if (process.argv[2]) {
|
||||
await runHuman(process.argv[2], process.argv[3]);
|
||||
} else {
|
||||
await executeWorker(['human.node.js', 'tensorflow']);
|
||||
await executeWorker(['human.node-gpu.js', 'tensorflow']);
|
||||
await executeWorker(['human.node-wasm.js', 'wasm']);
|
||||
}
|
||||
}
|
||||
|
||||
main();
|
|
@ -24,7 +24,6 @@ const config = { // just enable all and leave default settings
|
|||
|
||||
async function main() {
|
||||
log.header();
|
||||
process.noDeprecation = true;
|
||||
|
||||
globalThis.Canvas = canvas.Canvas; // patch global namespace with canvas library
|
||||
globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library
|
||||
|
@ -36,13 +35,12 @@ async function main() {
|
|||
log.info('Human:', human.version, 'TF:', tf.version_core);
|
||||
|
||||
await human.load(); // pre-load models
|
||||
log.info('Loaded models:', human.models.loaded());
|
||||
log.info('Loaded models:', Object.keys(human.models).filter((a) => human.models[a]));
|
||||
log.info('Memory state:', human.tf.engine().memory());
|
||||
|
||||
// parse cmdline
|
||||
const input = process.argv[2];
|
||||
let output = process.argv[3];
|
||||
if (!output.toLowerCase().endsWith('.jpg')) output += '.jpg';
|
||||
const output = process.argv[3];
|
||||
if (process.argv.length !== 4) log.error('Parameters: <input-image> <output-image> missing');
|
||||
else if (!fs.existsSync(input) && !input.startsWith('http')) log.error(`File not found: ${process.argv[2]}`);
|
||||
else {
|
||||
|
|
|
@ -5,6 +5,8 @@
|
|||
const fs = require('fs');
|
||||
const process = require('process');
|
||||
|
||||
let fetch; // fetch is dynamically imported later
|
||||
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
// in nodejs environments tfjs-node is required to be loaded before human
|
||||
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
|
@ -36,13 +38,13 @@ async function detect(input) {
|
|||
let buffer;
|
||||
log.info('Loading image:', input);
|
||||
if (input.startsWith('http:') || input.startsWith('https:')) {
|
||||
fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-unpublished-import
|
||||
const res = await fetch(input);
|
||||
if (res && res.ok) buffer = Buffer.from(await res.arrayBuffer());
|
||||
if (res && res.ok) buffer = await res.buffer();
|
||||
else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type'));
|
||||
} else {
|
||||
buffer = fs.readFileSync(input);
|
||||
}
|
||||
log.data('Image bytes:', buffer?.length, 'buffer:', buffer?.slice(0, 32));
|
||||
|
||||
// decode image using tfjs-node so we don't need external dependencies
|
||||
if (!buffer) return;
|
||||
|
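The change from `res.buffer()` to `Buffer.from(await res.arrayBuffer())` reflects newer `node-fetch` releases, which deprecate the non-standard `buffer()` helper. A hedged sketch of the full fetch-and-decode path (assumes `node-fetch` and `tfjs-node` are installed; the function name is illustrative):

```js
// sketch: fetch a remote image into a Buffer and decode it with tfjs-node
const tf = require('@tensorflow/tfjs-node');

async function loadRemoteImage(url) {
  const fetch = (await import('node-fetch')).default; // node-fetch v3 is ESM-only, so import it dynamically
  const res = await fetch(url);
  if (!res.ok) throw new Error(`failed to fetch: ${url} status=${res.status}`);
  const buffer = Buffer.from(await res.arrayBuffer()); // standard replacement for the deprecated res.buffer()
  return tf.node.decodeImage(buffer, 3); // 3-channel image tensor ready for human.detect()
}
```
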
@ -65,7 +67,8 @@ async function main() {
|
|||
});
|
||||
|
||||
human.events.addEventListener('load', () => {
|
||||
log.info('Event Loaded:', human.models.loaded(), human.tf.engine().memory());
|
||||
const loaded = Object.keys(human.models).filter((a) => human.models[a]);
|
||||
log.info('Event Loaded:', loaded, human.tf.engine().memory());
|
||||
});
|
||||
|
||||
human.events.addEventListener('image', () => {
|
||||
|
@ -77,7 +80,7 @@ async function main() {
|
|||
const persons = human.result.persons;
|
||||
for (let i = 0; i < persons.length; i++) {
|
||||
const face = persons[i].face;
|
||||
const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.distance}` : null;
|
||||
const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.iris}` : null;
|
||||
const body = persons[i].body;
|
||||
const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints?.length}` : null;
|
||||
log.data(` #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`);
|
||||
|
|
|
@ -16,7 +16,8 @@ const humanConfig = {
|
|||
};
|
||||
|
||||
async function main(inputFile) {
|
||||
global.fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-unpublished-import, import/no-unresolved, node/no-missing-import, node/no-extraneous-import
|
||||
// @ts-ignore
|
||||
global.fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-unpublished-import
|
||||
const human = new Human.Human(humanConfig); // create instance of human using default configuration
|
||||
log.info('Human:', human.version, 'TF:', tf.version_core);
|
||||
await human.load(); // optional as models would be loaded on-demand first time they are required
|
||||
|
|
|
@ -27,7 +27,8 @@ async function init() {
|
|||
await human.tf.ready();
|
||||
log.info('Human:', human.version, 'TF:', tf.version_core);
|
||||
await human.load();
|
||||
log.info('Loaded:', human.models.loaded());
|
||||
const loaded = Object.keys(human.models).filter((a) => human.models[a]);
|
||||
log.info('Loaded:', loaded);
|
||||
log.info('Memory state:', human.tf.engine().memory());
|
||||
}
|
||||
|
||||
|
@ -45,12 +46,10 @@ async function detect(input) {
|
|||
}
|
||||
|
||||
async function main() {
|
||||
process.noDeprecation = true;
|
||||
log.configure({ inspect: { breakLength: 265 } });
|
||||
log.header();
|
||||
if (process.argv.length !== 4) {
|
||||
log.error('Parameters: <first image> <second image> missing');
|
||||
return;
|
||||
throw new Error('Parameters: <first image> <second image> missing');
|
||||
}
|
||||
await init();
|
||||
const res1 = await detect(process.argv[2]);
|
||||
|
@ -58,7 +57,7 @@ async function main() {
|
|||
if (!res1 || !res1.face || res1.face.length === 0 || !res2 || !res2.face || res2.face.length === 0) {
|
||||
throw new Error('Could not detect face descriptors');
|
||||
}
|
||||
const similarity = human.match.similarity(res1.face[0].embedding, res2.face[0].embedding, { order: 2 });
|
||||
const similarity = human.similarity(res1.face[0].embedding, res2.face[0].embedding, { order: 2 });
|
||||
log.data('Similarity: ', similarity);
|
||||
}
|
||||
|
||||
|
|
|
@ -13,11 +13,9 @@ const Human = require('../../dist/human.node.js'); // use this when using human
|
|||
const humanConfig = {
|
||||
// add any custom config here
|
||||
debug: true,
|
||||
body: { enabled: false },
|
||||
};
|
||||
|
||||
async function detect(inputFile) {
|
||||
process.noDeprecation = true;
|
||||
const human = new Human.Human(humanConfig); // create instance of human using default configuration
|
||||
console.log('Human:', human.version, 'TF:', tf.version_core); // eslint-disable-line no-console
|
||||
await human.load(); // optional as models would be loaded on-demand first time they are required
|
||||
|
|
|
@ -11,11 +11,10 @@
|
|||
* Working version of `ffmpeg` must be present on the system
|
||||
*/
|
||||
|
||||
const process = require('process');
|
||||
const spawn = require('child_process').spawn;
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
// in nodejs environments tfjs-node is required to be loaded before human
|
||||
// const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
|
||||
const Pipe2Jpeg = require('pipe2jpeg'); // eslint-disable-line node/no-missing-require, import/no-unresolved
|
||||
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
|
||||
|
@ -23,8 +22,7 @@ const Human = require('../../dist/human.node.js'); // use this when using human
|
|||
|
||||
let count = 0; // counter
|
||||
let busy = false; // busy flag
|
||||
let inputFile = './test.mp4';
|
||||
if (process.argv.length === 3) inputFile = process.argv[2];
|
||||
const inputFile = './test.mp4';
|
||||
|
||||
const humanConfig = {
|
||||
modelBasePath: 'file://models/',
|
||||
|
@ -61,27 +59,24 @@ const ffmpegParams = [
|
|||
'pipe:1', // output to unix pipe that is then captured by pipe2jpeg
|
||||
];
|
||||
|
||||
async function detect(jpegBuffer) {
|
||||
async function process(jpegBuffer) {
|
||||
if (busy) return; // skip processing if busy
|
||||
busy = true;
|
||||
const tensor = human.tf.node.decodeJpeg(jpegBuffer, 3); // decode jpeg buffer to raw tensor
|
||||
log.state('input frame:', ++count, 'size:', jpegBuffer.length, 'decoded shape:', tensor.shape);
|
||||
const res = await human.detect(tensor);
|
||||
human.tf.dispose(tensor); // must dispose tensor
|
||||
// start custom processing here
|
||||
log.data('frame', { frame: ++count, size: jpegBuffer.length, shape: tensor.shape, face: res?.face?.length, body: res?.body?.length, hand: res?.hand?.length, gesture: res?.gesture?.length });
|
||||
if (res?.face?.[0]) log.data('person', { score: [res.face[0].boxScore, res.face[0].faceScore], age: res.face[0].age || 0, gender: [res.face[0].genderScore || 0, res.face[0].gender], emotion: res.face[0].emotion?.[0] });
|
||||
// at the end of processing mark loop as not busy so it can process next frame
|
||||
log.data('gesture', JSON.stringify(res.gesture));
|
||||
// do processing here
|
||||
tf.dispose(tensor); // must dispose tensor
|
||||
busy = false;
|
||||
}
|
||||
|
||||
async function main() {
|
||||
log.header();
|
||||
process.noDeprecation = true;
|
||||
await human.tf.ready();
|
||||
// pre-load models
|
||||
log.info({ human: human.version, tf: human.tf.version_core });
|
||||
log.info({ input: inputFile });
|
||||
pipe2jpeg.on('data', (jpegBuffer) => detect(jpegBuffer));
|
||||
log.info('human:', human.version, 'tf:', tf.version_core);
|
||||
pipe2jpeg.on('jpeg', (jpegBuffer) => process(jpegBuffer));
|
||||
|
||||
const ffmpeg = spawn('ffmpeg', ffmpegParams, { stdio: ['ignore', 'pipe', 'ignore'] });
|
||||
ffmpeg.on('error', (error) => log.error('ffmpeg error:', error));
|
||||
|
|
|
@ -1,11 +1,15 @@
|
|||
/**
|
||||
* Human demo for NodeJS
|
||||
*
|
||||
* Requires [node-fetch](https://www.npmjs.com/package/node-fetch) to provide `fetch` functionality in NodeJS environment
|
||||
*/
|
||||
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const process = require('process');
|
||||
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
|
||||
|
||||
let fetch; // fetch is dynamically imported later
|
||||
|
||||
// in nodejs environments tfjs-node is required to be loaded before human
|
||||
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
|
||||
|
@ -49,7 +53,8 @@ async function init() {
|
|||
log.info('Human:', human.version);
|
||||
// log.info('Active Configuration', human.config);
|
||||
await human.load();
|
||||
log.info('Loaded:', human.models.loaded());
|
||||
const loaded = Object.keys(human.models).filter((a) => human.models[a]);
|
||||
log.info('Loaded:', loaded);
|
||||
// log.info('Memory state:', human.tf.engine().memory());
|
||||
log.data(tf.backend().binding ? tf.backend().binding.TF_Version : null);
|
||||
}
|
||||
|
@ -60,12 +65,11 @@ async function detect(input) {
|
|||
log.info('Loading image:', input);
|
||||
if (input.startsWith('http:') || input.startsWith('https:')) {
|
||||
const res = await fetch(input);
|
||||
if (res && res.ok) buffer = Buffer.from(await res.arrayBuffer());
|
||||
if (res && res.ok) buffer = await res.buffer();
|
||||
else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type'));
|
||||
} else {
|
||||
buffer = fs.readFileSync(input);
|
||||
}
|
||||
log.data('Image bytes:', buffer?.length, 'buffer:', buffer?.slice(0, 32));
|
||||
|
||||
// decode image using tfjs-node so we don't need external dependencies
|
||||
// can also be done using canvas.js or some other 3rd party image library
|
||||
|
@ -92,7 +96,7 @@ async function detect(input) {
|
|||
try {
|
||||
result = await human.detect(tensor, myConfig);
|
||||
} catch (err) {
|
||||
log.error('caught', err);
|
||||
log.error('caught');
|
||||
}
|
||||
|
||||
// dispose image tensor as we no longer need it
|
||||
|
@ -104,7 +108,7 @@ async function detect(input) {
|
|||
for (let i = 0; i < result.face.length; i++) {
|
||||
const face = result.face[i];
|
||||
const emotion = face.emotion.reduce((prev, curr) => (prev.score > curr.score ? prev : curr));
|
||||
log.data(` Face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} distance:${face.distance}`);
|
||||
log.data(` Face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} iris:${face.iris}`);
|
||||
}
|
||||
} else {
|
||||
log.data(' Face: N/A');
|
||||
|
@ -187,8 +191,8 @@ async function test() {
|
|||
async function main() {
|
||||
log.configure({ inspect: { breakLength: 265 } });
|
||||
log.header();
|
||||
process.noDeprecation = true;
|
||||
log.info('Current folder:', process.env.PWD);
|
||||
fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-unpublished-import
|
||||
await init();
|
||||
const f = process.argv[2];
|
||||
if (process.argv.length !== 3) {
|
||||
|
|
|
@ -20,7 +20,7 @@ const config = { // just enable all and leave default settings
|
|||
modelBasePath: 'file://models',
|
||||
debug: true,
|
||||
softwareKernels: true, // slower but more precise since face rotation can be performed in software mode in nodejs environments
|
||||
cacheSensitivity: 0.01,
|
||||
cacheSensitivity: 0,
|
||||
face: { enabled: true, detector: { maxDetected: 100, minConfidence: 0.1 } },
|
||||
object: { enabled: true, maxDetected: 100, minConfidence: 0.1 },
|
||||
gesture: { enabled: true },
|
||||
|
@ -77,11 +77,10 @@ async function main() {
|
|||
globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library
|
||||
|
||||
log.info('Human:', human.version, 'TF:', tf.version_core);
|
||||
process.noDeprecation = true;
|
||||
const configErrors = await human.validate();
|
||||
if (configErrors.length > 0) log.error('Configuration errors:', configErrors);
|
||||
await human.load(); // pre-load models
|
||||
log.info('Loaded models:', human.models.loaded());
|
||||
log.info('Loaded models:', Object.keys(human.models).filter((a) => human.models[a]));
|
||||
|
||||
const inDir = process.argv[2];
|
||||
const outDir = process.argv[3];
|
||||
|
|
|
@ -20,7 +20,7 @@
|
|||
body { font-size: 1rem; font-family: "CenturyGothic", "Segoe UI", sans-serif; font-variant: small-caps; width: -webkit-fill-available; height: 100%; background: black; color: white; overflow: hidden; margin: 0; }
|
||||
select { font-size: 1rem; font-family: "CenturyGothic", "Segoe UI", sans-serif; font-variant: small-caps; background: gray; color: white; border: none; }
|
||||
</style>
|
||||
<script src="../segmentation/index.js" type="module"></script>
|
||||
<script src="index.js" type="module"></script>
|
||||
</head>
|
||||
<body>
|
||||
<noscript><h1>javascript is required</h1></noscript>
|
||||
|
@ -46,9 +46,9 @@
|
|||
<main>
|
||||
<div id="main" class="main">
|
||||
<video id="webcam" style="position: fixed; top: 0; left: 0; width: 50vw; height: 50vh"></video>
|
||||
<img id="background" alt="background" style="position: fixed; top: 0; right: 0; width: 50vw; height: 50vh" controls></img>
|
||||
<canvas id="output" style="position: fixed; bottom: 0; left: 0; height: 50vh"></canvas>
|
||||
<canvas id="merge" style="position: fixed; bottom: 0; right: 0; height: 50vh"></canvas>
|
||||
<video id="video" style="position: fixed; top: 0; right: 0; width: 50vw; height: 50vh" controls></video>
|
||||
<canvas id="output" style="position: fixed; bottom: 0; left: 0; width: 50vw; height: 50vh"></canvas>
|
||||
<canvas id="merge" style="position: fixed; bottom: 0; right: 0; width: 50vw; height: 50vh"></canvas>
|
||||
</div>
|
||||
</main>
|
||||
<footer>
|
||||
|
|
|
@ -25,8 +25,6 @@ const humanConfig = { // user configuration for human, used to fine-tune behavio
|
|||
},
|
||||
};
|
||||
|
||||
const backgroundImage = '../../samples/in/background.jpg';
|
||||
|
||||
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
|
||||
|
||||
const log = (...msg) => console.log(...msg); // eslint-disable-line no-console
|
||||
|
@ -34,7 +32,7 @@ const log = (...msg) => console.log(...msg); // eslint-disable-line no-console
|
|||
async function main() {
|
||||
// gather dom elements
|
||||
const dom = {
|
||||
background: document.getElementById('background'),
|
||||
video: document.getElementById('video'),
|
||||
webcam: document.getElementById('webcam'),
|
||||
output: document.getElementById('output'),
|
||||
merge: document.getElementById('merge'),
|
||||
|
@ -46,7 +44,7 @@ async function main() {
|
|||
// set defaults
|
||||
dom.fps.innerText = 'initializing';
|
||||
dom.ratio.valueAsNumber = human.config.segmentation.ratio;
|
||||
dom.background.src = backgroundImage;
|
||||
dom.video.src = '../assets/rijeka.mp4';
|
||||
dom.composite.innerHTML = ['source-atop', 'color', 'color-burn', 'color-dodge', 'copy', 'darken', 'destination-atop', 'destination-in', 'destination-out', 'destination-over', 'difference', 'exclusion', 'hard-light', 'hue', 'lighten', 'lighter', 'luminosity', 'multiply', 'overlay', 'saturation', 'screen', 'soft-light', 'source-in', 'source-out', 'source-over', 'xor'].map((gco) => `<option value="${gco}">${gco}</option>`).join(''); // eslint-disable-line max-len
|
||||
const ctxMerge = dom.merge.getContext('2d');
|
||||
|
||||
|
@ -54,8 +52,8 @@ async function main() {
|
|||
log('platform:', human.env.platform, '| agent:', human.env.agent);
|
||||
await human.load(); // preload all models
|
||||
log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
|
||||
log('models stats:', human.models.stats());
|
||||
log('models loaded:', human.models.loaded());
|
||||
log('models stats:', human.getModelStats());
|
||||
log('models loaded:', Object.values(human.models).filter((model) => model !== null).length);
|
||||
await human.warmup(); // warmup function to initialize backend for future faster detection
|
||||
const numTensors = human.tf.engine().state.numTensors;
|
||||
|
||||
|
@ -68,8 +66,7 @@ async function main() {
|
|||
dom.merge.height = human.webcam.height;
|
||||
loop(); // eslint-disable-line no-use-before-define
|
||||
};
|
||||
|
||||
await human.webcam.start({ element: dom.webcam, crop: true, width: window.innerWidth / 2, height: window.innerHeight / 2 }); // use human webcam helper methods and associate webcam stream with a dom element
|
||||
await human.webcam.start({ element: dom.webcam, crop: true, width: 960, height: 720 }); // use human webcam helper methods and associate webcam stream with a dom element
|
||||
if (!human.webcam.track) dom.fps.innerText = 'webcam error';
|
||||
|
||||
// processing loop
|
||||
|
@ -85,10 +82,10 @@ async function main() {
|
|||
return;
|
||||
}
|
||||
dom.fps.innerText = `fps: ${Math.round(10000 / (t1 - t0)) / 10}`; // mark performance
|
||||
human.draw.tensor(rgba, dom.output); // draw raw output
|
||||
human.tf.browser.toPixels(rgba, dom.output); // draw raw output
|
||||
human.tf.dispose(rgba); // dispose tensors
|
||||
ctxMerge.globalCompositeOperation = 'source-over';
|
||||
ctxMerge.drawImage(dom.background, 0, 0); // draw original video to first stacked canvas
|
||||
ctxMerge.drawImage(dom.video, 0, 0); // draw original video to first stacked canvas
|
||||
ctxMerge.globalCompositeOperation = dom.composite.value;
|
||||
ctxMerge.drawImage(dom.output, 0, 0); // draw processed output to second stacked canvas
|
||||
if (numTensors !== human.tf.engine().state.numTensors) log({ leak: human.tf.engine().state.numTensors - numTensors }); // check for memory leaks
|
||||
|
|
|
@ -1,28 +0,0 @@
|
|||
## Tracker

### Based on

<https://github.com/opendatacam/node-moving-things-tracker>

### Build

- remove reference to `lodash`:
> `isEqual` in <tracker.js>
- replace external lib:
> curl https://raw.githubusercontent.com/ubilabs/kd-tree-javascript/master/kdTree.js -o lib/kdTree-min.js
- build with `esbuild`:
> node_modules/.bin/esbuild --bundle tracker.js --format=esm --platform=browser --target=esnext --keep-names --tree-shaking=false --analyze --outfile=/home/vlado/dev/human/demo/tracker/tracker.js --banner:js="/* eslint-disable */"

### Usage

computeDistance(item1, item2)
disableKeepInMemory()
enableKeepInMemory()
getAllTrackedItems()
getJSONDebugOfTrackedItems(roundInt = true)
getJSONOfAllTrackedItems()
getJSONOfTrackedItems(roundInt = true)
getTrackedItemsInMOTFormat(frameNb)
reset()
setParams(newParams)
updateTrackedItemsWithNewFrame(detectionsOfThisFrame, frameNb)
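The function list above maps directly onto how the tracker is driven per frame. A hedged usage sketch follows; the `onDetections` wrapper and the detection values are illustrative, not part of the tracker itself.

```js
// Hedged usage sketch for the API listed above
import tracker from './tracker.js';

tracker.setParams({ unMatchedFramesTolerance: 100, iouLimit: 0.05, distanceLimit: 10000, matchingAlgorithm: 'kdTree' });
tracker.enableKeepInMemory(); // keep lost items in memory so they can be re-matched later
tracker.reset();

let frame = 0;
function onDetections(detections) { // detections: [{ x, y, w, h, confidence, name }]
  tracker.updateTrackedItemsWithNewFrame(detections, frame++);
  return tracker.getJSONOfTrackedItems(true); // rounded: [{ id, x, y, w, h, confidence, isZombie, name }]
}

// example call with a single made-up detection box
onDetections([{ x: 320, y: 240, w: 100, h: 120, confidence: 0.9, name: 'face' }]);
```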
|
|
@ -1,65 +0,0 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>Human</title>
|
||||
<meta name="viewport" content="width=device-width" id="viewport">
|
||||
<meta name="keywords" content="Human">
|
||||
<meta name="application-name" content="Human">
|
||||
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
|
||||
<meta name="theme-color" content="#000000">
|
||||
<link rel="manifest" href="../manifest.webmanifest">
|
||||
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
|
||||
<link rel="apple-touch-icon" href="../../assets/icon.png">
|
||||
<script src="./index.js" type="module"></script>
|
||||
<style>
|
||||
html { font-family: 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
|
||||
body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
|
||||
body::-webkit-scrollbar { display: none; }
|
||||
input[type="file"] { font-family: 'Segoe UI'; font-size: 14px; font-variant: small-caps; }
|
||||
::-webkit-file-upload-button { background: #333333; color: white; border: 0; border-radius: 0; padding: 6px 16px; box-shadow: 4px 4px 4px #222222; font-family: 'Segoe UI'; font-size: 14px; font-variant: small-caps; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div style="display: flex">
|
||||
<video id="video" playsinline style="width: 25vw" controls controlslist="nofullscreen nodownload noremoteplayback" disablepictureinpicture loop></video>
|
||||
<canvas id="canvas" style="width: 75vw"></canvas>
|
||||
</div>
|
||||
<div class="uploader" style="padding: 8px">
|
||||
<input type="file" name="inputvideo" id="inputvideo" accept="video/*"></input>
|
||||
<input type="checkbox" id="interpolation" name="interpolation"></input>
|
||||
<label for="tracker">interpolation</label>
|
||||
</div>
|
||||
<form id="config" style="padding: 8px; line-height: 1.6rem;">
|
||||
tracker |
|
||||
<input type="checkbox" id="tracker" name="tracker" checked></input>
|
||||
<label for="tracker">enabled</label> |
|
||||
<input type="checkbox" id="keepInMemory" name="keepInMemory"></input>
|
||||
<label for="keepInMemory">keepInMemory</label> |
|
||||
<br>
|
||||
tracker source |
|
||||
<input type="radio" id="box-face" name="box" value="face" checked>
|
||||
<label for="box-face">face</label> |
|
||||
<input type="radio" id="box-body" name="box" value="body">
|
||||
<label for="box-face">body</label> |
|
||||
<input type="radio" id="box-object" name="box" value="object">
|
||||
<label for="box-face">object</label> |
|
||||
<br>
|
||||
tracker config |
|
||||
<input type="range" id="unMatchedFramesTolerance" name="unMatchedFramesTolerance" min="0" max="300" step="1", value="60"></input>
|
||||
<label for="unMatchedFramesTolerance">unMatchedFramesTolerance</label> |
|
||||
<input type="range" id="iouLimit" name="unMatchedFramesTolerance" min="0" max="1" step="0.01", value="0.1"></input>
|
||||
<label for="iouLimit">iouLimit</label> |
|
||||
<input type="range" id="distanceLimit" name="unMatchedFramesTolerance" min="0" max="1" step="0.01", value="0.1"></input>
|
||||
<label for="distanceLimit">distanceLimit</label> |
|
||||
<input type="radio" id="matchingAlgorithm-kdTree" name="matchingAlgorithm" value="kdTree" checked>
|
||||
<label for="matchingAlgorithm-kdTree">kdTree</label> |
|
||||
<input type="radio" id="matchingAlgorithm-munkres" name="matchingAlgorithm" value="munkres">
|
||||
<label for="matchingAlgorithm-kdTree">munkres</label> |
|
||||
</form>
|
||||
<pre id="status" style="position: absolute; top: 12px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
|
||||
<pre id="log" style="padding: 8px"></pre>
|
||||
<div id="performance" style="position: absolute; bottom: 0; width: 100%; padding: 8px; font-size: 0.8rem;"></div>
|
||||
</body>
|
||||
</html>
|
|
@ -1,208 +0,0 @@
|
|||
/**
|
||||
* Human demo for browsers
|
||||
* @default Human Library
|
||||
* @summary <https://github.com/vladmandic/human>
|
||||
* @author <https://github.com/vladmandic>
|
||||
* @copyright <https://github.com/vladmandic>
|
||||
* @license MIT
|
||||
*/
|
||||
|
||||
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
|
||||
import tracker from './tracker.js';
|
||||
|
||||
const humanConfig: Partial<H.Config> = { // user configuration for human, used to fine-tune behavior
|
||||
debug: true,
|
||||
backend: 'webgl',
|
||||
// cacheSensitivity: 0,
|
||||
// cacheModels: false,
|
||||
// warmup: 'none',
|
||||
modelBasePath: 'https://vladmandic.github.io/human-models/models',
|
||||
filter: { enabled: true, equalization: false, flip: false },
|
||||
face: {
|
||||
enabled: true,
|
||||
detector: { rotation: false, maxDetected: 10, minConfidence: 0.3 },
|
||||
mesh: { enabled: true },
|
||||
attention: { enabled: false },
|
||||
iris: { enabled: false },
|
||||
description: { enabled: false },
|
||||
emotion: { enabled: false },
|
||||
antispoof: { enabled: false },
|
||||
liveness: { enabled: false },
|
||||
},
|
||||
body: { enabled: false, maxDetected: 6, modelPath: 'movenet-multipose.json' },
|
||||
hand: { enabled: false },
|
||||
object: { enabled: false, maxDetected: 10 },
|
||||
segmentation: { enabled: false },
|
||||
gesture: { enabled: false },
|
||||
};
|
||||
|
||||
interface TrackerConfig {
|
||||
unMatchedFramesTolerance: number, // number of frames an object can remain unmatched before it is considered gone; ignored if fastDelete is set
|
||||
iouLimit: number, // exclude candidates from being matched if their IOU is less than this; 1 means total overlap; 0 means no overlap
|
||||
fastDelete: boolean, // remove new objects immediately if they could not be matched in the next frames; if set, ignores unMatchedFramesTolerance
|
||||
distanceLimit: number, // maximum distance for a match; to exclude a pair from matching, set its distance to something greater than this limit
|
||||
matchingAlgorithm: 'kdTree' | 'munkres', // algorithm used to match tracks with new detections
|
||||
}
|
||||
|
||||
interface TrackerResult {
|
||||
id: number,
|
||||
confidence: number,
|
||||
bearing: number,
|
||||
isZombie: boolean,
|
||||
name: string,
|
||||
x: number,
|
||||
y: number,
|
||||
w: number,
|
||||
h: number,
|
||||
}
|
||||
|
||||
const trackerConfig: TrackerConfig = {
|
||||
unMatchedFramesTolerance: 100,
|
||||
iouLimit: 0.05,
|
||||
fastDelete: false,
|
||||
distanceLimit: 1e4,
|
||||
matchingAlgorithm: 'kdTree',
|
||||
};
|
||||
|
||||
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
|
||||
|
||||
const dom = { // grab instances of dom objects so we don't have to look them up later
|
||||
video: document.getElementById('video') as HTMLVideoElement,
|
||||
canvas: document.getElementById('canvas') as HTMLCanvasElement,
|
||||
log: document.getElementById('log') as HTMLPreElement,
|
||||
fps: document.getElementById('status') as HTMLPreElement,
|
||||
tracker: document.getElementById('tracker') as HTMLInputElement,
|
||||
interpolation: document.getElementById('interpolation') as HTMLInputElement,
|
||||
config: document.getElementById('config') as HTMLFormElement,
|
||||
ctx: (document.getElementById('canvas') as HTMLCanvasElement).getContext('2d') as CanvasRenderingContext2D,
|
||||
};
|
||||
const timestamp = { detect: 0, draw: 0, tensors: 0, start: 0 }; // holds information used to calculate performance and possible memory leaks
|
||||
const fps = { detectFPS: 0, drawFPS: 0, frames: 0, averageMs: 0 }; // holds calculated fps information for both detect and screen refresh
|
||||
|
||||
const log = (...msg) => { // helper method to output messages
|
||||
dom.log.innerText += msg.join(' ') + '\n';
|
||||
console.log(...msg); // eslint-disable-line no-console
|
||||
};
|
||||
const status = (msg) => dom.fps.innerText = msg; // print status element
|
||||
|
||||
async function detectionLoop() { // main detection loop
|
||||
if (!dom.video.paused && dom.video.readyState >= 2) {
|
||||
if (timestamp.start === 0) timestamp.start = human.now();
|
||||
// log('profiling data:', await human.profile(dom.video));
|
||||
await human.detect(dom.video, humanConfig); // actual detection; we're not capturing the output in a local variable as it can also be reached via human.result
|
||||
const tensors = human.tf.memory().numTensors; // check current tensor usage for memory leaks
|
||||
if (tensors - timestamp.tensors !== 0) log('allocated tensors:', tensors - timestamp.tensors); // printed on start and each time there is a tensor leak
|
||||
timestamp.tensors = tensors;
|
||||
fps.detectFPS = Math.round(1000 * 1000 / (human.now() - timestamp.detect)) / 1000;
|
||||
fps.frames++;
|
||||
fps.averageMs = Math.round(1000 * (human.now() - timestamp.start) / fps.frames) / 1000;
|
||||
}
|
||||
timestamp.detect = human.now();
|
||||
requestAnimationFrame(detectionLoop); // start new frame immediately
|
||||
}
|
||||
|
||||
function drawLoop() { // main screen refresh loop
|
||||
if (!dom.video.paused && dom.video.readyState >= 2) {
|
||||
const res: H.Result = dom.interpolation.checked ? human.next(human.result) : human.result; // interpolate results if enabled
|
||||
let tracking: H.FaceResult[] | H.BodyResult[] | H.ObjectResult[] = [];
|
||||
if (human.config.face.enabled) tracking = res.face;
|
||||
else if (human.config.body.enabled) tracking = res.body;
|
||||
else if (human.config.object.enabled) tracking = res.object;
|
||||
else log('unknown object type');
|
||||
let data: TrackerResult[] = [];
|
||||
if (dom.tracker.checked) {
|
||||
const items = tracking.map((obj) => ({
|
||||
x: obj.box[0] + obj.box[2] / 2,
|
||||
y: obj.box[1] + obj.box[3] / 2,
|
||||
w: obj.box[2],
|
||||
h: obj.box[3],
|
||||
name: obj.label || (human.config.face.enabled ? 'face' : 'body'),
|
||||
confidence: obj.score,
|
||||
}));
|
||||
tracker.updateTrackedItemsWithNewFrame(items, fps.frames);
|
||||
data = tracker.getJSONOfTrackedItems(true) as TrackerResult[];
|
||||
}
|
||||
human.draw.canvas(dom.video, dom.canvas); // copy input video frame to output canvas
|
||||
for (let i = 0; i < tracking.length; i++) {
|
||||
// @ts-ignore
|
||||
const name = tracking[i].label || (human.config.face.enabled ? 'face' : 'body');
|
||||
dom.ctx.strokeRect(tracking[i].box[0], tracking[i].box[1], tracking[i].box[2], tracking[i].box[3]); // box is [x, y, width, height]
|
||||
dom.ctx.fillText(`id: ${tracking[i].id} ${Math.round(100 * tracking[i].score)}% ${name}`, tracking[i].box[0] + 4, tracking[i].box[1] + 16);
|
||||
if (data[i]) {
|
||||
dom.ctx.fillText(`t: ${data[i].id} ${Math.round(100 * data[i].confidence)}% ${data[i].name} ${data[i].isZombie ? 'zombie' : ''}`, tracking[i].box[0] + 4, tracking[i].box[1] + 34);
|
||||
}
|
||||
}
|
||||
}
|
||||
const now = human.now();
|
||||
fps.drawFPS = Math.round(1000 * 1000 / (now - timestamp.draw)) / 1000;
|
||||
timestamp.draw = now;
|
||||
status(dom.video.paused ? 'paused' : `fps: ${fps.detectFPS.toFixed(1).padStart(5, ' ')} detect | ${fps.drawFPS.toFixed(1).padStart(5, ' ')} draw`); // write status
|
||||
setTimeout(drawLoop, 30); // use to slow down refresh from max refresh rate to target of 30 fps
|
||||
}
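The detection and draw loops above deliberately run at different cadences: detection free-runs via `requestAnimationFrame`, while drawing is throttled to roughly 30 fps and renders the last (optionally interpolated) result. A minimal sketch of that decoupling, assuming hypothetical `runDetection` and `render` helpers that are not part of the demo:

```js
// Minimal sketch of decoupled detect/draw loops; runDetection and render are illustrative names
let latest = null; // last detection result shared between the two loops

async function detectLoop() {
  latest = await runDetection(); // expensive: runs as fast as the backend allows
  requestAnimationFrame(detectLoop); // schedule the next detection immediately
}

function drawLoop() {
  if (latest) render(latest); // cheap: draws the last known (or interpolated) result
  setTimeout(drawLoop, 30); // throttle drawing to roughly 30 fps
}

detectLoop();
drawLoop();
```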
|
||||
|
||||
async function handleVideo(file: File) {
|
||||
const url = URL.createObjectURL(file);
|
||||
dom.video.src = url;
|
||||
await dom.video.play();
|
||||
log('loaded video:', file.name, 'resolution:', [dom.video.videoWidth, dom.video.videoHeight], 'duration:', dom.video.duration);
|
||||
dom.canvas.width = dom.video.videoWidth;
|
||||
dom.canvas.height = dom.video.videoHeight;
|
||||
dom.ctx.strokeStyle = 'white';
|
||||
dom.ctx.fillStyle = 'white';
|
||||
dom.ctx.font = '16px Segoe UI';
|
||||
dom.video.playbackRate = 0.25;
|
||||
}
|
||||
|
||||
function initInput() {
|
||||
document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
|
||||
document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
|
||||
document.body.addEventListener('dragover', (evt) => evt.preventDefault());
|
||||
document.body.addEventListener('drop', async (evt) => {
|
||||
evt.preventDefault();
|
||||
if (evt.dataTransfer) evt.dataTransfer.dropEffect = 'copy';
|
||||
const file = evt.dataTransfer?.files?.[0];
|
||||
if (file) await handleVideo(file);
|
||||
log(dom.video.readyState);
|
||||
});
|
||||
(document.getElementById('inputvideo') as HTMLInputElement).onchange = async (evt) => {
|
||||
evt.preventDefault();
|
||||
const file = evt.target?.['files']?.[0];
|
||||
if (file) await handleVideo(file);
|
||||
};
|
||||
dom.config.onchange = () => {
|
||||
trackerConfig.distanceLimit = (document.getElementById('distanceLimit') as HTMLInputElement).valueAsNumber;
|
||||
trackerConfig.iouLimit = (document.getElementById('iouLimit') as HTMLInputElement).valueAsNumber;
|
||||
trackerConfig.unMatchedFramesTolerance = (document.getElementById('unMatchedFramesTolerance') as HTMLInputElement).valueAsNumber;
|
||||
trackerConfig.unMatchedFramesTolerance = (document.getElementById('unMatchedFramesTolerance') as HTMLInputElement).valueAsNumber;
|
||||
trackerConfig.matchingAlgorithm = (document.getElementById('matchingAlgorithm-kdTree') as HTMLInputElement).checked ? 'kdTree' : 'munkres';
|
||||
tracker.setParams(trackerConfig);
|
||||
if ((document.getElementById('keepInMemory') as HTMLInputElement).checked) tracker.enableKeepInMemory();
|
||||
else tracker.disableKeepInMemory();
|
||||
tracker.reset();
|
||||
log('tracker config change', JSON.stringify(trackerConfig));
|
||||
humanConfig.face!.enabled = (document.getElementById('box-face') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
|
||||
humanConfig.body!.enabled = (document.getElementById('box-body') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
|
||||
humanConfig.object!.enabled = (document.getElementById('box-object') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
|
||||
};
|
||||
dom.tracker.onchange = (evt) => {
|
||||
log('tracker', (evt.target as HTMLInputElement).checked ? 'enabled' : 'disabled');
|
||||
tracker.setParams(trackerConfig);
|
||||
tracker.reset();
|
||||
};
|
||||
}
|
||||
|
||||
async function main() { // main entry point
|
||||
log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
|
||||
log('platform:', human.env.platform, '| agent:', human.env.agent);
|
||||
status('loading...');
|
||||
await human.load(); // preload all models
|
||||
log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
|
||||
log('models loaded:', human.models.loaded());
|
||||
status('initializing...');
|
||||
await human.warmup(); // warmup function to initialize backend for future faster detection
|
||||
initInput(); // initialize input
|
||||
await detectionLoop(); // start detection loop
|
||||
drawLoop(); // start draw loop
|
||||
}
|
||||
|
||||
window.onload = main;
|
|
@ -21,7 +21,7 @@
|
|||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<canvas id="canvas" style="margin: 0 auto; width: 100vw"></canvas>
|
||||
<canvas id="canvas" style="margin: 0 auto; width: 100%"></canvas>
|
||||
<video id="video" playsinline style="display: none"></video>
|
||||
<pre id="status" style="position: absolute; top: 12px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
|
||||
<pre id="log" style="padding: 8px"></pre>
|
||||
|
|
|
@ -4,6 +4,6 @@
|
|||
author: <https://github.com/vladmandic>'
|
||||
*/
|
||||
|
||||
import*as m from"../../dist/human.esm.js";var v=1920,b={debug:!0,backend:"webgl",modelBasePath:"https://vladmandic.github.io/human-models/models/",filter:{enabled:!0,equalization:!1,flip:!1},face:{enabled:!0,detector:{rotation:!1},mesh:{enabled:!0},attention:{enabled:!1},iris:{enabled:!0},description:{enabled:!0},emotion:{enabled:!0},antispoof:{enabled:!0},liveness:{enabled:!0}},body:{enabled:!1},hand:{enabled:!1},object:{enabled:!1},segmentation:{enabled:!1},gesture:{enabled:!0}},e=new m.Human(b);e.env.perfadd=!1;e.draw.options.font='small-caps 18px "Lato"';e.draw.options.lineHeight=20;e.draw.options.drawPoints=!0;var a={video:document.getElementById("video"),canvas:document.getElementById("canvas"),log:document.getElementById("log"),fps:document.getElementById("status"),perf:document.getElementById("performance")},n={detect:0,draw:0,tensors:0,start:0},s={detectFPS:0,drawFPS:0,frames:0,averageMs:0},o=(...t)=>{a.log.innerText+=t.join(" ")+`
|
||||
`,console.log(...t)},i=t=>a.fps.innerText=t,g=t=>a.perf.innerText="tensors:"+e.tf.memory().numTensors.toString()+" | performance: "+JSON.stringify(t).replace(/"|{|}/g,"").replace(/,/g," | ");async function f(){if(!a.video.paused){n.start===0&&(n.start=e.now()),await e.detect(a.video);let t=e.tf.memory().numTensors;t-n.tensors!==0&&o("allocated tensors:",t-n.tensors),n.tensors=t,s.detectFPS=Math.round(1e3*1e3/(e.now()-n.detect))/1e3,s.frames++,s.averageMs=Math.round(1e3*(e.now()-n.start)/s.frames)/1e3,s.frames%100===0&&!a.video.paused&&o("performance",{...s,tensors:n.tensors})}n.detect=e.now(),requestAnimationFrame(f)}async function u(){var d,r,c;if(!a.video.paused){let l=e.next(e.result),w=await e.image(a.video);e.draw.canvas(w.canvas,a.canvas);let p={bodyLabels:`person confidence [score] and ${(c=(r=(d=e.result)==null?void 0:d.body)==null?void 0:r[0])==null?void 0:c.keypoints.length} keypoints`};await e.draw.all(a.canvas,l,p),g(l.performance)}let t=e.now();s.drawFPS=Math.round(1e3*1e3/(t-n.draw))/1e3,n.draw=t,i(a.video.paused?"paused":`fps: ${s.detectFPS.toFixed(1).padStart(5," ")} detect | ${s.drawFPS.toFixed(1).padStart(5," ")} draw`),setTimeout(u,30)}async function h(){let d=(await e.webcam.enumerate())[0].deviceId,r=await e.webcam.start({element:a.video,crop:!1,width:v,id:d});o(r),a.canvas.width=e.webcam.width,a.canvas.height=e.webcam.height,a.canvas.onclick=async()=>{e.webcam.paused?await e.webcam.play():e.webcam.pause()}}async function y(){o("human version:",e.version,"| tfjs version:",e.tf.version["tfjs-core"]),o("platform:",e.env.platform,"| agent:",e.env.agent),i("loading..."),await e.load(),o("backend:",e.tf.getBackend(),"| available:",e.env.backends),o("models stats:",e.models.stats()),o("models loaded:",e.models.loaded()),o("environment",e.env),i("initializing..."),await e.warmup(),await h(),await f(),await u()}window.onload=y;
|
||||
import*as i from"../../dist/human.esm.js";var m={modelBasePath:"../../models",filter:{enabled:!0,equalization:!1,flip:!1},face:{enabled:!0,detector:{rotation:!1},mesh:{enabled:!0},attention:{enabled:!1},iris:{enabled:!0},description:{enabled:!0},emotion:{enabled:!0}},body:{enabled:!0},hand:{enabled:!0},object:{enabled:!1},segmentation:{enabled:!1},gesture:{enabled:!0}},e=new i.Human(m);e.env.perfadd=!1;e.draw.options.font='small-caps 18px "Lato"';e.draw.options.lineHeight=20;var a={video:document.getElementById("video"),canvas:document.getElementById("canvas"),log:document.getElementById("log"),fps:document.getElementById("status"),perf:document.getElementById("performance")},n={detect:0,draw:0,tensors:0,start:0},s={detectFPS:0,drawFPS:0,frames:0,averageMs:0},o=(...t)=>{a.log.innerText+=t.join(" ")+`
|
||||
`,console.log(...t)},d=t=>a.fps.innerText=t,f=t=>a.perf.innerText="tensors:"+e.tf.memory().numTensors.toString()+" | performance: "+JSON.stringify(t).replace(/"|{|}/g,"").replace(/,/g," | ");async function l(){if(!a.video.paused){n.start===0&&(n.start=e.now()),await e.detect(a.video);let t=e.tf.memory().numTensors;t-n.tensors!==0&&o("allocated tensors:",t-n.tensors),n.tensors=t,s.detectFPS=Math.round(1e3*1e3/(e.now()-n.detect))/1e3,s.frames++,s.averageMs=Math.round(1e3*(e.now()-n.start)/s.frames)/1e3,s.frames%100===0&&!a.video.paused&&o("performance",{...s,tensors:n.tensors})}n.detect=e.now(),requestAnimationFrame(l)}async function c(){if(!a.video.paused){let r=e.next(e.result);e.config.filter.flip?e.draw.canvas(r.canvas,a.canvas):e.draw.canvas(a.video,a.canvas),await e.draw.all(a.canvas,r),f(r.performance)}let t=e.now();s.drawFPS=Math.round(1e3*1e3/(t-n.draw))/1e3,n.draw=t,d(a.video.paused?"paused":`fps: ${s.detectFPS.toFixed(1).padStart(5," ")} detect | ${s.drawFPS.toFixed(1).padStart(5," ")} draw`),setTimeout(c,30)}async function u(){await e.webcam.start({element:a.video,crop:!0}),a.canvas.width=e.webcam.width,a.canvas.height=e.webcam.height,a.canvas.onclick=async()=>{e.webcam.paused?await e.webcam.play():e.webcam.pause()}}async function w(){o("human version:",e.version,"| tfjs version:",e.tf.version["tfjs-core"]),o("platform:",e.env.platform,"| agent:",e.env.agent),d("loading..."),await e.load(),o("backend:",e.tf.getBackend(),"| available:",e.env.backends),o("models stats:",e.getModelStats()),o("models loaded:",Object.values(e.models).filter(t=>t!==null).length),d("initializing..."),await e.warmup(),await u(),await l(),await c()}window.onload=w;
|
||||
//# sourceMappingURL=index.js.map
|
||||
|
|
|
@ -9,20 +9,12 @@
|
|||
|
||||
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
|
||||
|
||||
const width = 1920; // used by webcam config as well as human maximum resolution // can be anything, but resolutions higher than 4k will disable internal optimizations
|
||||
|
||||
const humanConfig: Partial<H.Config> = { // user configuration for human, used to fine-tune behavior
|
||||
debug: true,
|
||||
backend: 'webgl',
|
||||
// cacheSensitivity: 0,
|
||||
// cacheModels: false,
|
||||
// warmup: 'none',
|
||||
// modelBasePath: '../../models',
|
||||
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
|
||||
modelBasePath: '../../models',
|
||||
filter: { enabled: true, equalization: false, flip: false },
|
||||
face: { enabled: true, detector: { rotation: false }, mesh: { enabled: true }, attention: { enabled: false }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true }, antispoof: { enabled: true }, liveness: { enabled: true } },
|
||||
body: { enabled: false },
|
||||
hand: { enabled: false },
|
||||
face: { enabled: true, detector: { rotation: false }, mesh: { enabled: true }, attention: { enabled: false }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true } },
|
||||
body: { enabled: true },
|
||||
hand: { enabled: true },
|
||||
object: { enabled: false },
|
||||
segmentation: { enabled: false },
|
||||
gesture: { enabled: true },
|
||||
|
@ -33,7 +25,6 @@ const human = new H.Human(humanConfig); // create instance of human with overrid
|
|||
human.env.perfadd = false; // is performance data showing instant or total values
|
||||
human.draw.options.font = 'small-caps 18px "Lato"'; // set font used to draw labels when using draw methods
|
||||
human.draw.options.lineHeight = 20;
|
||||
human.draw.options.drawPoints = true; // draw points on face mesh
|
||||
// human.draw.options.fillPolygons = true;
|
||||
|
||||
const dom = { // grab instances of dom objects so we don't have to look them up later
|
||||
|
@ -51,7 +42,7 @@ const log = (...msg) => { // helper method to output messages
|
|||
console.log(...msg); // eslint-disable-line no-console
|
||||
};
|
||||
const status = (msg) => dom.fps.innerText = msg; // print status element
|
||||
const perf = (msg) => dom.perf.innerText = 'tensors:' + human.tf.memory().numTensors.toString() + ' | performance: ' + JSON.stringify(msg).replace(/"|{|}/g, '').replace(/,/g, ' | '); // print performance element
|
||||
const perf = (msg) => dom.perf.innerText = 'tensors:' + (human.tf.memory().numTensors as number).toString() + ' | performance: ' + JSON.stringify(msg).replace(/"|{|}/g, '').replace(/,/g, ' | '); // print performance element
|
||||
|
||||
async function detectionLoop() { // main detection loop
|
||||
if (!dom.video.paused) {
|
||||
|
@ -73,11 +64,9 @@ async function detectionLoop() { // main detection loop
|
|||
async function drawLoop() { // main screen refresh loop
|
||||
if (!dom.video.paused) {
|
||||
const interpolated = human.next(human.result); // smoothen result using last-known results
|
||||
const processed = await human.image(dom.video); // get current video frame, but enhanced with human.filters
|
||||
human.draw.canvas(processed.canvas as HTMLCanvasElement, dom.canvas);
|
||||
|
||||
const opt: Partial<H.DrawOptions> = { bodyLabels: `person confidence [score] and ${human.result?.body?.[0]?.keypoints.length} keypoints` };
|
||||
await human.draw.all(dom.canvas, interpolated, opt); // draw labels, boxes, lines, etc.
|
||||
if (human.config.filter.flip) human.draw.canvas(interpolated.canvas as HTMLCanvasElement, dom.canvas); // draw processed image to screen canvas
|
||||
else human.draw.canvas(dom.video, dom.canvas); // draw original video to screen canvas // better than using processed image as this loop happens faster than the processing loop
|
||||
await human.draw.all(dom.canvas, interpolated); // draw labels, boxes, lines, etc.
|
||||
perf(interpolated.performance); // write performance data
|
||||
}
|
||||
const now = human.now();
|
||||
|
@ -88,10 +77,7 @@ async function drawLoop() { // main screen refresh loop
|
|||
}
|
||||
|
||||
async function webCam() {
|
||||
const devices = await human.webcam.enumerate();
|
||||
const id = devices[0].deviceId; // use first available video source
|
||||
const webcamStatus = await human.webcam.start({ element: dom.video, crop: false, width, id }); // use human webcam helper methods and associate webcam stream with a dom element
|
||||
log(webcamStatus);
|
||||
await human.webcam.start({ element: dom.video, crop: true }); // use human webcam helper methods and associate webcam stream with a dom element
|
||||
dom.canvas.width = human.webcam.width;
|
||||
dom.canvas.height = human.webcam.height;
|
||||
dom.canvas.onclick = async () => { // pause when clicked on screen and resume on next click
|
||||
|
@ -106,9 +92,8 @@ async function main() { // main entry point
|
|||
status('loading...');
|
||||
await human.load(); // preload all models
|
||||
log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
|
||||
log('models stats:', human.models.stats());
|
||||
log('models loaded:', human.models.loaded());
|
||||
log('environment', human.env);
|
||||
log('models stats:', human.getModelStats());
|
||||
log('models loaded:', Object.values(human.models).filter((model) => model !== null).length);
|
||||
status('initializing...');
|
||||
await human.warmup(); // warmup function to initialize backend for future faster detection
|
||||
await webCam(); // start webcam
|
||||
|
|
|
@ -4,4 +4,4 @@
|
|||
author: <https://github.com/vladmandic>'
|
||||
*/
|
||||
|
||||
var e="4.22.0";var s="4.22.0";var t="4.22.0";var n="4.22.0";var r="4.22.0";var i="4.22.0";var h={tfjs:e,"tfjs-core":e,"tfjs-converter":s,"tfjs-backend-cpu":t,"tfjs-backend-webgl":n,"tfjs-backend-wasm":r,"tfjs-backend-webgpu":i};export{h as version};
|
||||
var e="3.21.0";var s="3.21.0";var t="3.21.0";var i="3.21.0";var n="3.21.0";var r="3.21.0";var l="3.21.0";var V={tfjs:e,"tfjs-core":s,"tfjs-data":t,"tfjs-layers":i,"tfjs-converter":n,"tfjs-backend-webgl":r,"tfjs-backend-wasm":l};export{V as version};
|
||||
|
|
|
@ -1,38 +1,37 @@
|
|||
{
|
||||
"antispoof": 853098,
|
||||
"blazeface": 538928,
|
||||
"centernet": 4030290,
|
||||
"emotion": 820516,
|
||||
"facemesh": 1477958,
|
||||
"faceres": 6978814,
|
||||
"handlandmark-lite": 2023432,
|
||||
"handlandmark-full": 5431368,
|
||||
"handtrack": 2964837,
|
||||
"iris": 2599092,
|
||||
"liveness": 592976,
|
||||
"mb3-centernet": 4030290,
|
||||
"models": 0,
|
||||
"movenet-lightning": 4650216,
|
||||
"affectnet-mobilenet": 6920630,
|
||||
"age": 161240,
|
||||
"blazeface-back": 538928,
|
||||
"blazeface-front": 402048,
|
||||
"blazepose-detector": 5928856,
|
||||
"blazepose-full": 6339202,
|
||||
"blazepose-heavy": 27502466,
|
||||
"blazepose-lite": 2726402,
|
||||
"blazepose-detector2d": 7499400,
|
||||
"blazepose-detector3d": 5928856,
|
||||
"blazepose-full": 6338290,
|
||||
"blazepose-heavy": 27501554,
|
||||
"blazepose-lite": 2725490,
|
||||
"efficientpose": 5651240,
|
||||
"faceboxes": 2013002,
|
||||
"facemesh-attention-pinto": 2387598,
|
||||
"facemesh-attention-alt": 2387598,
|
||||
"facemesh-attention": 2382414,
|
||||
"facemesh-detection-full": 1026192,
|
||||
"facemesh-detection-short": 201268,
|
||||
"facemesh-orig": 2955780,
|
||||
"faceres-deep": 13957620,
|
||||
"gear-e1": 112438,
|
||||
"gear-e2": 112438,
|
||||
"gear": 1498916,
|
||||
"gender-ssrnet-imdb": 161236,
|
||||
"gender": 201808,
|
||||
"handdetect": 3515612,
|
||||
"handlandmark-full": 5431368,
|
||||
"handlandmark-lite": 2023432,
|
||||
"handlandmark-sparse": 5286322,
|
||||
"handskeleton": 5502280,
|
||||
"meet": 372228,
|
||||
|
@ -44,6 +43,7 @@
|
|||
"posenet": 5032780,
|
||||
"rvm": 3739355,
|
||||
"selfie": 212886,
|
||||
"blazepose-detect": 5928804,
|
||||
"anti-spoofing": 853098,
|
||||
"efficientpose-i-lite": 2269064,
|
||||
"efficientpose-ii-lite": 5651240,
|
||||
|
|
package.json (97 changed lines)
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"name": "@vladmandic/human",
|
||||
"version": "3.3.6",
|
||||
"version": "2.11.1",
|
||||
"description": "Human: AI-powered 3D Face Detection & Rotation Tracking, Face Description & Recognition, Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction, Gesture Recognition",
|
||||
"sideEffects": false,
|
||||
"main": "dist/human.node.js",
|
||||
|
@ -8,24 +8,16 @@
|
|||
"browser": "dist/human.esm.js",
|
||||
"types": "types/human.d.ts",
|
||||
"exports": {
|
||||
"node": "./dist/human.node.js",
|
||||
"node": {
|
||||
"require": "./dist/human.node.js",
|
||||
"import": "./dist/human.node.js",
|
||||
"module": "./dist/human.node.js"
|
||||
},
|
||||
"require": "./dist/human.node.js",
|
||||
"import": "./dist/human.esm.js",
|
||||
"script": "./dist/human.js",
|
||||
"module": "./dist/human.esm.js",
|
||||
"types": "./types/human.d.ts",
|
||||
"dist/human": "./dist/human.js",
|
||||
"dist/human.js": "./dist/human.js",
|
||||
"dist/human.esm": "./dist/human.esm.js",
|
||||
"dist/human.esm.js": "./dist/human.esm.js",
|
||||
"dist/human.esm-nobundle": "./dist/human.esm-nobundle.js",
|
||||
"dist/human.esm-nobundle.js": "./dist/human.esm-nobundle.js",
|
||||
"dist/human.node": "./dist/human.node.js",
|
||||
"dist/human.node.js": "./dist/human.node.js",
|
||||
"dist/human.node-wasm": "./dist/human.node-wasm.js",
|
||||
"dist/human.node-wasm.js": "./dist/human.node-wasm.js",
|
||||
"dist/human.node-gpu": "./dist/human.node-gpu.js",
|
||||
"dist/human.node-gpu.js": "./dist/human.node-gpu.js",
|
||||
"require": "./dist/human.node.js",
|
||||
"import": "./dist/human.esm.js"
|
||||
"types": "./types/human.d.ts"
|
||||
},
|
||||
"author": "Vladimir Mandic <mandic00@live.com>",
|
||||
"bugs": {
|
||||
|
@ -46,7 +38,7 @@
|
|||
"clean": "build --profile clean",
|
||||
"build": "rimraf test/build.log && node build.js",
|
||||
"test": "node --no-warnings --unhandled-rejections=strict --trace-uncaught test/node.js",
|
||||
"lint": "eslint *.json *.js src demo test models wiki",
|
||||
"lint": "eslint *.json *.js src demo test models",
|
||||
"scan": "npx auditjs@latest ossi --dev --quiet"
|
||||
},
|
||||
"keywords": [
|
||||
|
@ -74,40 +66,45 @@
|
|||
"tensorflow"
|
||||
],
|
||||
"devDependencies": {
|
||||
"@html-eslint/eslint-plugin": "^0.46.1",
|
||||
"@html-eslint/parser": "^0.46.0",
|
||||
"@microsoft/api-extractor": "^7.52.11",
|
||||
"@tensorflow/tfjs-backend-cpu": "^4.22.0",
|
||||
"@tensorflow/tfjs-backend-wasm": "^4.22.0",
|
||||
"@tensorflow/tfjs-backend-webgl": "^4.22.0",
|
||||
"@tensorflow/tfjs-backend-webgpu": "4.22.0",
|
||||
"@tensorflow/tfjs-converter": "^4.22.0",
|
||||
"@tensorflow/tfjs-core": "^4.22.0",
|
||||
"@tensorflow/tfjs-data": "^4.22.0",
|
||||
"@tensorflow/tfjs-layers": "^4.22.0",
|
||||
"@tensorflow/tfjs-node": "^4.22.0",
|
||||
"@tensorflow/tfjs-node-gpu": "^4.22.0",
|
||||
"@types/emscripten": "^1.40.1",
|
||||
"@types/node": "^24.3.0",
|
||||
"@types/offscreencanvas": "^2019.7.3",
|
||||
"@typescript-eslint/eslint-plugin": "^8.41.0",
|
||||
"@typescript-eslint/parser": "^8.41.0",
|
||||
"@vladmandic/build": "^0.10.3",
|
||||
"@vladmandic/pilogger": "^0.5.2",
|
||||
"@html-eslint/eslint-plugin": "^0.15.0",
|
||||
"@html-eslint/parser": "^0.15.0",
|
||||
"@microsoft/api-extractor": "^7.32.0",
|
||||
"@tensorflow/tfjs": "^3.21.0",
|
||||
"@tensorflow/tfjs-backend-cpu": "^3.21.0",
|
||||
"@tensorflow/tfjs-backend-wasm": "^3.21.0",
|
||||
"@tensorflow/tfjs-backend-webgl": "^3.21.0",
|
||||
"@tensorflow/tfjs-backend-webgpu": "0.0.1-alpha.14",
|
||||
"@tensorflow/tfjs-converter": "^3.21.0",
|
||||
"@tensorflow/tfjs-core": "^3.21.0",
|
||||
"@tensorflow/tfjs-data": "^3.21.0",
|
||||
"@tensorflow/tfjs-layers": "^3.21.0",
|
||||
"@tensorflow/tfjs-node": "^3.21.1",
|
||||
"@tensorflow/tfjs-node-gpu": "^3.21.0",
|
||||
"@tensorflow/tfjs-tflite": "0.0.1-alpha.9",
|
||||
"@types/emscripten": "^1.39.6",
|
||||
"@types/node": "^18.8.3",
|
||||
"@types/offscreencanvas": "^2019.7.0",
|
||||
"@typescript-eslint/eslint-plugin": "^5.39.0",
|
||||
"@typescript-eslint/parser": "^5.39.0",
|
||||
"@vladmandic/build": "^0.7.14",
|
||||
"@vladmandic/pilogger": "^0.4.6",
|
||||
"@vladmandic/tfjs": "github:vladmandic/tfjs",
|
||||
"canvas": "^3.2.0",
|
||||
"esbuild": "^0.25.9",
|
||||
"eslint": "8.57.0",
|
||||
"@webgpu/types": "^0.1.22",
|
||||
"canvas": "^2.10.1",
|
||||
"esbuild": "^0.15.10",
|
||||
"eslint": "8.25.0",
|
||||
"eslint-config-airbnb-base": "^15.0.0",
|
||||
"eslint-plugin-html": "^8.1.3",
|
||||
"eslint-plugin-import": "^2.32.0",
|
||||
"eslint-plugin-json": "^4.0.1",
|
||||
"eslint-plugin-markdown": "^5.1.0",
|
||||
"eslint-plugin-html": "^7.1.0",
|
||||
"eslint-plugin-import": "^2.26.0",
|
||||
"eslint-plugin-json": "^3.1.0",
|
||||
"eslint-plugin-node": "^11.1.0",
|
||||
"eslint-plugin-promise": "^7.2.1",
|
||||
"rimraf": "^6.0.1",
|
||||
"tslib": "^2.8.1",
|
||||
"typedoc": "0.28.11",
|
||||
"typescript": "5.9.2"
|
||||
"eslint-plugin-promise": "^6.0.1",
|
||||
"long": "^5.2.0",
|
||||
"node-fetch": "^3.2.10",
|
||||
"rimraf": "^3.0.2",
|
||||
"seedrandom": "^3.0.5",
|
||||
"tslib": "^2.4.0",
|
||||
"typedoc": "0.23.15",
|
||||
"typescript": "4.8.4"
|
||||
}
|
||||
}
|
||||
|
|
Before Width: | Height: | Size: 98 KiB |
Before Width: | Height: | Size: 164 KiB After Width: | Height: | Size: 178 KiB |
Before Width: | Height: | Size: 150 KiB After Width: | Height: | Size: 145 KiB |
Before Width: | Height: | Size: 59 KiB After Width: | Height: | Size: 50 KiB |
|
@ -2,47 +2,64 @@
|
|||
* BlazePose model implementation
|
||||
*/
|
||||
|
||||
import * as tf from 'dist/tfjs.esm.js';
|
||||
import * as tf from '../../dist/tfjs.esm.js';
|
||||
import { loadModel } from '../tfjs/load';
|
||||
import { constants } from '../tfjs/constants';
|
||||
import { log, now } from '../util/util';
|
||||
import type { BodyKeypoint, BodyResult, BodyLandmark, Box, Point, BodyAnnotation } from '../result';
|
||||
import type { GraphModel, Tensor, Tensor4D } from '../tfjs/types';
|
||||
import type { GraphModel, Tensor } from '../tfjs/types';
|
||||
import type { Config } from '../config';
|
||||
import * as coords from './blazeposecoords';
|
||||
import { loadDetector, detectBoxes, DetectedBox } from './blazeposedetector';
|
||||
import * as detect from './blazeposedetector';
|
||||
import * as box from '../util/box';
|
||||
import { env } from '../util/env';
|
||||
|
||||
const env = { initial: true };
|
||||
// const models: [GraphModel | null, GraphModel | null] = [null, null];
|
||||
let model: GraphModel | null;
|
||||
let inputSize = 256;
|
||||
const models: { detector: GraphModel | null, landmarks: GraphModel | null } = { detector: null, landmarks: null };
|
||||
const inputSize: { detector: [number, number], landmarks: [number, number] } = { detector: [224, 224], landmarks: [256, 256] };
|
||||
let skipped = Number.MAX_SAFE_INTEGER;
|
||||
const outputNodes: { detector: string[], landmarks: string[] } = {
|
||||
landmarks: ['ld_3d', 'activation_segmentation', 'activation_heatmap', 'world_3d', 'output_poseflag'],
|
||||
detector: [],
|
||||
};
|
||||
|
||||
const cache: BodyResult[] = [];
|
||||
let cache: BodyResult | null = null;
|
||||
let cropBox: Box | undefined;
|
||||
let padding: [number, number][] = [[0, 0], [0, 0], [0, 0], [0, 0]];
|
||||
let lastTime = 0;
|
||||
|
||||
const sigmoid = (x) => (1 - (1 / (1 + Math.exp(x))));
|
||||
|
||||
export const loadDetect = (config: Config): Promise<GraphModel> => loadDetector(config);
|
||||
|
||||
export async function loadPose(config: Config): Promise<GraphModel> {
|
||||
if (env.initial) model = null;
|
||||
if (!model) {
|
||||
model = await loadModel(config.body.modelPath);
|
||||
const inputs = model?.['executor'] ? Object.values(model.modelSignature['inputs']) : undefined;
|
||||
// @ts-ignore model signature properties are not typed and inputs are unreliable for this model
|
||||
inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
|
||||
} else if (config.debug) log('cached model:', model['modelUrl']);
|
||||
return model;
|
||||
export async function loadDetect(config: Config): Promise<GraphModel> {
|
||||
if (env.initial) models.detector = null;
|
||||
if (!models.detector && config.body['detector'] && config.body['detector'].modelPath || '') {
|
||||
models.detector = await loadModel(config.body['detector'].modelPath);
|
||||
const inputs = models.detector?.['executor'] ? Object.values(models.detector.modelSignature['inputs']) : undefined;
|
||||
inputSize.detector[0] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
|
||||
inputSize.detector[1] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 0;
|
||||
} else if (config.debug && models.detector) log('cached model:', models.detector['modelUrl']);
|
||||
detect.createAnchors();
|
||||
return models.detector as GraphModel;
|
||||
}
|
||||
|
||||
function prepareImage(input: Tensor4D, size: number, cropBox?: Box): Tensor {
|
||||
export async function loadPose(config: Config): Promise<GraphModel> {
|
||||
if (env.initial) models.landmarks = null;
|
||||
if (!models.landmarks) {
|
||||
models.landmarks = await loadModel(config.body.modelPath);
|
||||
const inputs = models.landmarks?.['executor'] ? Object.values(models.landmarks.modelSignature['inputs']) : undefined;
|
||||
inputSize.landmarks[0] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
|
||||
inputSize.landmarks[1] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 0;
|
||||
} else if (config.debug) log('cached model:', models.landmarks['modelUrl']);
|
||||
return models.landmarks;
|
||||
}
|
||||
|
||||
export async function load(config: Config): Promise<[GraphModel | null, GraphModel | null]> {
|
||||
if (!models.detector) await loadDetect(config);
|
||||
if (!models.landmarks) await loadPose(config);
|
||||
return [models.detector, models.landmarks];
|
||||
}
|
||||
|
||||
function prepareImage(input: Tensor, size: number): Tensor {
|
||||
const t: Record<string, Tensor> = {};
|
||||
if (!input?.shape?.[1] || !input?.shape?.[2]) return input;
|
||||
let final: Tensor;
|
||||
|
@ -65,10 +82,10 @@ function prepareImage(input: Tensor4D, size: number, cropBox?: Box): Tensor {
|
|||
[0, 0], // dont touch rbg
|
||||
];
|
||||
t.pad = tf.pad(t.cropped || input, padding); // use cropped box if it exists
|
||||
t.resize = tf.image.resizeBilinear(t.pad as Tensor4D, [size, size]);
|
||||
t.resize = tf.image.resizeBilinear(t.pad, [size, size]);
|
||||
final = tf.div(t.resize, constants.tf255);
|
||||
} else if (input.shape[1] !== size) { // if input needs resizing
|
||||
t.resize = tf.image.resizeBilinear(t.cropped as Tensor4D || input, [size, size]);
|
||||
t.resize = tf.image.resizeBilinear(t.cropped || input, [size, size]);
|
||||
final = tf.div(t.resize, constants.tf255);
|
||||
} else { // if input is already in a correct resolution just normalize it
|
||||
final = tf.div(t.cropped || input, constants.tf255);
|
||||
|
@ -77,7 +94,7 @@ function prepareImage(input: Tensor4D, size: number, cropBox?: Box): Tensor {
|
|||
return final;
|
||||
}
|
||||
|
||||
function rescaleKeypoints(keypoints: BodyKeypoint[], outputSize: [number, number], cropBox?: Box): BodyKeypoint[] {
|
||||
function rescaleKeypoints(keypoints: BodyKeypoint[], outputSize: [number, number]): BodyKeypoint[] {
|
||||
for (const kpt of keypoints) { // first rescale due to padding
|
||||
kpt.position = [
|
||||
Math.trunc(kpt.position[0] * (outputSize[0] + padding[2][0] + padding[2][1]) / outputSize[0] - padding[2][0]),
|
||||
|
@ -87,12 +104,10 @@ function rescaleKeypoints(keypoints: BodyKeypoint[], outputSize: [number, number
|
|||
kpt.positionRaw = [kpt.position[0] / outputSize[0], kpt.position[1] / outputSize[1], 2 * (kpt.position[2] as number) / (outputSize[0] + outputSize[1])];
|
||||
}
|
||||
if (cropBox) { // second rescale due to cropping
|
||||
const width = cropBox[2] - cropBox[0];
|
||||
const height = cropBox[3] - cropBox[1];
|
||||
for (const kpt of keypoints) {
|
||||
kpt.positionRaw = [
|
||||
kpt.positionRaw[0] / height + cropBox[1], // correct offset due to crop
|
||||
kpt.positionRaw[1] / width + cropBox[0], // correct offset due to crop
|
||||
kpt.positionRaw[0] + cropBox[1], // correct offset due to crop
|
||||
kpt.positionRaw[1] + cropBox[0], // correct offset due to crop
|
||||
kpt.positionRaw[2] as number,
|
||||
];
|
||||
kpt.position = [
|
||||
|
@ -125,9 +140,9 @@ async function detectLandmarks(input: Tensor, config: Config, outputSize: [numbe
|
|||
* t.world: 39 keypoints [x,y,z] normalized to -1..1
|
||||
* t.poseflag: body score
|
||||
*/
|
||||
if (!model?.['executor']) return null;
|
||||
if (!models.landmarks?.['executor']) return null;
|
||||
const t: Record<string, Tensor> = {};
|
||||
[t.ld/* 1,195(39*5) */, t.segmentation/* 1,256,256,1 */, t.heatmap/* 1,64,64,39 */, t.world/* 1,117(39*3) */, t.poseflag/* 1,1 */] = model?.execute(input, outputNodes.landmarks) as Tensor[]; // run model
|
||||
[t.ld/* 1,195(39*5) */, t.segmentation/* 1,256,256,1 */, t.heatmap/* 1,64,64,39 */, t.world/* 1,117(39*3) */, t.poseflag/* 1,1 */] = models.landmarks?.execute(input, outputNodes.landmarks) as Tensor[]; // run model
|
||||
const poseScore = (await t.poseflag.data())[0];
|
||||
const points = await t.ld.data();
|
||||
const distances = await t.world.data();
|
||||
|
@ -138,7 +153,7 @@ async function detectLandmarks(input: Tensor, config: Config, outputSize: [numbe
|
|||
const score = sigmoid(points[depth * i + 3]);
|
||||
const presence = sigmoid(points[depth * i + 4]);
|
||||
const adjScore = Math.trunc(100 * score * presence * poseScore) / 100;
|
||||
const positionRaw: Point = [points[depth * i + 0] / inputSize, points[depth * i + 1] / inputSize, points[depth * i + 2] + 0];
|
||||
const positionRaw: Point = [points[depth * i + 0] / inputSize.landmarks[0], points[depth * i + 1] / inputSize.landmarks[1], points[depth * i + 2] + 0];
|
||||
const position: Point = [Math.trunc(outputSize[0] * positionRaw[0]), Math.trunc(outputSize[1] * positionRaw[1]), positionRaw[2] as number];
|
||||
const distance: Point = [distances[depth * i + 0], distances[depth * i + 1], distances[depth * i + 2] + 0];
|
||||
keypointsRelative.push({ part: coords.kpt[i] as BodyLandmark, positionRaw, position, distance, score: adjScore });
|
||||
|
@ -162,31 +177,52 @@ async function detectLandmarks(input: Tensor, config: Config, outputSize: [numbe
|
|||
return body;
|
||||
}
|
||||
|
||||
export async function predict(input: Tensor4D, config: Config): Promise<BodyResult[]> {
|
||||
/*
|
||||
interface DetectedBox { box: Box, boxRaw: Box, score: number }
|
||||
|
||||
function rescaleBoxes(boxes: Array<DetectedBox>, outputSize: [number, number]): Array<DetectedBox> {
|
||||
for (const b of boxes) {
|
||||
b.box = [
|
||||
Math.trunc(b.box[0] * (outputSize[0] + padding[2][0] + padding[2][1]) / outputSize[0]),
|
||||
Math.trunc(b.box[1] * (outputSize[1] + padding[1][0] + padding[1][1]) / outputSize[1]),
|
||||
Math.trunc(b.box[2] * (outputSize[0] + padding[2][0] + padding[2][1]) / outputSize[0]),
|
||||
Math.trunc(b.box[3] * (outputSize[1] + padding[1][0] + padding[1][1]) / outputSize[1]),
|
||||
];
|
||||
b.boxRaw = [b.box[0] / outputSize[0], b.box[1] / outputSize[1], b.box[2] / outputSize[0], b.box[3] / outputSize[1]];
|
||||
}
|
||||
return boxes;
|
||||
}
|
||||
|
||||
async function detectBoxes(input: Tensor, config: Config, outputSize: [number, number]) {
|
||||
const t: Record<string, Tensor> = {};
|
||||
t.res = models.detector?.execute(input, ['Identity']) as Tensor; //
|
||||
t.logitsRaw = tf.slice(t.res, [0, 0, 0], [1, -1, 1]);
|
||||
t.boxesRaw = tf.slice(t.res, [0, 0, 1], [1, -1, -1]);
|
||||
t.logits = tf.squeeze(t.logitsRaw);
|
||||
t.boxes = tf.squeeze(t.boxesRaw);
|
||||
const boxes = await detect.decode(t.boxes, t.logits, config, outputSize);
|
||||
rescaleBoxes(boxes, outputSize);
|
||||
Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
|
||||
return boxes;
|
||||
}
|
||||
*/
|
||||
|
||||
export async function predict(input: Tensor, config: Config): Promise<BodyResult[]> {
|
||||
const outputSize: [number, number] = [input.shape[2] || 0, input.shape[1] || 0];
|
||||
const skipTime = (config.body.skipTime || 0) > (now() - lastTime);
|
||||
const skipFrame = skipped < (config.body.skipFrames || 0);
|
||||
if (config.skipAllowed && skipTime && skipFrame && cache !== null) {
|
||||
skipped++;
|
||||
} else {
|
||||
let boxes: DetectedBox[] = [];
|
||||
if (config.body?.['detector']?.['enabled']) {
|
||||
const preparedImage = prepareImage(input, 224);
|
||||
boxes = await detectBoxes(preparedImage, config, outputSize);
|
||||
tf.dispose(preparedImage);
|
||||
} else {
|
||||
boxes = [{ box: [0, 0, 0, 0] as Box, boxRaw: [0, 0, 1, 1], score: 0 }]; // running without detector
|
||||
}
|
||||
for (let i = 0; i < boxes.length; i++) {
|
||||
const preparedBox = prepareImage(input, 256, boxes[i]?.boxRaw); // padded and resized
|
||||
cache.length = 0;
|
||||
const bodyResult = await detectLandmarks(preparedBox, config, outputSize);
|
||||
tf.dispose(preparedBox);
|
||||
if (!bodyResult) continue;
|
||||
bodyResult.id = i;
|
||||
// bodyResult.score = 0; // TBD
|
||||
cache.push(bodyResult);
|
||||
const t: Record<string, Tensor> = {};
|
||||
/*
|
||||
if (config.body['detector'] && config.body['detector']['enabled']) {
|
||||
t.detector = await prepareImage(input, 224);
|
||||
const boxes = await detectBoxes(t.detector, config, outputSize);
|
||||
}
|
||||
*/
|
||||
t.landmarks = prepareImage(input, 256); // padded and resized
|
||||
cache = await detectLandmarks(t.landmarks, config, outputSize);
|
||||
/*
|
||||
cropBox = [0, 0, 1, 1]; // reset crop coordinates
|
||||
if (cache?.boxRaw && config.skipAllowed) {
|
||||
|
@ -201,8 +237,9 @@ export async function predict(input: Tensor4D, config: Config): Promise<BodyResu
|
|||
}
|
||||
}
|
||||
*/
|
||||
Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
|
||||
lastTime = now();
|
||||
skipped = 0;
|
||||
}
|
||||
return cache;
|
||||
return cache ? [cache] : [];
|
||||
}
|
||||
|
|
|
@ -1,15 +1,11 @@
|
|||
import * as tf from 'dist/tfjs.esm.js';
|
||||
import { log } from '../util/util';
|
||||
import { env } from '../util/env';
|
||||
import { loadModel } from '../tfjs/load';
|
||||
import * as tf from '../../dist/tfjs.esm.js';
|
||||
import type { Tensor } from '../tfjs/types';
|
||||
import type { Box } from '../result';
|
||||
import type { Config } from '../config';
|
||||
import type { GraphModel, Tensor, Tensor1D, Tensor2D } from '../tfjs/types';
|
||||
|
||||
export interface DetectedBox { box: Box, boxRaw: Box, score: number }
|
||||
interface DetectedBox { box: Box, boxRaw: Box, score: number }
|
||||
|
||||
let model: GraphModel | null;
|
||||
let inputSize = 224;
|
||||
const inputSize = 224;
|
||||
let anchorTensor: { x, y };
|
||||
const numLayers = 5;
|
||||
const strides = [8, 16, 32, 32, 32];
|
||||
|
@ -39,20 +35,8 @@ export function createAnchors() {
|
|||
anchorTensor = { x: tf.tensor1d(anchors.map((a) => a.x)), y: tf.tensor1d(anchors.map((a) => a.y)) };
|
||||
}
|
||||
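`createAnchors` (only partially visible above) builds a flat list of anchor centers from `numLayers` and `strides` and packs their x/y coordinates into two tensors. The exact anchor scheme is not shown in this hunk; the following is only a generic, hedged sketch of stride-based grid anchors (one anchor per cell, which may not match the model's actual per-cell count):

```ts
// Generic sketch of SSD-style grid anchors in normalized 0..1 coordinates.
// inputSize and strides follow the constants shown above; the per-cell anchor count is an assumption.
function gridAnchors(inputSize: number, strides: number[]): { x: number, y: number }[] {
  const anchors: { x: number, y: number }[] = [];
  for (const stride of strides) {
    const gridSize = Math.ceil(inputSize / stride);
    for (let y = 0; y < gridSize; y++) {
      for (let x = 0; x < gridSize; x++) {
        anchors.push({ x: (x + 0.5) / gridSize, y: (y + 0.5) / gridSize }); // cell center
      }
    }
  }
  return anchors;
}

// e.g. gridAnchors(224, [8, 16, 32, 32, 32]) yields 28*28 + 14*14 + 3*(7*7) = 1127 anchors for this sketch
```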
|
||||
export async function loadDetector(config: Config): Promise<GraphModel> {
|
||||
if (env.initial) model = null;
|
||||
if (!model && config.body['detector'] && config.body['detector'].modelPath || '') {
|
||||
model = await loadModel(config.body['detector'].modelPath);
|
||||
const inputs = model?.['executor'] ? Object.values(model.modelSignature['inputs']) : undefined;
|
||||
// @ts-ignore model signature properties are not typed and inputs are unreliable for this model
|
||||
inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
|
||||
} else if (config.debug && model) log('cached model:', model['modelUrl']);
|
||||
createAnchors();
|
||||
return model as GraphModel;
|
||||
}
|
||||
|
||||
const cropFactor = [5.0, 5.0];
|
||||
export function decodeBoxes(boxesTensor, anchor) {
|
||||
function decodeBoxes(boxesTensor, anchor): Tensor {
|
||||
return tf.tidy(() => {
|
||||
const split = tf.split(boxesTensor, 12, 1); // first 4 are box data [x,y,w,h] and 4 are keypoints data [x,y] for total of 12
|
||||
let xCenter = tf.squeeze(split[0]);
|
||||
|
@ -65,41 +49,39 @@ export function decodeBoxes(boxesTensor, anchor) {
|
|||
height = tf.mul(tf.div(height, inputSize), cropFactor[1]);
|
||||
const xMin = tf.sub(xCenter, tf.div(width, 2));
|
||||
const yMin = tf.sub(yCenter, tf.div(height, 2));
|
||||
const xMax = tf.add(xMin, width);
|
||||
const yMax = tf.add(yMin, height);
|
||||
const boxes = tf.stack([xMin, yMin, xMax, yMax], 1);
|
||||
const boxes = tf.stack([xMin, yMin, width, height], 1);
|
||||
return boxes;
|
||||
});
|
||||
}
|
||||
|
||||
async function decodeResults(boxesTensor: Tensor, logitsTensor: Tensor, config: Config, outputSize: [number, number]): Promise<DetectedBox[]> {
|
||||
const detectedBoxes: DetectedBox[] = [];
|
||||
export async function decode(boxesTensor: Tensor, logitsTensor: Tensor, config: Config, outputSize: [number, number]): Promise<DetectedBox[]> {
|
||||
const t: Record<string, Tensor> = {};
|
||||
t.boxes = decodeBoxes(boxesTensor, anchorTensor);
|
||||
t.scores = tf.sigmoid(logitsTensor);
|
||||
t.nms = await tf.image.nonMaxSuppressionAsync(t.boxes as Tensor2D, t.scores as Tensor1D, 1, config.body['detector']?.minConfidence || 0.1, config.body['detector']?.iouThreshold || 0.1);
|
||||
const nms = await t.nms.data();
|
||||
t.argmax = tf.argMax(t.scores);
|
||||
const i = (await t.argmax.data())[0];
|
||||
const scores = await t.scores.data();
|
||||
const detected: { box: Box, boxRaw: Box, score: number }[] = [];
|
||||
const minScore = config.body?.['detector']?.minConfidence || 0;
|
||||
if (scores[i] >= minScore) {
|
||||
const boxes = await t.boxes.array();
|
||||
for (const i of Array.from(nms)) {
|
||||
const score = scores[i];
|
||||
const boxRaw: Box = boxes[i];
|
||||
const box: Box = [Math.round(boxRaw[0] * outputSize[0]), Math.round(boxRaw[1] * outputSize[1]), Math.round(boxRaw[2] * outputSize[0]), Math.round(boxRaw[3] * outputSize[1])];
|
||||
const detectedBox: DetectedBox = { score, boxRaw, box };
|
||||
detectedBoxes.push(detectedBox);
|
||||
const box: Box = [boxRaw[0] * outputSize[0], boxRaw[1] * outputSize[1], boxRaw[2] * outputSize[0], boxRaw[3] * outputSize[1]];
|
||||
// console.log(box);
|
||||
detected.push({ box, boxRaw, score: scores[i] });
|
||||
}
|
||||
/*
|
||||
t.nms = await tf.image.nonMaxSuppressionAsync(t.boxes, t.scores, 1, config.body.detector?.minConfidence || 0.1, config.body.detector?.iouThreshold || 0.1);
|
||||
const boxes = t.boxes.arraySync();
|
||||
const scores = t.scores.dataSync();
|
||||
const nms = t.nms.dataSync();
|
||||
const detected: Array<DetectedBox> = [];
|
||||
for (const i of Array.from(nms)) {
|
||||
const boxRaw: Box = boxes[i];
|
||||
const box: Box = [boxRaw[0] * outputSize[0], boxRaw[0] * outputSize[1], boxRaw[3] * outputSize[0], boxRaw[2] * outputSize[1]];
|
||||
detected.push({ box, boxRaw, score: scores[i] });
|
||||
}
|
||||
*/
|
||||
Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
|
||||
return detectedBoxes;
|
||||
}
|
||||
|
||||
export async function detectBoxes(input: Tensor, config: Config, outputSize: [number, number]) {
|
||||
const t: Record<string, Tensor> = {};
|
||||
t.res = model?.execute(input, ['Identity']) as Tensor; //
|
||||
t.logitsRaw = tf.slice(t.res, [0, 0, 0], [1, -1, 1]);
|
||||
t.boxesRaw = tf.slice(t.res, [0, 0, 1], [1, -1, -1]);
|
||||
t.logits = tf.squeeze(t.logitsRaw);
|
||||
t.boxes = tf.squeeze(t.boxesRaw);
|
||||
const boxes = await decodeResults(t.boxes, t.logits, config, outputSize);
|
||||
Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
|
||||
return boxes;
|
||||
return detected;
|
||||
}
|
||||
|
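The detector decode above first produces boxes in normalized 0..1 coordinates (`boxRaw`) and only then scales them by the frame size to obtain pixel boxes. As a minimal standalone sketch of that scaling step (plain TypeScript, no tfjs; rounding differs slightly between the two variants shown in the hunk):

```ts
type Box = [number, number, number, number];

// Scale a normalized box to pixel coordinates; outputSize is [width, height],
// matching how the diff derives it from input.shape[2] / input.shape[1].
function scaleBox(boxRaw: Box, outputSize: [number, number]): Box {
  return [
    Math.round(boxRaw[0] * outputSize[0]),
    Math.round(boxRaw[1] * outputSize[1]),
    Math.round(boxRaw[2] * outputSize[0]),
    Math.round(boxRaw[3] * outputSize[1]),
  ];
}

// e.g. a detection covering the left half of a 1280x720 frame:
// scaleBox([0, 0, 0.5, 1], [1280, 720]) -> [0, 0, 640, 720]
```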
|
|
@ -4,13 +4,13 @@
|
|||
* Based on: [**EfficientPose**](https://github.com/daniegr/EfficientPose)
|
||||
*/
|
||||
|
||||
import * as tf from 'dist/tfjs.esm.js';
|
||||
import { log, now } from '../util/util';
|
||||
import * as tf from '../../dist/tfjs.esm.js';
|
||||
import { loadModel } from '../tfjs/load';
|
||||
import * as coords from './efficientposecoords';
|
||||
import { constants } from '../tfjs/constants';
|
||||
import type { BodyResult, Point, BodyLandmark, BodyAnnotation } from '../result';
|
||||
import type { GraphModel, Tensor4D } from '../tfjs/types';
|
||||
import type { GraphModel, Tensor } from '../tfjs/types';
|
||||
import type { Config } from '../config';
|
||||
import { env } from '../util/env';
|
||||
|
||||
|
@ -50,8 +50,8 @@ async function max2d(inputs, minScore): Promise<[number, number, number]> {
|
|||
return [0, 0, newScore];
|
||||
}
|
||||
|
||||
export async function predict(image: Tensor4D, config: Config): Promise<BodyResult[]> {
|
||||
if (!model?.['executor'] || !model?.inputs[0].shape) return [];
|
||||
export async function predict(image: Tensor, config: Config): Promise<BodyResult[]> {
|
||||
if (!model?.['executor']) return [];
|
||||
const skipTime = (config.body.skipTime || 0) > (now() - lastTime);
|
||||
const skipFrame = skipped < (config.body.skipFrames || 0);
|
||||
if (config.skipAllowed && skipTime && skipFrame && Object.keys(cache.keypoints).length > 0) {
|
||||
|
@ -61,7 +61,8 @@ export async function predict(image: Tensor4D, config: Config): Promise<BodyResu
|
|||
skipped = 0;
|
||||
return new Promise(async (resolve) => {
|
||||
const tensor = tf.tidy(() => {
|
||||
const resize = tf.image.resizeBilinear(image, [model?.inputs[0].shape?.[2] || 0, model?.inputs[0].shape?.[1] || 0], false);
|
||||
if (!model?.inputs[0].shape) return null;
|
||||
const resize = tf.image.resizeBilinear(image, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
|
||||
const enhance = tf.mul(resize, constants.tf2);
|
||||
const norm = tf.sub(enhance, constants.tf1);
|
||||
return norm;
|
||||
|
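This predictor uses the same frame-skip gate as the other body models in the diff: a cached result is reused while both the `skipTime` window and the `skipFrames` budget still hold. A self-contained sketch of that gate (the helper name and `Date.now()` are stand-ins for the library's `now()` util):

```ts
interface SkipConfig { skipTime?: number, skipFrames?: number }

// Returns true when the cached result may be reused instead of running the model again.
function canReuseCache(cfg: SkipConfig, lastTime: number, skipped: number, skipAllowed: boolean, hasCache: boolean): boolean {
  const skipTime = (cfg.skipTime || 0) > (Date.now() - lastTime); // still inside the allowed time window (ms)
  const skipFrame = skipped < (cfg.skipFrames || 0);              // still inside the allowed frame budget
  return skipAllowed && skipTime && skipFrame && hasCache;
}

// usage pattern mirrored from the diff:
//   if (canReuseCache(config.body, lastTime, skipped, config.skipAllowed, cache !== null)) skipped++;
//   else { /* run model */ lastTime = Date.now(); skipped = 0; }
```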
|
|
@ -4,9 +4,9 @@
|
|||
* Based on: [**MoveNet**](https://blog.tensorflow.org/2021/05/next-generation-pose-detection-with-movenet-and-tensorflowjs.html)
|
||||
*/
|
||||
|
||||
import * as tf from 'dist/tfjs.esm.js';
|
||||
import { log, now } from '../util/util';
|
||||
import * as box from '../util/box';
|
||||
import * as tf from '../../dist/tfjs.esm.js';
|
||||
import * as coords from './movenetcoords';
|
||||
import * as fix from './movenetfix';
|
||||
import { loadModel } from '../tfjs/load';
|
||||
|
@ -39,8 +39,6 @@ export async function load(config: Config): Promise<GraphModel> {
|
|||
} else if (config.debug) log('cached model:', model['modelUrl']);
|
||||
inputSize = (model?.['executor'] && model?.inputs?.[0].shape) ? model.inputs[0].shape[2] : 0;
|
||||
if (inputSize < 64) inputSize = 256;
|
||||
// @ts-ignore private property
|
||||
if (tf.env().flagRegistry.WEBGL_USE_SHAPES_UNIFORMS) tf.env().set('WEBGL_USE_SHAPES_UNIFORMS', false); // default=false <https://github.com/tensorflow/tfjs/issues/5205>
|
||||
return model;
|
||||
}
|
||||
|
||||
|
@ -86,8 +84,8 @@ function parseMultiPose(res, config, image) {
|
|||
const bodies: BodyResult[] = [];
|
||||
for (let id = 0; id < res[0].length; id++) {
|
||||
const kpt = res[0][id];
|
||||
const boxScore = Math.round(100 * kpt[51 + 4]) / 100;
|
||||
if (boxScore > config.body.minConfidence) {
|
||||
const totalScore = Math.round(100 * kpt[51 + 4]) / 100;
|
||||
if (totalScore > config.body.minConfidence) {
|
||||
const keypoints: BodyKeypoint[] = [];
|
||||
for (let i = 0; i < 17; i++) {
|
||||
const score = kpt[3 * i + 2];
|
||||
|
@ -101,10 +99,10 @@ function parseMultiPose(res, config, image) {
|
|||
});
|
||||
}
|
||||
}
|
||||
// const newBox = box.calc(keypoints.map((pt) => pt.position), [image.shape[2], image.shape[1]]);
|
||||
const newBox = box.calc(keypoints.map((pt) => pt.position), [image.shape[2], image.shape[1]]);
|
||||
// movenet-multipose has built-in box details
|
||||
const boxRaw: Box = [kpt[51 + 1], kpt[51 + 0], kpt[51 + 3] - kpt[51 + 1], kpt[51 + 2] - kpt[51 + 0]];
|
||||
const boxNorm: Box = [Math.trunc(boxRaw[0] * (image.shape[2] || 0)), Math.trunc(boxRaw[1] * (image.shape[1] || 0)), Math.trunc(boxRaw[2] * (image.shape[2] || 0)), Math.trunc(boxRaw[3] * (image.shape[1] || 0))];
|
||||
// const boxRaw: Box = [kpt[51 + 1], kpt[51 + 0], kpt[51 + 3] - kpt[51 + 1], kpt[51 + 2] - kpt[51 + 0]];
|
||||
// const box: Box = [Math.trunc(boxRaw[0] * (image.shape[2] || 0)), Math.trunc(boxRaw[1] * (image.shape[1] || 0)), Math.trunc(boxRaw[2] * (image.shape[2] || 0)), Math.trunc(boxRaw[3] * (image.shape[1] || 0))];
|
||||
const annotations: Record<BodyAnnotation, Point[][]> = {} as Record<BodyAnnotation, Point[][]>;
|
||||
for (const [name, indexes] of Object.entries(coords.connected)) {
|
||||
const pt: Point[][] = [];
|
||||
|
@ -115,8 +113,7 @@ function parseMultiPose(res, config, image) {
|
|||
}
|
||||
annotations[name] = pt;
|
||||
}
|
||||
// const body: BodyResult = { id, score: totalScore, box: newBox.box, boxRaw: newBox.boxRaw, keypoints: [...keypoints], annotations };
|
||||
const body: BodyResult = { id, score: boxScore, box: boxNorm, boxRaw, keypoints: [...keypoints], annotations };
|
||||
const body: BodyResult = { id, score: totalScore, box: newBox.box, boxRaw: newBox.boxRaw, keypoints: [...keypoints], annotations };
|
||||
fix.bodyParts(body);
|
||||
bodies.push(body);
|
||||
}
|
||||
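The multipose parser above reads each detection as a flat vector: 17 keypoints of `[y, x, score]` (51 values) followed by box data `[yMin, xMin, yMax, xMax, score]` at offsets 51..55, which is why the code indexes `kpt[3 * i + 2]` and `kpt[51 + 4]`. A hedged standalone sketch of that decoding (helper names are illustrative, not from the library):

```ts
interface DecodedPose { keypoints: { x: number, y: number, score: number }[], box: [number, number, number, number], score: number }

// Decode one MoveNet-multipose detection vector of 56 values (coordinates normalized to 0..1).
function decodePoseVector(kpt: number[]): DecodedPose {
  const keypoints: { x: number, y: number, score: number }[] = [];
  for (let i = 0; i < 17; i++) {
    keypoints.push({ y: kpt[3 * i + 0], x: kpt[3 * i + 1], score: kpt[3 * i + 2] });
  }
  const box: [number, number, number, number] = [
    kpt[51 + 1],               // x min
    kpt[51 + 0],               // y min
    kpt[51 + 3] - kpt[51 + 1], // width
    kpt[51 + 2] - kpt[51 + 0], // height
  ];
  return { keypoints, box, score: Math.round(100 * kpt[51 + 4]) / 100 };
}
```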
|
@ -138,6 +135,39 @@ export async function predict(input: Tensor, config: Config): Promise<BodyResult
|
|||
return new Promise(async (resolve) => {
|
||||
const t: Record<string, Tensor> = {};
|
||||
skipped = 0;
|
||||
// run detection on squared input and cached boxes
|
||||
/*
|
||||
cache.bodies = []; // reset bodies result
|
||||
if (cache.boxes.length >= (config.body.maxDetected || 0)) { // if we have enough cached boxes run detection using cache
|
||||
for (let i = 0; i < cache.boxes.length; i++) { // run detection based on cached boxes
|
||||
t.crop = tf.image.cropAndResize(input, [cache.boxes[i]], [0], [inputSize, inputSize], 'bilinear');
|
||||
t.cast = tf.cast(t.crop, 'int32');
|
||||
// t.input = prepareImage(input);
|
||||
t.res = model?.execute(t.cast) as Tensor;
|
||||
const res = await t.res.array();
|
||||
const newBodies = (t.res.shape[2] === 17) ? await parseSinglePose(res, config, input, cache.boxes[i]) : await parseMultiPose(res, config, input, cache.boxes[i]);
|
||||
cache.bodies = cache.bodies.concat(newBodies);
|
||||
Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
|
||||
}
|
||||
}
|
||||
if (cache.bodies.length !== config.body.maxDetected) { // did not find enough bodies based on cached boxes so run detection on full frame
|
||||
t.input = prepareImage(input);
|
||||
t.res = model?.execute(t.input) as Tensor;
|
||||
const res = await t.res.array();
|
||||
cache.bodies = (t.res.shape[2] === 17) ? await parseSinglePose(res, config, input, [0, 0, 1, 1]) : await parseMultiPose(res, config, input, [0, 0, 1, 1]);
|
||||
for (const body of cache.bodies) rescaleBody(body, [input.shape[2] || 1, input.shape[1] || 1]);
|
||||
Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
|
||||
}
|
||||
cache.boxes.length = 0; // reset cache
|
||||
for (let i = 0; i < cache.bodies.length; i++) {
|
||||
if (cache.bodies[i].keypoints.length > (coords.kpt.length / 2)) { // only update cache if we detected at least half keypoints
|
||||
const scaledBox = box.scale(cache.bodies[i].boxRaw, boxExpandFact);
|
||||
const cropBox = box.crop(scaledBox);
|
||||
cache.boxes.push(cropBox);
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
// run detection on squared input and no cached boxes
|
||||
t.input = fix.padInput(input, inputSize);
|
||||
t.res = model?.execute(t.input) as Tensor;
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
import * as tf from 'dist/tfjs.esm.js';
|
||||
import type { BodyKeypoint, BodyResult } from '../result';
|
||||
import * as box from '../util/box';
|
||||
import * as coords from './movenetcoords';
|
||||
import type { Tensor, Tensor3D } from '../tfjs/types';
|
||||
import * as tf from '../../dist/tfjs.esm.js';
|
||||
import type { Tensor } from '../tfjs/types';
|
||||
|
||||
const maxJitter = 0.005; // default allowed jitter is within 0.5%
|
||||
|
||||
|
@ -83,7 +83,7 @@ export function padInput(input: Tensor, inputSize: number): Tensor {
|
|||
[0, 0], // don't touch rgb channels
|
||||
];
|
||||
t.pad = tf.pad(input, cache.padding);
|
||||
t.resize = tf.image.resizeBilinear(t.pad as Tensor3D, [inputSize, inputSize]);
|
||||
t.resize = tf.image.resizeBilinear(t.pad, [inputSize, inputSize]);
|
||||
const final = tf.cast(t.resize, 'int32');
|
||||
Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
|
||||
return final;
|
||||
|
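`padInput` above makes the frame square before resizing to the model input size, so `resizeBilinear` does not distort the aspect ratio. The padding amounts themselves are not fully visible in this hunk; they reduce to simple arithmetic. A minimal sketch, assuming the `[batch, height, width, channels]` tensor layout used elsewhere in the diff:

```ts
// Compute tf.pad-style padding for a height x width frame so it becomes square.
// Returned shape: [[batch], [top, bottom], [left, right], [channels]].
function squarePadding(height: number, width: number): [number, number][] {
  const size = Math.max(height, width);
  const padY = size - height;
  const padX = size - width;
  return [
    [0, 0],                                      // don't touch batch
    [Math.floor(padY / 2), Math.ceil(padY / 2)], // pad rows (height)
    [Math.floor(padX / 2), Math.ceil(padX / 2)], // pad columns (width)
    [0, 0],                                      // don't touch rgb channels
  ];
}

// e.g. squarePadding(720, 1280) -> [[0, 0], [280, 280], [0, 0], [0, 0]]
```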
|
|
@ -4,11 +4,11 @@
|
|||
* Based on: [**PoseNet**](https://medium.com/tensorflow/real-time-human-pose-estimation-in-the-browser-with-tensorflow-js-7dd0bc881cd5)
|
||||
*/
|
||||
|
||||
import * as tf from 'dist/tfjs.esm.js';
|
||||
import { log } from '../util/util';
|
||||
import * as tf from '../../dist/tfjs.esm.js';
|
||||
import { loadModel } from '../tfjs/load';
|
||||
import type { BodyResult, BodyLandmark, Box } from '../result';
|
||||
import type { Tensor, GraphModel, Tensor4D } from '../tfjs/types';
|
||||
import type { Tensor, GraphModel } from '../tfjs/types';
|
||||
import type { Config } from '../config';
|
||||
import { env } from '../util/env';
|
||||
import * as utils from './posenetutils';
|
||||
|
@ -155,7 +155,7 @@ export function decode(offsets, scores, displacementsFwd, displacementsBwd, maxD
|
|||
return poses;
|
||||
}
|
||||
|
||||
export async function predict(input: Tensor4D, config: Config): Promise<BodyResult[]> {
|
||||
export async function predict(input: Tensor, config: Config): Promise<BodyResult[]> {
|
||||
/** posenet is mostly obsolete
|
||||
* caching is not implemented
|
||||
*/
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/* eslint-disable no-multi-spaces */
|
||||
|
||||
/** Possible TensorFlow backends */
|
||||
export type BackendEnum = '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu' | 'none';
|
||||
export type BackendEnum = '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu';
|
||||
|
||||
/** Possible values for `human.warmup` */
|
||||
export type WarmupEnum = '' | 'none' | 'face' | 'full' | 'body';
|
||||
|
@ -33,14 +33,8 @@ export interface FaceDetectorConfig extends GenericConfig {
|
|||
maxDetected: number,
|
||||
/** minimum confidence for a detected face before results are discarded */
|
||||
minConfidence: number,
|
||||
/** minimum size in pixels of a detected face box before results are discarded */
|
||||
minSize: number,
|
||||
/** minimum overlap between two detected faces before one is discarded */
|
||||
iouThreshold: number,
|
||||
/** how much should face box be enlarged over the min/max facial coordinates */
|
||||
scale: number,
|
||||
/** automatically pad image to square */
|
||||
square: boolean,
|
||||
/** should child models perform on masked image of a face */
|
||||
mask: boolean,
|
||||
/** should face detection return processed and cropped face tensor that can be used with an external model for additional processing?
|
||||
|
@ -55,10 +49,7 @@ export interface FaceMeshConfig extends GenericConfig {
|
|||
}
|
||||
|
||||
/** Iris part of face configuration */
|
||||
export interface FaceIrisConfig extends GenericConfig {
|
||||
/** how much should iris box be enlarged over the min/max iris coordinates */
|
||||
scale: number,
|
||||
}
|
||||
export interface FaceIrisConfig extends GenericConfig {}
|
||||
|
||||
/** Attention part of face configuration */
|
||||
export interface FaceAttentionConfig extends GenericConfig {}
|
||||
|
@ -195,8 +186,6 @@ export interface FilterConfig {
|
|||
return: boolean,
|
||||
/** flip input as mirror image */
|
||||
flip: boolean,
|
||||
/** apply auto-brightness */
|
||||
autoBrightness: boolean,
|
||||
/** range: -1 (darken) to 1 (lighten) */
|
||||
brightness: number,
|
||||
/** range: -1 (reduce contrast) to 1 (increase contrast) */
|
||||
|
@ -361,7 +350,6 @@ const config: Config = {
|
|||
height: 0,
|
||||
flip: false,
|
||||
return: true,
|
||||
autoBrightness: true,
|
||||
brightness: 0,
|
||||
contrast: 0,
|
||||
sharpness: 0,
|
||||
|
@ -383,14 +371,12 @@ const config: Config = {
|
|||
enabled: true,
|
||||
detector: {
|
||||
modelPath: 'blazeface.json',
|
||||
rotation: false,
|
||||
rotation: true,
|
||||
maxDetected: 1,
|
||||
skipFrames: 99,
|
||||
skipTime: 2500,
|
||||
minConfidence: 0.2,
|
||||
minSize: 0,
|
||||
iouThreshold: 0.1,
|
||||
scale: 1.4,
|
||||
mask: false,
|
||||
return: false,
|
||||
},
|
||||
|
@ -405,7 +391,6 @@ const config: Config = {
|
|||
},
|
||||
iris: {
|
||||
enabled: true,
|
||||
scale: 2.3,
|
||||
modelPath: 'iris.json',
|
||||
},
|
||||
emotion: {
|
||||
|
@ -456,12 +441,12 @@ const config: Config = {
|
|||
modelPath: 'handtrack.json',
|
||||
},
|
||||
skeleton: {
|
||||
modelPath: 'handlandmark-lite.json',
|
||||
modelPath: 'handlandmark-full.json',
|
||||
},
|
||||
},
|
||||
object: {
|
||||
enabled: false,
|
||||
modelPath: 'centernet.json',
|
||||
modelPath: 'mb3-centernet.json',
|
||||
minConfidence: 0.2,
|
||||
iouThreshold: 0.4,
|
||||
maxDetected: 10,
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
import { mergeDeep } from '../util/util';
|
||||
import { getCanvasContext, rect, point, curves, colorDepth, replace, labels } from './primitives';
|
||||
import { getCanvasContext, rect, point, curves, colorDepth } from './primitives';
|
||||
import { options } from './options';
|
||||
import type { BodyResult } from '../result';
|
||||
import type { AnyCanvas, DrawOptions } from '../exports';
|
||||
|
@ -8,7 +8,7 @@ import type { AnyCanvas, DrawOptions } from '../exports';
|
|||
export function body(inCanvas: AnyCanvas, result: BodyResult[], drawOptions?: Partial<DrawOptions>) {
|
||||
const localOptions: DrawOptions = mergeDeep(options, drawOptions);
|
||||
if (!result || !inCanvas) return;
|
||||
const ctx = getCanvasContext(inCanvas) as CanvasRenderingContext2D;
|
||||
const ctx = getCanvasContext(inCanvas);
|
||||
if (!ctx) return;
|
||||
ctx.lineJoin = 'round';
|
||||
for (let i = 0; i < result.length; i++) {
|
||||
|
@ -18,11 +18,13 @@ export function body(inCanvas: AnyCanvas, result: BodyResult[], drawOptions?: Pa
|
|||
ctx.font = localOptions.font;
|
||||
if (localOptions.drawBoxes && result[i].box && result[i].box.length === 4) {
|
||||
rect(ctx, result[i].box[0], result[i].box[1], result[i].box[2], result[i].box[3], localOptions);
|
||||
if (localOptions.drawLabels && (localOptions.bodyLabels?.length > 0)) {
|
||||
let l = localOptions.bodyLabels.slice();
|
||||
l = replace(l, '[id]', result[i].id.toFixed(0));
|
||||
l = replace(l, '[score]', 100 * result[i].score);
|
||||
labels(ctx, l, result[i].box[0], result[i].box[1], localOptions);
|
||||
if (localOptions.drawLabels) {
|
||||
if (localOptions.shadowColor && localOptions.shadowColor !== '') {
|
||||
ctx.fillStyle = localOptions.shadowColor;
|
||||
ctx.fillText(`body ${100 * result[i].score}%`, result[i].box[0] + 3, 1 + result[i].box[1] + localOptions.lineHeight, result[i].box[2]);
|
||||
}
|
||||
ctx.fillStyle = localOptions.labelColor;
|
||||
ctx.fillText(`body ${100 * result[i].score}%`, result[i].box[0] + 2, 0 + result[i].box[1] + localOptions.lineHeight, result[i].box[2]);
|
||||
}
|
||||
}
|
||||
if (localOptions.drawPoints && result[i].keypoints) {
|
||||
|
@ -32,14 +34,12 @@ export function body(inCanvas: AnyCanvas, result: BodyResult[], drawOptions?: Pa
|
|||
point(ctx, result[i].keypoints[pt].position[0], result[i].keypoints[pt].position[1], 0, localOptions);
|
||||
}
|
||||
}
|
||||
if (localOptions.drawLabels && (localOptions.bodyPartLabels?.length > 0) && result[i].keypoints) {
|
||||
if (localOptions.drawLabels && result[i].keypoints) {
|
||||
ctx.font = localOptions.font;
|
||||
for (const pt of result[i].keypoints) {
|
||||
if (!pt.score || (pt.score === 0)) continue;
|
||||
let l = localOptions.bodyPartLabels.slice();
|
||||
l = replace(l, '[label]', pt.part);
|
||||
l = replace(l, '[score]', 100 * pt.score);
|
||||
labels(ctx, l, pt.position[0], pt.position[1], localOptions);
|
||||
ctx.fillStyle = colorDepth(pt.position[2], localOptions);
|
||||
ctx.fillText(`${pt.part} ${Math.trunc(100 * pt.score)}%`, pt.position[0] + 4, pt.position[1] + 4);
|
||||
}
|
||||
}
|
||||
if (localOptions.drawPolygons && result[i].keypoints && result[i].annotations) {
|
||||
|
|
|
@ -2,7 +2,6 @@
|
|||
* Module that implements helper draw functions, exposed as human.draw
|
||||
*/
|
||||
|
||||
import * as tf from 'dist/tfjs.esm.js';
|
||||
import { mergeDeep, now } from '../util/util';
|
||||
import { env } from '../util/env';
|
||||
import { getCanvasContext, rect } from './primitives';
|
||||
|
@ -12,10 +11,8 @@ import { body } from './body';
|
|||
import { hand } from './hand';
|
||||
import { object } from './object';
|
||||
import { gesture } from './gesture';
|
||||
import { defaultLabels } from './labels';
|
||||
import type { Result, PersonResult } from '../result';
|
||||
import type { AnyCanvas, DrawOptions } from '../exports';
|
||||
import type { Tensor2D } from '../tfjs/types';
|
||||
|
||||
let drawTime = 0;
|
||||
|
||||
|
@ -30,7 +27,7 @@ export { gesture } from './gesture';
|
|||
export function person(inCanvas: AnyCanvas, result: PersonResult[], drawOptions?: Partial<DrawOptions>) {
|
||||
const localOptions: DrawOptions = mergeDeep(options, drawOptions);
|
||||
if (!result || !inCanvas) return;
|
||||
const ctx = getCanvasContext(inCanvas) as CanvasRenderingContext2D;
|
||||
const ctx = getCanvasContext(inCanvas);
|
||||
if (!ctx) return;
|
||||
ctx.lineJoin = 'round';
|
||||
ctx.font = localOptions.font;
|
||||
|
@ -57,27 +54,11 @@ export function person(inCanvas: AnyCanvas, result: PersonResult[], drawOptions?
|
|||
/** draw processed canvas */
|
||||
export function canvas(input: AnyCanvas | HTMLImageElement | HTMLVideoElement, output: AnyCanvas) {
|
||||
if (!input || !output) return;
|
||||
const ctx = getCanvasContext(output) as CanvasRenderingContext2D;
|
||||
const ctx = getCanvasContext(output);
|
||||
if (!ctx) return;
|
||||
ctx.drawImage(input, 0, 0);
|
||||
}
|
||||
|
||||
/** draw processed canvas */
|
||||
export async function tensor(input: Tensor2D, output: HTMLCanvasElement) {
|
||||
if (!input || !output) return;
|
||||
if (!env.browser) return;
|
||||
// const backend = tf.getBackend();
|
||||
// if (backend === 'webgpu') tf.browser.draw(input, output);
|
||||
// else await tf.browser.toPixels(input, output);
|
||||
await tf.browser.toPixels(input, output);
|
||||
// const ctx = getCanvasContext(output) as CanvasRenderingContext2D;
|
||||
// if (!ctx) return;
|
||||
// const image = await process(input);
|
||||
// result.canvas = image.canvas;
|
||||
// human.tf.dispose(image.tensor);
|
||||
// ctx.drawImage(image.canvas, 0, 0);
|
||||
}
|
||||
|
||||
/** meta-function that performs draw for: canvas, face, body, hand */
|
||||
export async function all(inCanvas: AnyCanvas, result: Result, drawOptions?: Partial<DrawOptions>) {
|
||||
if (!result?.performance || !inCanvas) return null;
|
||||
|
@ -95,14 +76,3 @@ export async function all(inCanvas: AnyCanvas, result: Result, drawOptions?: Par
|
|||
result.performance.draw = drawTime;
|
||||
return promise;
|
||||
}
|
||||
|
||||
/** sets default label templates for face/body/hand/object/gestures */
|
||||
export function init() {
|
||||
options.faceLabels = defaultLabels.face;
|
||||
options.bodyLabels = defaultLabels.body;
|
||||
options.bodyPartLabels = defaultLabels.bodyPart;
|
||||
options.handLabels = defaultLabels.hand;
|
||||
options.fingerLabels = defaultLabels.finger;
|
||||
options.objectLabels = defaultLabels.object;
|
||||
options.gestureLabels = defaultLabels.gesture;
|
||||
}
|
||||
|
|
src/draw/face.ts (104 changed lines)
|
@ -1,66 +1,77 @@
|
|||
import { TRI468 as triangulation } from '../face/facemeshcoords';
|
||||
import { mergeDeep } from '../util/util';
|
||||
import { getCanvasContext, rad2deg, rect, point, lines, arrow, labels, replace } from './primitives';
|
||||
import { getCanvasContext, rad2deg, rect, point, lines, arrow } from './primitives';
|
||||
import { options } from './options';
|
||||
import * as facemeshConstants from '../face/constants';
|
||||
import type { FaceResult } from '../result';
|
||||
import type { AnyCanvas, DrawOptions } from '../exports';
|
||||
|
||||
let localOptions: DrawOptions;
|
||||
let opt: DrawOptions;
|
||||
|
||||
function drawLabels(f: FaceResult, ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D) {
|
||||
if (!localOptions.drawLabels || (localOptions.faceLabels?.length === 0)) return;
|
||||
let l = localOptions.faceLabels.slice();
|
||||
l = replace(l, '[id]', f.id.toFixed(0));
|
||||
if (f.score) l = replace(l, '[score]', 100 * f.score);
|
||||
if (f.gender) l = replace(l, '[gender]', f.gender);
|
||||
if (f.genderScore) l = replace(l, '[genderScore]', 100 * f.genderScore);
|
||||
if (f.age) l = replace(l, '[age]', f.age);
|
||||
if (f.distance) l = replace(l, '[distance]', 100 * f.distance);
|
||||
if (f.real) l = replace(l, '[real]', 100 * f.real);
|
||||
if (f.live) l = replace(l, '[live]', 100 * f.live);
|
||||
if (opt.drawLabels) {
|
||||
// silly hack since fillText does not support new lines
|
||||
const labels:string[] = [];
|
||||
labels.push(`face: ${Math.trunc(100 * f.score)}%`);
|
||||
if (f.genderScore) labels.push(`${f.gender || ''} ${Math.trunc(100 * f.genderScore)}%`);
|
||||
if (f.age) labels.push(`age: ${f.age || ''}`);
|
||||
if (f.iris) labels.push(`distance: ${f.iris}`);
|
||||
if (f.real) labels.push(`real: ${Math.trunc(100 * f.real)}%`);
|
||||
if (f.live) labels.push(`live: ${Math.trunc(100 * f.live)}%`);
|
||||
if (f.emotion && f.emotion.length > 0) {
|
||||
const emotion = f.emotion.map((a) => `${Math.trunc(100 * a.score)}% ${a.emotion}`);
|
||||
if (emotion.length > 3) emotion.length = 3;
|
||||
l = replace(l, '[emotions]', emotion.join(' '));
|
||||
labels.push(emotion.join(' '));
|
||||
}
|
||||
if (f.rotation?.angle && f.rotation?.gaze) {
|
||||
if (f.rotation.angle.roll) labels.push(`roll: ${rad2deg(f.rotation.angle.roll)}° yaw:${rad2deg(f.rotation.angle.yaw)}° pitch:${rad2deg(f.rotation.angle.pitch)}°`);
|
||||
if (f.rotation.gaze.bearing) labels.push(`gaze: ${rad2deg(f.rotation.gaze.bearing)}°`);
|
||||
}
|
||||
if (labels.length === 0) labels.push('face');
|
||||
ctx.fillStyle = opt.color;
|
||||
for (let i = labels.length - 1; i >= 0; i--) {
|
||||
const x = Math.max(f.box[0], 0);
|
||||
const y = i * opt.lineHeight + f.box[1];
|
||||
if (opt.shadowColor && opt.shadowColor !== '') {
|
||||
ctx.fillStyle = opt.shadowColor;
|
||||
ctx.fillText(labels[i], x + 5, y + 16);
|
||||
}
|
||||
ctx.fillStyle = opt.labelColor;
|
||||
ctx.fillText(labels[i], x + 4, y + 15);
|
||||
}
|
||||
}
|
||||
if (f.rotation?.angle?.roll) l = replace(l, '[roll]', rad2deg(f.rotation.angle.roll));
|
||||
if (f.rotation?.angle?.yaw) l = replace(l, '[yaw]', rad2deg(f.rotation.angle.yaw));
|
||||
if (f.rotation?.angle?.pitch) l = replace(l, '[pitch]', rad2deg(f.rotation.angle.pitch));
|
||||
if (f.rotation?.gaze?.bearing) l = replace(l, '[gaze]', rad2deg(f.rotation.gaze.bearing));
|
||||
labels(ctx, l, f.box[0], f.box[1], localOptions);
|
||||
}
|
||||
|
||||
function drawIrisElipse(f: FaceResult, ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D) {
|
||||
// iris: array[center, left, top, right, bottom]
|
||||
if (f.annotations?.leftEyeIris && f.annotations?.leftEyeIris[0]) {
|
||||
ctx.strokeStyle = localOptions.useDepth ? 'rgba(255, 200, 255, 0.3)' : localOptions.color;
|
||||
ctx.strokeStyle = opt.useDepth ? 'rgba(255, 200, 255, 0.3)' : opt.color;
|
||||
ctx.beginPath();
|
||||
const sizeX = Math.abs(f.annotations.leftEyeIris[3][0] - f.annotations.leftEyeIris[1][0]) / 2;
|
||||
const sizeY = Math.abs(f.annotations.leftEyeIris[4][1] - f.annotations.leftEyeIris[2][1]) / 2;
|
||||
ctx.ellipse(f.annotations.leftEyeIris[0][0], f.annotations.leftEyeIris[0][1], sizeX, sizeY, 0, 0, 2 * Math.PI);
|
||||
ctx.stroke();
|
||||
if (localOptions.fillPolygons) {
|
||||
ctx.fillStyle = localOptions.useDepth ? 'rgba(255, 255, 200, 0.3)' : localOptions.color;
|
||||
if (opt.fillPolygons) {
|
||||
ctx.fillStyle = opt.useDepth ? 'rgba(255, 255, 200, 0.3)' : opt.color;
|
||||
ctx.fill();
|
||||
}
|
||||
}
|
||||
if (f.annotations?.rightEyeIris && f.annotations?.rightEyeIris[0]) {
|
||||
ctx.strokeStyle = localOptions.useDepth ? 'rgba(255, 200, 255, 0.3)' : localOptions.color;
|
||||
ctx.strokeStyle = opt.useDepth ? 'rgba(255, 200, 255, 0.3)' : opt.color;
|
||||
ctx.beginPath();
|
||||
const sizeX = Math.abs(f.annotations.rightEyeIris[3][0] - f.annotations.rightEyeIris[1][0]) / 2;
|
||||
const sizeY = Math.abs(f.annotations.rightEyeIris[4][1] - f.annotations.rightEyeIris[2][1]) / 2;
|
||||
ctx.ellipse(f.annotations.rightEyeIris[0][0], f.annotations.rightEyeIris[0][1], sizeX, sizeY, 0, 0, 2 * Math.PI);
|
||||
ctx.stroke();
|
||||
if (localOptions.fillPolygons) {
|
||||
ctx.fillStyle = localOptions.useDepth ? 'rgba(255, 255, 200, 0.3)' : localOptions.color;
|
||||
if (opt.fillPolygons) {
|
||||
ctx.fillStyle = opt.useDepth ? 'rgba(255, 255, 200, 0.3)' : opt.color;
|
||||
ctx.fill();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function drawGazeSpheres(f: FaceResult, ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D) {
|
||||
if (localOptions.drawGaze && f.rotation?.angle && typeof Path2D !== 'undefined') {
|
||||
if (opt.drawGaze && f.rotation?.angle && typeof Path2D !== 'undefined') {
|
||||
ctx.strokeStyle = 'pink';
|
||||
const valX = (f.box[0] + f.box[2] / 2) - (f.box[3] * rad2deg(f.rotation.angle.yaw) / 90);
|
||||
const valY = (f.box[1] + f.box[3] / 2) + (f.box[2] * rad2deg(f.rotation.angle.pitch) / 90);
|
||||
|
@ -84,7 +95,7 @@ function drawGazeSpheres(f: FaceResult, ctx: CanvasRenderingContext2D | Offscree
|
|||
}
|
||||
|
||||
function drawGazeArrows(f: FaceResult, ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D) {
|
||||
if (localOptions.drawGaze && f.rotation?.gaze.strength && f.rotation.gaze.bearing && f.annotations.leftEyeIris && f.annotations.rightEyeIris && f.annotations.leftEyeIris[0] && f.annotations.rightEyeIris[0]) {
|
||||
if (opt.drawGaze && f.rotation?.gaze.strength && f.rotation.gaze.bearing && f.annotations.leftEyeIris && f.annotations.rightEyeIris && f.annotations.leftEyeIris[0] && f.annotations.rightEyeIris[0]) {
|
||||
ctx.strokeStyle = 'pink';
|
||||
ctx.fillStyle = 'pink';
|
||||
const leftGaze = [
|
||||
|
@ -101,16 +112,16 @@ function drawGazeArrows(f: FaceResult, ctx: CanvasRenderingContext2D | Offscreen
|
|||
}
|
||||
|
||||
function drawFacePolygons(f: FaceResult, ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D) {
|
||||
if (localOptions.drawPolygons && f.mesh.length >= 468) {
|
||||
if (opt.drawPolygons && f.mesh.length >= 468) {
|
||||
ctx.lineWidth = 1;
|
||||
for (let i = 0; i < triangulation.length / 3; i++) {
|
||||
const points = [triangulation[i * 3 + 0], triangulation[i * 3 + 1], triangulation[i * 3 + 2]].map((index) => f.mesh[index]);
|
||||
lines(ctx, points, localOptions);
|
||||
lines(ctx, points, opt);
|
||||
}
|
||||
drawIrisElipse(f, ctx);
|
||||
}
|
||||
/*
|
||||
if (localOptions.drawPolygons && f.contours.length > 1) {
|
||||
if (opt.drawPolygons && f.contours.length > 1) {
|
||||
ctx.lineWidth = 5;
|
||||
lines(ctx, f.contours, opt);
|
||||
}
|
||||
|
@ -119,42 +130,33 @@ function drawFacePolygons(f: FaceResult, ctx: CanvasRenderingContext2D | Offscre
|
|||
}
|
||||
|
||||
function drawFacePoints(f: FaceResult, ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D) {
|
||||
if (localOptions.drawPoints) {
|
||||
if (f?.mesh.length >= 468) {
|
||||
if (opt.drawPoints && f.mesh.length >= 468) {
|
||||
for (let i = 0; i < f.mesh.length; i++) {
|
||||
point(ctx, f.mesh[i][0], f.mesh[i][1], f.mesh[i][2], localOptions);
|
||||
if (localOptions.drawAttention) {
|
||||
if (facemeshConstants.LANDMARKS_REFINEMENT_LIPS_CONFIG.includes(i)) point(ctx, f.mesh[i][0], f.mesh[i][1], (f.mesh[i][2] as number) + 127, localOptions);
|
||||
if (facemeshConstants.LANDMARKS_REFINEMENT_LEFT_EYE_CONFIG.includes(i)) point(ctx, f.mesh[i][0], f.mesh[i][1], (f.mesh[i][2] as number) - 127, localOptions);
|
||||
if (facemeshConstants.LANDMARKS_REFINEMENT_RIGHT_EYE_CONFIG.includes(i)) point(ctx, f.mesh[i][0], f.mesh[i][1], (f.mesh[i][2] as number) - 127, localOptions);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (const [k, v] of Object.entries(f?.annotations || {})) {
|
||||
if (!v?.[0]) continue;
|
||||
const pt = v[0];
|
||||
point(ctx, pt[0], pt[1], 0, localOptions);
|
||||
if (localOptions.drawLabels) labels(ctx, k, pt[0], pt[1], localOptions);
|
||||
point(ctx, f.mesh[i][0], f.mesh[i][1], f.mesh[i][2], opt);
|
||||
if (opt.drawAttention) {
|
||||
if (facemeshConstants.LANDMARKS_REFINEMENT_LIPS_CONFIG.includes(i)) point(ctx, f.mesh[i][0], f.mesh[i][1], (f.mesh[i][2] as number) + 127, opt);
|
||||
if (facemeshConstants.LANDMARKS_REFINEMENT_LEFT_EYE_CONFIG.includes(i)) point(ctx, f.mesh[i][0], f.mesh[i][1], (f.mesh[i][2] as number) - 127, opt);
|
||||
if (facemeshConstants.LANDMARKS_REFINEMENT_RIGHT_EYE_CONFIG.includes(i)) point(ctx, f.mesh[i][0], f.mesh[i][1], (f.mesh[i][2] as number) - 127, opt);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function drawFaceBoxes(f: FaceResult, ctx) {
|
||||
if (localOptions.drawBoxes) {
|
||||
rect(ctx, f.box[0], f.box[1], f.box[2], f.box[3], localOptions);
|
||||
if (opt.drawBoxes) {
|
||||
rect(ctx, f.box[0], f.box[1], f.box[2], f.box[3], opt);
|
||||
}
|
||||
}
|
||||
|
||||
/** draw detected faces */
|
||||
export function face(inCanvas: AnyCanvas, result: FaceResult[], drawOptions?: Partial<DrawOptions>) {
|
||||
localOptions = mergeDeep(options, drawOptions);
|
||||
opt = mergeDeep(options, drawOptions);
|
||||
if (!result || !inCanvas) return;
|
||||
const ctx = getCanvasContext(inCanvas) as CanvasRenderingContext2D;
|
||||
const ctx = getCanvasContext(inCanvas);
|
||||
if (!ctx) return;
|
||||
ctx.font = localOptions.font;
|
||||
ctx.strokeStyle = localOptions.color;
|
||||
ctx.fillStyle = localOptions.color;
|
||||
ctx.font = opt.font;
|
||||
ctx.strokeStyle = opt.color;
|
||||
ctx.fillStyle = opt.color;
|
||||
for (const f of result) {
|
||||
drawFaceBoxes(f, ctx);
|
||||
drawLabels(f, ctx);
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
import { mergeDeep } from '../util/util';
|
||||
import { getCanvasContext, replace, labels } from './primitives';
|
||||
import { getCanvasContext } from './primitives';
|
||||
import { options } from './options';
|
||||
import type { GestureResult } from '../result';
|
||||
import type { AnyCanvas, DrawOptions } from '../exports';
|
||||
|
@ -8,21 +8,25 @@ import type { AnyCanvas, DrawOptions } from '../exports';
|
|||
export function gesture(inCanvas: AnyCanvas, result: GestureResult[], drawOptions?: Partial<DrawOptions>) {
|
||||
const localOptions: DrawOptions = mergeDeep(options, drawOptions);
|
||||
if (!result || !inCanvas) return;
|
||||
if (localOptions.drawGestures && (localOptions.gestureLabels?.length > 0)) {
|
||||
const ctx = getCanvasContext(inCanvas) as CanvasRenderingContext2D;
|
||||
if (localOptions.drawGestures) {
|
||||
const ctx = getCanvasContext(inCanvas);
|
||||
if (!ctx) return;
|
||||
ctx.font = localOptions.font;
|
||||
ctx.fillStyle = localOptions.color;
|
||||
let i = 1;
|
||||
for (let j = 0; j < result.length; j++) {
|
||||
const [where, what] = Object.entries(result[j]);
|
||||
let where: unknown[] = []; // what&where is a record
|
||||
let what: unknown[] = []; // what&where is a record
|
||||
[where, what] = Object.entries(result[j]);
|
||||
if ((what.length > 1) && ((what[1] as string).length > 0)) {
|
||||
const who = where[1] as number > 0 ? `#${where[1]}` : '';
|
||||
let l = localOptions.gestureLabels.slice();
|
||||
l = replace(l, '[where]', where[0]);
|
||||
l = replace(l, '[who]', who);
|
||||
l = replace(l, '[what]', what[1]);
|
||||
labels(ctx, l, 8, 2 + (i * localOptions.lineHeight), localOptions);
|
||||
const label = `${where[0]} ${who}: ${what[1]}`;
|
||||
if (localOptions.shadowColor && localOptions.shadowColor !== '') {
|
||||
ctx.fillStyle = localOptions.shadowColor;
|
||||
ctx.fillText(label, 8, 2 + (i * localOptions.lineHeight));
|
||||
}
|
||||
ctx.fillStyle = localOptions.labelColor;
|
||||
ctx.fillText(label, 6, 0 + (i * localOptions.lineHeight));
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
import { mergeDeep } from '../util/util';
|
||||
import { getCanvasContext, rect, point, colorDepth, replace, labels } from './primitives';
|
||||
import { getCanvasContext, rect, point, colorDepth } from './primitives';
|
||||
import { options } from './options';
|
||||
import type { HandResult } from '../result';
|
||||
import type { AnyCanvas, DrawOptions, Point } from '../exports';
|
||||
|
@ -8,7 +8,7 @@ import type { AnyCanvas, DrawOptions, Point } from '../exports';
|
|||
export function hand(inCanvas: AnyCanvas, result: HandResult[], drawOptions?: Partial<DrawOptions>) {
|
||||
const localOptions: DrawOptions = mergeDeep(options, drawOptions);
|
||||
if (!result || !inCanvas) return;
|
||||
const ctx = getCanvasContext(inCanvas) as CanvasRenderingContext2D;
|
||||
const ctx = getCanvasContext(inCanvas);
|
||||
if (!ctx) return;
|
||||
ctx.lineJoin = 'round';
|
||||
ctx.font = localOptions.font;
|
||||
|
@ -17,12 +17,13 @@ export function hand(inCanvas: AnyCanvas, result: HandResult[], drawOptions?: Pa
|
|||
ctx.strokeStyle = localOptions.color;
|
||||
ctx.fillStyle = localOptions.color;
|
||||
rect(ctx, h.box[0], h.box[1], h.box[2], h.box[3], localOptions);
|
||||
if (localOptions.drawLabels && (localOptions.handLabels?.length > 0)) {
|
||||
let l = localOptions.handLabels.slice();
|
||||
l = replace(l, '[id]', h.id.toFixed(0));
|
||||
l = replace(l, '[label]', h.label);
|
||||
l = replace(l, '[score]', 100 * h.score);
|
||||
labels(ctx, l, h.box[0], h.box[1], localOptions);
|
||||
if (localOptions.drawLabels) {
|
||||
if (localOptions.shadowColor && localOptions.shadowColor !== '') {
|
||||
ctx.fillStyle = localOptions.shadowColor;
|
||||
ctx.fillText(`hand:${Math.trunc(100 * h.score)}%`, h.box[0] + 3, 1 + h.box[1] + localOptions.lineHeight, h.box[2]); // can use h.label
|
||||
}
|
||||
ctx.fillStyle = localOptions.labelColor;
|
||||
ctx.fillText(`hand:${Math.trunc(100 * h.score)}%`, h.box[0] + 2, 0 + h.box[1] + localOptions.lineHeight, h.box[2]); // can use h.label
|
||||
}
|
||||
ctx.stroke();
|
||||
}
|
||||
|
@ -34,12 +35,20 @@ export function hand(inCanvas: AnyCanvas, result: HandResult[], drawOptions?: Pa
|
|||
}
|
||||
}
|
||||
}
|
||||
if (localOptions.drawLabels && h.annotations && (localOptions.fingerLabels?.length > 0)) {
|
||||
for (const [part, pt] of Object.entries(h.annotations)) {
|
||||
let l = localOptions.fingerLabels.slice();
|
||||
l = replace(l, '[label]', part);
|
||||
labels(ctx, l, pt[pt.length - 1][0], pt[pt.length - 1][1], localOptions);
|
||||
}
|
||||
if (localOptions.drawLabels && h.annotations) {
|
||||
const addHandLabel = (part: Point[], title: string) => {
|
||||
if (!part || part.length === 0 || !part[0]) return;
|
||||
const z = part[part.length - 1][2] || -256;
|
||||
ctx.fillStyle = colorDepth(z, localOptions);
|
||||
ctx.fillText(title, part[part.length - 1][0] + 4, part[part.length - 1][1] + 4);
|
||||
};
|
||||
ctx.font = localOptions.font;
|
||||
addHandLabel(h.annotations.index, 'index');
|
||||
addHandLabel(h.annotations.middle, 'middle');
|
||||
addHandLabel(h.annotations.ring, 'ring');
|
||||
addHandLabel(h.annotations.pinky, 'pinky');
|
||||
addHandLabel(h.annotations.thumb, 'thumb');
|
||||
addHandLabel(h.annotations.palm, 'palm');
|
||||
}
|
||||
if (localOptions.drawPolygons && h.annotations) {
|
||||
const addHandLine = (part: Point[]) => {
|
||||
|
|
|
@ -1,18 +0,0 @@
|
|||
export const defaultLabels = {
|
||||
face: `face
|
||||
confidence: [score]%
|
||||
[gender] [genderScore]%
|
||||
age: [age] years
|
||||
distance: [distance]cm
|
||||
real: [real]%
|
||||
live: [live]%
|
||||
[emotions]
|
||||
roll: [roll]° yaw:[yaw]° pitch:[pitch]°
|
||||
gaze: [gaze]°`,
|
||||
body: 'body [score]%',
|
||||
bodyPart: '[label] [score]%',
|
||||
object: '[label] [score]%',
|
||||
hand: '[label] [score]%',
|
||||
finger: '[label]',
|
||||
gesture: '[where] [who]: [what]',
|
||||
};
|
|
@ -1,5 +1,5 @@
|
|||
import { mergeDeep } from '../util/util';
|
||||
import { getCanvasContext, rect, replace, labels } from './primitives';
|
||||
import { getCanvasContext, rect } from './primitives';
|
||||
import { options } from './options';
|
||||
import type { ObjectResult } from '../result';
|
||||
import type { AnyCanvas, DrawOptions } from '../exports';
|
||||
|
@ -8,7 +8,7 @@ import type { AnyCanvas, DrawOptions } from '../exports';
|
|||
export function object(inCanvas: AnyCanvas, result: ObjectResult[], drawOptions?: Partial<DrawOptions>) {
|
||||
const localOptions: DrawOptions = mergeDeep(options, drawOptions);
|
||||
if (!result || !inCanvas) return;
|
||||
const ctx = getCanvasContext(inCanvas) as CanvasRenderingContext2D;
|
||||
const ctx = getCanvasContext(inCanvas);
|
||||
if (!ctx) return;
|
||||
ctx.lineJoin = 'round';
|
||||
ctx.font = localOptions.font;
|
||||
|
@ -17,12 +17,14 @@ export function object(inCanvas: AnyCanvas, result: ObjectResult[], drawOptions?
|
|||
ctx.strokeStyle = localOptions.color;
|
||||
ctx.fillStyle = localOptions.color;
|
||||
rect(ctx, h.box[0], h.box[1], h.box[2], h.box[3], localOptions);
|
||||
if (localOptions.drawLabels && (localOptions.objectLabels?.length > 0)) {
|
||||
let l = localOptions.objectLabels.slice();
|
||||
l = replace(l, '[id]', h.id.toFixed(0));
|
||||
l = replace(l, '[label]', h.label);
|
||||
l = replace(l, '[score]', 100 * h.score);
|
||||
labels(ctx, l, h.box[0], h.box[1], localOptions);
|
||||
if (localOptions.drawLabels) {
|
||||
const label = `${h.label} ${Math.round(100 * h.score)}%`;
|
||||
if (localOptions.shadowColor && localOptions.shadowColor !== '') {
|
||||
ctx.fillStyle = localOptions.shadowColor;
|
||||
ctx.fillText(label, h.box[0] + 3, 1 + h.box[1] + localOptions.lineHeight, h.box[2]);
|
||||
}
|
||||
ctx.fillStyle = localOptions.labelColor;
|
||||
ctx.fillText(label, h.box[0] + 2, 0 + h.box[1] + localOptions.lineHeight, h.box[2]);
|
||||
}
|
||||
ctx.stroke();
|
||||
}
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
/** Draw Options
|
||||
* - Accessed via `human.draw.options` or passed to each draw method via the optional drawOptions parameter
|
||||
*/
|
||||
|
||||
export interface DrawOptions {
|
||||
/** draw line color */
|
||||
color: string,
|
||||
|
@ -41,20 +40,6 @@ export interface DrawOptions {
|
|||
useDepth: boolean,
|
||||
/** should lines be curved? */
|
||||
useCurves: boolean,
|
||||
/** string template for face labels */
|
||||
faceLabels: string,
|
||||
/** string template for body labels */
|
||||
bodyLabels: string,
|
||||
/** string template for body part labels */
|
||||
bodyPartLabels: string,
|
||||
/** string template for hand labels */
|
||||
handLabels: string,
|
||||
/** string template for finger labels */
|
||||
fingerLabels: string,
|
||||
/** string template for object labels */
|
||||
objectLabels: string,
|
||||
/** string template for gesture labels */
|
||||
gestureLabels: string,
|
||||
}
|
||||
|
||||
/** currently set draw options {@link DrawOptions} */
|
||||
|
@ -78,11 +63,4 @@ export const options: DrawOptions = {
|
|||
fillPolygons: false as boolean,
|
||||
useDepth: true as boolean,
|
||||
useCurves: false as boolean,
|
||||
faceLabels: '' as string,
|
||||
bodyLabels: '' as string,
|
||||
bodyPartLabels: '' as string,
|
||||
objectLabels: '' as string,
|
||||
handLabels: '' as string,
|
||||
fingerLabels: '' as string,
|
||||
gestureLabels: '' as string,
|
||||
};
|
||||
|
|
|
@ -7,7 +7,7 @@ export const getCanvasContext = (input: AnyCanvas) => {
|
|||
if (!input) log('draw error: invalid canvas');
|
||||
else if (!input.getContext) log('draw error: canvas context not defined');
|
||||
else {
|
||||
const ctx = input.getContext('2d', { willReadFrequently: true });
|
||||
const ctx = input.getContext('2d');
|
||||
if (!ctx) log('draw error: cannot get canvas context');
|
||||
else return ctx;
|
||||
}
|
||||
|
@ -16,28 +16,12 @@ export const getCanvasContext = (input: AnyCanvas) => {
|
|||
|
||||
export const rad2deg = (theta: number) => Math.round((theta * 180) / Math.PI);
|
||||
|
||||
export const replace = (str: string, source: string, target: string | number) => str.replace(source, typeof target === 'number' ? target.toFixed(1) : target);
|
||||
|
||||
export const colorDepth = (z: number | undefined, opt: DrawOptions): string => { // performance optimization needed
|
||||
if (!opt.useDepth || typeof z === 'undefined') return opt.color;
|
||||
const rgb = Uint8ClampedArray.from([127 + (2 * z), 127 - (2 * z), 255]);
|
||||
return `rgba(${rgb[0]}, ${rgb[1]}, ${rgb[2]}, ${opt.alpha})`;
|
||||
};
|
||||
|
||||
export function labels(ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D, str: string, startX: number, startY: number, localOptions: DrawOptions) {
|
||||
const line: string[] = str.replace(/\[.*\]/g, '').split('\n').map((l) => l.trim()); // remove unmatched templates and split into array
|
||||
const x = Math.max(0, startX);
|
||||
for (let i = line.length - 1; i >= 0; i--) {
|
||||
const y = i * localOptions.lineHeight + startY;
|
||||
if (localOptions.shadowColor && localOptions.shadowColor !== '') {
|
||||
ctx.fillStyle = localOptions.shadowColor;
|
||||
ctx.fillText(line[i], x + 5, y + 16);
|
||||
}
|
||||
ctx.fillStyle = localOptions.labelColor;
|
||||
ctx.fillText(line[i], x + 4, y + 15);
|
||||
}
|
||||
}
|
||||
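The `replace` and `labels` helpers above implement the label-template mechanism used by the draw modules: placeholders such as `[score]` or `[label]` are substituted per result, placeholders left unmatched are stripped by the `\[.*\]` regex, and each remaining line is drawn with a shadow pass followed by a label pass. A small standalone sketch of the substitution step (no canvas; example values only, and the empty-line filter is an addition of this sketch):

```ts
// Same substitution rule as the replace() helper above: numbers are formatted to one decimal.
const substitute = (template: string, source: string, target: string | number): string =>
  template.replace(source, typeof target === 'number' ? target.toFixed(1) : target);

let label = 'body [score]%\n[unused]';
label = substitute(label, '[score]', 100 * 0.874); // -> 'body 87.4%\n[unused]'
const lines = label
  .replace(/\[.*\]/g, '')                          // strip placeholders that were never filled
  .split('\n')
  .map((l) => l.trim())
  .filter((l) => l.length > 0);
// lines -> ['body 87.4%']
```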
|
||||
export function point(ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D, x: number, y: number, z: number | undefined, localOptions: DrawOptions) {
|
||||
ctx.fillStyle = colorDepth(z, localOptions);
|
||||
ctx.beginPath();
|
||||
|
|
|
@ -6,10 +6,8 @@ export * from './config';
|
|||
/* Export results details */
|
||||
export * from './result';
|
||||
|
||||
/**
|
||||
* Explict reexport of main @tensorflow/tfjs types
|
||||
*/
|
||||
export type { Tensor, Tensor1D, Tensor2D, Tensor3D, Tensor4D, TensorLike, GraphModel, Rank } from './tfjs/types';
|
||||
/* Explict reexport of main @tensorflow/tfjs types */
|
||||
export type { Tensor, TensorLike, GraphModel, Rank } from './tfjs/types';
|
||||
|
||||
// re-export types
|
||||
export type { DrawOptions } from './draw/options';
|
||||
|
@ -21,6 +19,8 @@ export type { WebCam, WebCamConfig } from './util/webcam';
|
|||
// export type { Models, ModelStats, KernelOps } from './models';
|
||||
export type { ModelInfo } from './tfjs/load';
|
||||
|
||||
// define enum types
|
||||
|
||||
/** Events dispatched by `human.events`
|
||||
* - `create`: triggered when Human object is instantiated
|
||||
* - `load`: triggered when models are loaded (explicitly or on-demand)
|
||||
|
|
|
@ -60,9 +60,7 @@ export const calculateFaceAngle = (face: FaceResult, imageSize: [number, number]
|
|||
let thetaZ: number;
|
||||
if (r10 < 1) { // YZX calculation
|
||||
if (r10 > -1) {
|
||||
// thetaZ = Math.asin(r10);
|
||||
const cosThetaZ = Math.sqrt(r00 * r00 + r20 * r20); // <https://github.com/vladmandic/human/issues/464>
|
||||
thetaZ = Math.atan2(r10, cosThetaZ);
|
||||
thetaZ = Math.asin(r10);
|
||||
thetaY = Math.atan2(-r20, r00);
|
||||
thetaX = Math.atan2(-r12, r11);
|
||||
} else {
|
||||
|
@ -78,9 +76,9 @@ export const calculateFaceAngle = (face: FaceResult, imageSize: [number, number]
|
|||
if (Number.isNaN(thetaX)) thetaX = 0;
|
||||
if (Number.isNaN(thetaY)) thetaY = 0;
|
||||
if (Number.isNaN(thetaZ)) thetaZ = 0;
|
||||
// return { pitch: 2 * -thetaX, yaw: 2 * -thetaY, roll: 2 * -thetaZ };
|
||||
return { pitch: -thetaX, yaw: -thetaY, roll: -thetaZ };
|
||||
return { pitch: 2 * -thetaX, yaw: 2 * -thetaY, roll: 2 * -thetaZ };
|
||||
};
|
||||
|
||||
/*
|
||||
const meshToEulerAngle = (mesh) => { // simple Euler angle calculation based existing 3D mesh
|
||||
const radians = (a1, a2, b1, b2) => Math.atan2(b2 - a2, b1 - a1);
|
||||
|
|
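One side of the hunk above computes the roll angle via `Math.asin(r10)`, the other via `Math.atan2(r10, Math.sqrt(r00 * r00 + r20 * r20))` (see the linked issue #464). For a perfectly orthonormal rotation matrix the two agree, but the mesh-derived matrix is only approximately orthonormal: `asin` returns NaN as soon as noise pushes `r10` outside [-1, 1], while the `atan2` form degrades gracefully. A tiny illustration with made-up values:

```ts
// Clean rotation row: sin = 0.5, cos = sqrt(0.6^2 + 0.6245^2) ~ 0.866
const exact = { asin: Math.asin(0.5), atan2: Math.atan2(0.5, Math.sqrt(0.6 * 0.6 + 0.6245 * 0.6245)) };
// exact.asin ~ 0.5236 rad, exact.atan2 ~ 0.5236 rad - identical for clean input

// Noisy "rotation" matrix where r10 drifted past 1 (cos term shrinks accordingly):
const noisy = { asin: Math.asin(1.02), atan2: Math.atan2(1.02, 0.05) };
// noisy.asin -> NaN, noisy.atan2 -> ~1.52 rad - still a usable angle
```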
|
@ -1,28 +0,0 @@
|
|||
import type { FaceResult } from '../result';
|
||||
|
||||
export function calculateCameraDistance(face: FaceResult, width: number): number {
|
||||
// iris points are [center, left, top, right, bottom]
|
||||
// average size of human iris is 11.7mm - fairly constant for all ages/genders/races
|
||||
const f = face?.annotations;
|
||||
if (!f?.leftEyeIris || !f?.rightEyeIris) return 0;
|
||||
// get size of left and right iris in pixels, pick the larger one as it's likely to be more accurate, and normalize to 0..1 range instead of pixels
|
||||
const irisSize = Math.max(Math.abs(f.leftEyeIris[3][0] - f.leftEyeIris[1][0]), Math.abs(f.rightEyeIris[3][0] - f.rightEyeIris[1][0])) / width;
|
||||
// distance of eye from camera in meters
|
||||
const cameraDistance = Math.round(1.17 / irisSize) / 100;
|
||||
return cameraDistance;
|
||||
}
|
||||
|
||||
export function calculateEyesDistance(face: FaceResult, width: number): number {
|
||||
// average distance between eyes is 65mm - fairly constant for typical adult male, but varies otherwise
|
||||
const f = face?.annotations;
|
||||
if (!f?.leftEyeIris || !f?.rightEyeIris) return 0;
|
||||
// get size of left and right iris in pixels, pick larger one as its likely to be more accurate and normalize to 0..1 range instead of pixels
|
||||
const irisSize = Math.max(Math.abs(f.leftEyeIris[3][0] - f.leftEyeIris[1][0]), Math.abs(f.rightEyeIris[3][0] - f.rightEyeIris[1][0])) / width;
|
||||
// pixel x and y distance of centers of left and right iris, you can use edges instead
|
||||
const irisDistanceXY = [f.leftEyeIris[0][0] - f.rightEyeIris[0][0], f.leftEyeIris[0][1] - f.rightEyeIris[0][1]];
|
||||
// absolute distance between eyes in 0..1 range to account for head pitch (we can ignore yaw)
|
||||
const irisDistance = Math.sqrt((irisDistanceXY[0] * irisDistanceXY[0]) + (irisDistanceXY[1] * irisDistanceXY[1])) / width;
|
||||
// distance between eyes in meters
|
||||
const eyesDistance = Math.round(1.17 * irisDistance / irisSize) / 100;
|
||||
return eyesDistance;
|
||||
}
|
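The distance helpers shown in this hunk rely on the ~11.7 mm average iris diameter: the measured iris width as a fraction of the frame width is inverted to give camera distance in meters (`Math.round(1.17 / irisSize) / 100`). A worked sketch of that arithmetic with a hypothetical measurement:

```ts
// Hypothetical measurement: iris spans 26 px in a 1280 px wide frame.
const irisPixels = 26;
const frameWidth = 1280;
const irisSize = irisPixels / frameWidth;                 // ~0.0203 (fraction of frame width)
const cameraDistance = Math.round(1.17 / irisSize) / 100; // -> 0.58, i.e. roughly 0.58 m from the camera
```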
|
@ -2,10 +2,10 @@
|
|||
* Anti-spoofing model implementation
|
||||
*/
|
||||
|
||||
import * as tf from 'dist/tfjs.esm.js';
|
||||
import { log, now } from '../util/util';
|
||||
import type { Config } from '../config';
|
||||
import type { GraphModel, Tensor, Tensor4D } from '../tfjs/types';
|
||||
import type { GraphModel, Tensor } from '../tfjs/types';
|
||||
import * as tf from '../../dist/tfjs.esm.js';
|
||||
import { loadModel } from '../tfjs/load';
|
||||
import { env } from '../util/env';
|
||||
|
||||
|
@ -22,8 +22,8 @@ export async function load(config: Config): Promise<GraphModel> {
|
|||
return model;
|
||||
}
|
||||
|
||||
export async function predict(image: Tensor4D, config: Config, idx: number, count: number): Promise<number> {
|
||||
if (!model?.['executor']) return 0;
|
||||
export async function predict(image: Tensor, config: Config, idx: number, count: number): Promise<number> {
|
||||
if (!model || !model?.['executor']) return 0;
|
||||
const skipTime = (config.face.antispoof?.skipTime || 0) > (now() - lastTime);
|
||||
const skipFrame = skipped < (config.face.antispoof?.skipFrames || 0);
|
||||
if (config.skipAllowed && skipTime && skipFrame && (lastCount === count) && cached[idx]) {
|
||||
|
|
|
@ -3,23 +3,24 @@
|
|||
* See `facemesh.ts` for entry point
|
||||
*/
|
||||
|
||||
import * as tf from 'dist/tfjs.esm.js';
|
||||
import { log } from '../util/util';
|
||||
import * as tf from '../../dist/tfjs.esm.js';
|
||||
import * as util from './facemeshutil';
|
||||
import { loadModel } from '../tfjs/load';
|
||||
import { constants } from '../tfjs/constants';
|
||||
import type { Config } from '../config';
|
||||
import type { Tensor, GraphModel, Tensor1D, Tensor2D, Tensor4D } from '../tfjs/types';
|
||||
import type { Tensor, GraphModel } from '../tfjs/types';
|
||||
import { env } from '../util/env';
|
||||
import type { Point } from '../result';
|
||||
|
||||
const keypointsCount = 6;
|
||||
const faceBoxScaleFactor = 1.4;
|
||||
let model: GraphModel | null;
|
||||
let anchors: Tensor | null = null;
|
||||
let inputSize = 0;
|
||||
let inputSizeT: Tensor | null = null;
|
||||
|
||||
export interface DetectBox { startPoint: Point, endPoint: Point, landmarks: Point[], confidence: number, size: [number, number] }
|
||||
interface DetectBox { startPoint: Point, endPoint: Point, landmarks: Point[], confidence: number }
|
||||
|
||||
export const size = () => inputSize;
|
||||
|
||||
|
@ -34,7 +35,6 @@ export async function load(config: Config): Promise<GraphModel> {
|
|||
}
|
||||
|
||||
function decodeBoxes(boxOutputs: Tensor) {
|
||||
if (!anchors || !inputSizeT) return tf.zeros([0, 0]);
|
||||
const t: Record<string, Tensor> = {};
|
||||
t.boxStarts = tf.slice(boxOutputs, [0, 1], [-1, 2]);
|
||||
t.centers = tf.add(t.boxStarts, anchors);
|
||||
|
@ -46,36 +46,25 @@ function decodeBoxes(boxOutputs: Tensor) {
|
|||
t.ends = tf.add(t.centersNormalized, t.halfBoxSize);
|
||||
t.startNormalized = tf.mul(t.starts, inputSizeT);
|
||||
t.endNormalized = tf.mul(t.ends, inputSizeT);
|
||||
const boxes = tf.concat2d([t.startNormalized as Tensor2D, t.endNormalized as Tensor2D], 1);
|
||||
const boxes = tf.concat2d([t.startNormalized, t.endNormalized], 1);
|
||||
Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
|
||||
return boxes;
|
||||
}
export async function getBoxes(inputImage: Tensor4D, config: Config): Promise<DetectBox[]> {
export async function getBoxes(inputImage: Tensor, config: Config) {
// sanity check on input
if ((!inputImage) || (inputImage['isDisposedInternal']) || (inputImage.shape.length !== 4) || (inputImage.shape[1] < 1) || (inputImage.shape[2] < 1)) return [];
const t: Record<string, Tensor> = {};
let pad = [0, 0];
let scale = [1, 1];
if (config?.face?.detector?.square) {
const xy = Math.max(inputImage.shape[2], inputImage.shape[1]);
pad = [Math.floor((xy - inputImage.shape[2]) / 2), Math.floor((xy - inputImage.shape[1]) / 2)];
t.padded = tf.pad(inputImage, [[0, 0], [pad[1], pad[1]], [pad[0], pad[0]], [0, 0]]);
scale = [inputImage.shape[2] / xy, inputImage.shape[1] / xy];
pad = [pad[0] / inputSize, pad[1] / inputSize];
} else {
t.padded = inputImage.clone();
}
t.resized = tf.image.resizeBilinear(t.padded as Tensor4D, [inputSize, inputSize]);
t.resized = tf.image.resizeBilinear(inputImage, [inputSize, inputSize]);
t.div = tf.div(t.resized, constants.tf127);
t.normalized = tf.sub(t.div, constants.tf1);
t.normalized = tf.sub(t.div, constants.tf05);
const res = model?.execute(t.normalized) as Tensor[];
if (Array.isArray(res) && res.length > 2) { // pinto converted model?
const sorted = res.sort((a, b) => a.size - b.size);
t.concat384 = tf.concat([sorted[0], sorted[2]], 2); // dim: 384, 1 + 16
t.concat512 = tf.concat([sorted[1], sorted[3]], 2); // dim: 512, 1 + 16
t.concat = tf.concat([t.concat512, t.concat384], 1);
t.batch = tf.squeeze(t.concat, [0]);
t.batch = tf.squeeze(t.concat, 0);
} else if (Array.isArray(res)) { // new facemesh-detection tfhub model
t.batch = tf.squeeze(res[0]);
} else { // original blazeface tfhub model
@@ -86,7 +75,7 @@ export async function getBoxes(inputImage: Tensor4D, config: Config): Promise<De
t.logits = tf.slice(t.batch, [0, 0], [-1, 1]);
t.sigmoid = tf.sigmoid(t.logits);
t.scores = tf.squeeze(t.sigmoid);
t.nms = await tf.image.nonMaxSuppressionAsync(t.boxes as Tensor2D, t.scores as Tensor1D, (config.face.detector?.maxDetected || 0), (config.face.detector?.iouThreshold || 0), (config.face.detector?.minConfidence || 0));
t.nms = await tf.image.nonMaxSuppressionAsync(t.boxes, t.scores, (config.face.detector?.maxDetected || 0), (config.face.detector?.iouThreshold || 0), (config.face.detector?.minConfidence || 0));
const nms = await t.nms.array() as number[];
const boxes: DetectBox[] = [];
const scores = await t.scores.data();
@@ -99,24 +88,16 @@ export async function getBoxes(inputImage: Tensor4D, config: Config): Promise<De
b.squeeze = tf.squeeze(b.slice);
b.landmarks = tf.reshape(b.squeeze, [keypointsCount, -1]);
const points = await b.bbox.data();
const unpadded = [ // TODO fix this math
points[0] * scale[0] - pad[0],
points[1] * scale[1] - pad[1],
points[2] * scale[0] - pad[0],
points[3] * scale[1] - pad[1],
];
const rawBox = {
startPoint: [unpadded[0], unpadded[1]] as Point,
endPoint: [unpadded[2], unpadded[3]] as Point,
startPoint: [points[0], points[1]] as Point,
endPoint: [points[2], points[3]] as Point,
landmarks: (await b.landmarks.array()) as Point[],
confidence,
};
b.anchor = tf.slice(anchors as Tensor, [nms[i], 0], [1, 2]);
const anchor = await b.anchor.data();
const scaledBox = util.scaleBoxCoordinates(rawBox, [(inputImage.shape[2] || 0) / inputSize, (inputImage.shape[1] || 0) / inputSize], anchor);
const enlargedBox = util.enlargeBox(scaledBox, config.face.detector?.scale || 1.4);
const scaledBox = util.scaleBoxCoordinates(rawBox, [(inputImage.shape[2] || 0) / inputSize, (inputImage.shape[1] || 0) / inputSize]);
const enlargedBox = util.enlargeBox(scaledBox, config.face['scale'] || faceBoxScaleFactor);
const squaredBox = util.squarifyBox(enlargedBox);
if (squaredBox.size[0] > (config.face.detector?.['minSize'] || 0) && squaredBox.size[1] > (config.face.detector?.['minSize'] || 0)) boxes.push(squaredBox);
boxes.push(squaredBox);
Object.keys(b).forEach((tensor) => tf.dispose(b[tensor]));
}
}
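The square-padding branch added to getBoxes above keeps two pieces of bookkeeping, scale and pad, and later undoes them on the detector output (the source itself flags that unpadding math with a TODO). A minimal sketch of that bookkeeping in plain TypeScript, with hypothetical names:

// Hypothetical sketch of the pad-to-square bookkeeping used by getBoxes above (no tfjs).
function padToSquareSketch(width: number, height: number, inputSize: number) {
  const xy = Math.max(width, height); // side of the square the image is padded into
  const padPx: [number, number] = [Math.floor((xy - width) / 2), Math.floor((xy - height) / 2)];
  const scale: [number, number] = [width / xy, height / xy]; // fraction of the square covered per axis
  const pad: [number, number] = [padPx[0] / inputSize, padPx[1] / inputSize]; // pad expressed in model-input units, as in the diff
  return { scale, pad };
}
// a detector coordinate p is then mapped back per axis as p * scale - pad, mirroring the `unpadded` block above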
@@ -3,9 +3,9 @@
* Uses FaceMesh, Emotion and FaceRes models to create a unified pipeline
*/
import * as tf from 'dist/tfjs.esm.js';
import { log, now } from '../util/util';
import { env } from '../util/env';
import * as tf from '../../dist/tfjs.esm.js';
import * as facemesh from './facemesh';
import * as emotion from '../gear/emotion';
import * as faceres from './faceres';
@@ -18,14 +18,13 @@ import * as ssrnetGender from '../gear/ssrnet-gender';
import * as mobilefacenet from './mobilefacenet';
import * as insightface from './insightface';
import type { FaceResult, Emotion, Gender, Race } from '../result';
import type { Tensor4D } from '../tfjs/types';
import type { Tensor } from '../tfjs/types';
import type { Human } from '../human';
import { calculateFaceAngle } from './angles';
import { calculateCameraDistance } from './anthropometry';
interface DescRes { age: number, gender: Gender, genderScore: number, descriptor: number[], race?: { score: number, race: Race }[] }
export const detectFace = async (instance: Human /* instance of human */, input: Tensor4D): Promise<FaceResult[]> => {
export const detectFace = async (instance: Human /* instance of human */, input: Tensor): Promise<FaceResult[]> => {
// run facemesh, includes blazeface and iris
let timeStamp: number = now();
let ageRes: { age: number } | Promise<{ age: number }> | null;
@@ -40,7 +39,8 @@ export const detectFace = async (instance: Human /* instance of human */, input:
const faceRes: FaceResult[] = [];
instance.state = 'run:face';
const faces: FaceResult[] = await facemesh.predict(input, instance.config);
const faces = await facemesh.predict(input, instance.config);
instance.performance.face = env.perfadd ? (instance.performance.face || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
if (!input.shape || input.shape.length !== 4) return [];
if (!faces) return [];
@@ -68,11 +68,11 @@ export const detectFace = async (instance: Human /* instance of human */, input:
// run emotion, inherits face from blazeface
instance.analyze('Start Emotion:');
if (instance.config.async) {
emotionRes = instance.config.face.emotion?.enabled ? emotion.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : [];
emotionRes = instance.config.face.emotion?.enabled ? emotion.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : [];
} else {
instance.state = 'run:emotion';
timeStamp = now();
emotionRes = instance.config.face.emotion?.enabled ? await emotion.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : [];
emotionRes = instance.config.face.emotion?.enabled ? await emotion.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : [];
instance.performance.emotion = env.perfadd ? (instance.performance.emotion || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
}
instance.analyze('End Emotion:');
@@ -80,11 +80,11 @@ export const detectFace = async (instance: Human /* instance of human */, input:
// run antispoof, inherits face from blazeface
instance.analyze('Start AntiSpoof:');
if (instance.config.async) {
antispoofRes = instance.config.face.antispoof?.enabled ? antispoof.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : 0;
antispoofRes = instance.config.face.antispoof?.enabled ? antispoof.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : 0;
} else {
instance.state = 'run:antispoof';
timeStamp = now();
antispoofRes = instance.config.face.antispoof?.enabled ? await antispoof.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : 0;
antispoofRes = instance.config.face.antispoof?.enabled ? await antispoof.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : 0;
instance.performance.antispoof = env.perfadd ? (instance.performance.antispoof || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
}
instance.analyze('End AntiSpoof:');
@@ -92,11 +92,11 @@ export const detectFace = async (instance: Human /* instance of human */, input:
// run liveness, inherits face from blazeface
instance.analyze('Start Liveness:');
if (instance.config.async) {
livenessRes = instance.config.face.liveness?.enabled ? liveness.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : 0;
livenessRes = instance.config.face.liveness?.enabled ? liveness.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : 0;
} else {
instance.state = 'run:liveness';
timeStamp = now();
livenessRes = instance.config.face.liveness?.enabled ? await liveness.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : 0;
livenessRes = instance.config.face.liveness?.enabled ? await liveness.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : 0;
instance.performance.liveness = env.perfadd ? (instance.performance.antispoof || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
}
instance.analyze('End Liveness:');
@@ -104,25 +104,25 @@ export const detectFace = async (instance: Human /* instance of human */, input:
// run gear, inherits face from blazeface
instance.analyze('Start GEAR:');
if (instance.config.async) {
gearRes = instance.config.face.gear?.enabled ? gear.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
gearRes = instance.config.face.gear?.enabled ? gear.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
} else {
instance.state = 'run:gear';
timeStamp = now();
gearRes = instance.config.face.gear?.enabled ? await gear.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
gearRes = instance.config.face.gear?.enabled ? await gear.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
instance.performance.gear = Math.trunc(now() - timeStamp);
}
instance.analyze('End GEAR:');
// run ssrnet, inherits face from blazeface
// run gear, inherits face from blazeface
instance.analyze('Start SSRNet:');
if (instance.config.async) {
ageRes = instance.config.face['ssrnet']?.enabled ? ssrnetAge.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
genderRes = instance.config.face['ssrnet']?.enabled ? ssrnetGender.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
ageRes = instance.config.face['ssrnet']?.enabled ? ssrnetAge.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
genderRes = instance.config.face['ssrnet']?.enabled ? ssrnetGender.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
} else {
instance.state = 'run:ssrnet';
timeStamp = now();
ageRes = instance.config.face['ssrnet']?.enabled ? await ssrnetAge.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
genderRes = instance.config.face['ssrnet']?.enabled ? await ssrnetGender.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
ageRes = instance.config.face['ssrnet']?.enabled ? await ssrnetAge.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
genderRes = instance.config.face['ssrnet']?.enabled ? await ssrnetGender.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
instance.performance.ssrnet = Math.trunc(now() - timeStamp);
}
instance.analyze('End SSRNet:');
@@ -130,11 +130,11 @@ export const detectFace = async (instance: Human /* instance of human */, input:
// run mobilefacenet alternative, inherits face from blazeface
instance.analyze('Start MobileFaceNet:');
if (instance.config.async) {
mobilefacenetRes = instance.config.face['mobilefacenet']?.enabled ? mobilefacenet.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
mobilefacenetRes = instance.config.face['mobilefacenet']?.enabled ? mobilefacenet.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
} else {
instance.state = 'run:mobilefacenet';
timeStamp = now();
mobilefacenetRes = instance.config.face['mobilefacenet']?.enabled ? await mobilefacenet.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
mobilefacenetRes = instance.config.face['mobilefacenet']?.enabled ? await mobilefacenet.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
instance.performance.mobilefacenet = Math.trunc(now() - timeStamp);
}
instance.analyze('End MobileFaceNet:');
@@ -142,11 +142,11 @@ export const detectFace = async (instance: Human /* instance of human */, input:
// run insightface alternative, inherits face from blazeface
instance.analyze('Start InsightFace:');
if (instance.config.async) {
insightfaceRes = instance.config.face['insightface']?.enabled ? insightface.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
insightfaceRes = instance.config.face['insightface']?.enabled ? insightface.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
} else {
instance.state = 'run:mobilefacenet';
timeStamp = now();
insightfaceRes = instance.config.face['insightface']?.enabled ? await insightface.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
insightfaceRes = instance.config.face['insightface']?.enabled ? await insightface.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
instance.performance.mobilefacenet = Math.trunc(now() - timeStamp);
}
instance.analyze('End InsightFace:');
@@ -154,11 +154,11 @@ export const detectFace = async (instance: Human /* instance of human */, input:
// run faceres, inherits face from blazeface
instance.analyze('Start Description:');
if (instance.config.async) {
descRes = faceres.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length);
descRes = faceres.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length);
} else {
instance.state = 'run:description';
timeStamp = now();
descRes = await faceres.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length);
descRes = await faceres.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length);
instance.performance.description = env.perfadd ? (instance.performance.description || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
}
instance.analyze('End Description:');
@@ -194,10 +194,20 @@ export const detectFace = async (instance: Human /* instance of human */, input:
(descRes as DescRes).descriptor = insightfaceRes as number[];
}
const irisSize = instance.config.face.iris?.enabled ? calculateCameraDistance(faces[i], input.shape[2]) : 0;
// calculate iris distance
// iris: array[ center, left, top, right, bottom]
if (!instance.config.face.iris?.enabled) {
// if (faces[i]?.annotations?.leftEyeIris) delete faces[i].annotations.leftEyeIris;
// if (faces[i]?.annotations?.rightEyeIris) delete faces[i].annotations.rightEyeIris;
}
const irisSize = (faces[i]?.annotations?.leftEyeIris?.[0] && faces[i]?.annotations?.rightEyeIris?.[0]
&& (faces[i].annotations.leftEyeIris.length > 0) && (faces[i].annotations.rightEyeIris.length > 0)
&& (faces[i].annotations.leftEyeIris[0] !== null) && (faces[i].annotations.rightEyeIris[0] !== null))
? Math.max(Math.abs(faces[i].annotations.leftEyeIris[3][0] - faces[i].annotations.leftEyeIris[1][0]), Math.abs(faces[i].annotations.rightEyeIris[4][1] - faces[i].annotations.rightEyeIris[2][1])) / input.shape[2]
: 0; // note: average human iris size is 11.7mm
// optionally return tensor
const tensor = instance.config.face.detector?.return ? tf.squeeze(faces[i].tensor as Tensor4D) : null;
const tensor = instance.config.face.detector?.return ? tf.squeeze(faces[i].tensor) : null;
// dispose original face tensor
tf.dispose(faces[i].tensor);
// delete temp face image
@@ -215,7 +225,7 @@ export const detectFace = async (instance: Human /* instance of human */, input:
if (emotionRes) res.emotion = emotionRes as { score: number, emotion: Emotion }[];
if (antispoofRes) res.real = antispoofRes as number;
if (livenessRes) res.live = livenessRes as number;
if (irisSize > 0) res.distance = irisSize;
if (irisSize && irisSize !== 0) res.iris = Math.trunc(500 / irisSize / 11.7) / 100;
if (rotation) res.rotation = rotation;
if (tensor) res.tensor = tensor;
faceRes.push(res);
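The iris-distance change above replaces the inline annotation math with calculateCameraDistance, while the older branch derives a relative iris size from the eye annotations and converts it with the expression stored in res.iris. A small standalone restatement of that conversion (the function name is illustrative, the expression and the 11.7mm constant come from the diff):

// Hypothetical wrapper around the conversion shown above; irisSize is the iris diameter
// as a fraction of image width, and 11.7mm is the average human iris diameter noted in the source.
function irisToDistanceSketch(irisSize: number): number {
  if (!irisSize || irisSize <= 0) return 0;
  return Math.trunc(500 / irisSize / 11.7) / 100; // same expression as `res.iris` above
}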
@@ -1,7 +1,7 @@
// https://github.com/TropComplique/FaceBoxes-tensorflow
import * as tf from 'dist/tfjs.esm.js';
import { log } from '../util/util';
import * as tf from '../../dist/tfjs.esm.js';
import { loadModel } from '../tfjs/load';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
@@ -9,11 +9,13 @@ import type { Config } from '../config';
type Box = [number, number, number, number];
export class FaceBoxes {
enlarge: number;
model: GraphModel;
config: Config;
inputSize: 0;
constructor(model, config: Config) {
this.enlarge = 1.1;
this.model = model;
this.config = config;
this.inputSize = model.inputs[0].shape ? model.inputs[0].shape[2] : 0;
@@ -21,23 +23,22 @@ export class FaceBoxes {
async estimateFaces(input, config) {
if (config) this.config = config;
const enlarge = this.config.face.detector?.minConfidence || 0.1;
const results: { confidence: number, box: Box, boxRaw: Box, image: Tensor }[] = [];
const resizeT = tf.image.resizeBilinear(input, [this.inputSize, this.inputSize]);
const castT = resizeT.toInt();
const [scoresT, boxesT, numT] = await this.model.executeAsync(castT) as Tensor[];
const scores = await scoresT.data();
const squeezeT = tf.squeeze(boxesT);
const boxes = squeezeT.arraySync() as number[][];
const boxes = squeezeT.arraySync();
scoresT.dispose();
boxesT.dispose();
squeezeT.dispose();
numT.dispose();
castT.dispose();
resizeT.dispose();
for (let i = 0; i < boxes.length; i++) {
for (const i in boxes) {
if (scores[i] && scores[i] > (this.config.face.detector?.minConfidence || 0.1)) {
const crop = [boxes[i][0] / enlarge, boxes[i][1] / enlarge, boxes[i][2] * enlarge, boxes[i][3] * enlarge];
const crop = [boxes[i][0] / this.enlarge, boxes[i][1] / this.enlarge, boxes[i][2] * this.enlarge, boxes[i][3] * this.enlarge];
const boxRaw: Box = [crop[1], crop[0], (crop[3]) - (crop[1]), (crop[2]) - (crop[0])];
const box: Box = [
parseInt((boxRaw[0] * input.shape[2]).toString()),
@@ -7,9 +7,9 @@
* - Eye Iris Details: [**MediaPipe Iris**](https://drive.google.com/file/d/1bsWbokp9AklH2ANjCfmjqEzzxO1CNbMu/view)
*/
import * as tf from 'dist/tfjs.esm.js';
import { log, now } from '../util/util';
import { loadModel } from '../tfjs/load';
import * as tf from '../../dist/tfjs.esm.js';
import * as blazeface from './blazeface';
import * as util from './facemeshutil';
import * as coords from './facemeshcoords';
@@ -17,10 +17,11 @@ import * as iris from './iris';
import * as attention from './attention';
import { histogramEqualization } from '../image/enhance';
import { env } from '../util/env';
import type { GraphModel, Tensor, Tensor4D } from '../tfjs/types';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { FaceResult, FaceLandmark, Point } from '../result';
import type { Config } from '../config';
import type { DetectBox } from './blazeface';
interface DetectBox { startPoint: Point, endPoint: Point, landmarks: Point[], confidence: number }
const cache = {
boxes: [] as DetectBox[],
@@ -31,7 +32,8 @@ const cache = {
let model: GraphModel | null = null;
let inputSize = 0;
export async function predict(input: Tensor4D, config: Config): Promise<FaceResult[]> {
export async function predict(input: Tensor, config: Config): Promise<FaceResult[]> {
if (!model?.['executor']) return [];
// reset cached boxes
const skipTime = (config.face.detector?.skipTime || 0) > (now() - cache.timestamp);
const skipFrame = cache.skipped < (config.face.detector?.skipFrames || 0);
@@ -59,7 +61,6 @@ export async function predict(input: Tensor4D, config: Config): Promise<FaceResu
score: 0,
boxScore: 0,
faceScore: 0,
size: [0, 0],
// contoursRaw: [],
// contours: [],
annotations: {} as Record<FaceLandmark, Point[]>,
@@ -73,14 +74,18 @@ export async function predict(input: Tensor4D, config: Config): Promise<FaceResu
if (equilized) face.tensor = equilized;
}
face.boxScore = Math.round(100 * box.confidence) / 100;
if (!config.face.mesh?.enabled || !model?.['executor']) { // mesh not enabled or not loaded, return resuts from detector only
if (!config.face.mesh?.enabled) { // mesh not enabled, return resuts from detector only
face.box = util.clampBox(box, input);
face.boxRaw = util.getRawBox(box, input);
face.score = face.boxScore;
face.size = box.size;
face.mesh = box.landmarks;
face.mesh = box.landmarks.map((pt) => [
((box.startPoint[0] + box.endPoint[0])) / 2 + ((box.endPoint[0] + box.startPoint[0]) * pt[0] / blazeface.size()),
((box.startPoint[1] + box.endPoint[1])) / 2 + ((box.endPoint[1] + box.startPoint[1]) * pt[1] / blazeface.size()),
]);
face.meshRaw = face.mesh.map((pt) => [pt[0] / (input.shape[2] || 0), pt[1] / (input.shape[1] || 0), (pt[2] || 0) / size]);
for (const key of Object.keys(coords.blazeFaceLandmarks)) face.annotations[key] = [face.mesh[coords.blazeFaceLandmarks[key] as number]]; // add annotations
for (const key of Object.keys(coords.blazeFaceLandmarks)) {
face.annotations[key] = [face.mesh[coords.blazeFaceLandmarks[key] as number]]; // add annotations
}
} else if (!model) { // mesh enabled, but not loaded
if (config.debug) log('face mesh detection requested, but model is not loaded');
} else { // mesh enabled
@@ -95,12 +100,14 @@ export async function predict(input: Tensor4D, config: Config): Promise<FaceResu
face.faceScore = Math.round(100 * faceConfidence[0]) / 100;
if (face.faceScore < (config.face.detector?.minConfidence || 1)) { // low confidence in detected mesh
box.confidence = face.faceScore; // reset confidence of cached box
if (config.face.mesh['keepInvalid']) {
if (config.face.mesh.keepInvalid) {
face.box = util.clampBox(box, input);
face.boxRaw = util.getRawBox(box, input);
face.size = box.size;
face.score = face.boxScore;
face.mesh = box.landmarks;
face.mesh = box.landmarks.map((pt) => [
((box.startPoint[0] + box.endPoint[0])) / 2 + ((box.endPoint[0] + box.startPoint[0]) * pt[0] / blazeface.size()),
((box.startPoint[1] + box.endPoint[1])) / 2 + ((box.endPoint[1] + box.startPoint[1]) * pt[1] / blazeface.size()),
]);
face.meshRaw = face.mesh.map((pt) => [pt[0] / (input.shape[2] || 1), pt[1] / (input.shape[1] || 1), (pt[2] || 0) / size]);
for (const key of Object.keys(coords.blazeFaceLandmarks)) {
face.annotations[key] = [face.mesh[coords.blazeFaceLandmarks[key] as number]]; // add annotations
@@ -114,21 +121,15 @@ export async function predict(input: Tensor4D, config: Config): Promise<FaceResu
if (config.face.attention?.enabled) {
rawCoords = await attention.augment(rawCoords, results); // augment iris results using attention model results
} else if (config.face.iris?.enabled) {
rawCoords = await iris.augmentIris(rawCoords, face.tensor, inputSize, config); // run iris model and augment results
rawCoords = await iris.augmentIris(rawCoords, face.tensor, inputSize); // run iris model and augment results
}
face.mesh = util.transformRawCoords(rawCoords, box, angle, rotationMatrix, inputSize); // get processed mesh
face.meshRaw = face.mesh.map((pt) => [pt[0] / (input.shape[2] || 0), pt[1] / (input.shape[1] || 0), (pt[2] || 0) / size]);
for (const key of Object.keys(coords.meshAnnotations)) face.annotations[key] = coords.meshAnnotations[key].map((index) => face.mesh[index]); // add annotations
face.score = face.faceScore;
const calculatedBox = {
...util.calculateFaceBox(face.mesh, box),
confidence: box.confidence,
landmarks: box.landmarks,
size: box.size,
};
const calculatedBox = { ...util.calculateFaceBox(face.mesh, box), confidence: box.confidence, landmarks: box.landmarks };
face.box = util.clampBox(calculatedBox, input);
face.boxRaw = util.getRawBox(calculatedBox, input);
face.size = calculatedBox.size;
/*
const contoursT = results.find((t) => t.shape[t.shape.length - 1] === 266) as Tensor;
const contoursData = contoursT && await contoursT.data(); // 133 x 2d points
@@ -3,7 +3,7 @@
* See `facemesh.ts` for entry point
*/
import * as tf from 'dist/tfjs.esm.js';
import * as tf from '../../dist/tfjs.esm.js';
import * as coords from './facemeshcoords';
import { constants } from '../tfjs/constants';
import type { Box, Point } from '../result';
@@ -31,19 +31,10 @@ export const getRawBox = (box, input): Box => (box ? [
(box.endPoint[1] - box.startPoint[1]) / (input.shape[1] || 0),
] : [0, 0, 0, 0]);
export const scaleBoxCoordinates = (box, factor, anchor) => {
export const scaleBoxCoordinates = (box, factor) => {
const startPoint: Point = [box.startPoint[0] * factor[0], box.startPoint[1] * factor[1]];
const endPoint: Point = [box.endPoint[0] * factor[0], box.endPoint[1] * factor[1]];
// const centerPoint = [(startPoint[0] + endPoint[0]) / 2, (startPoint[1] + endPoint[1]) / 2];
const landmarks = box.landmarks.map((pt) => [(pt[0] + anchor[0]) * factor[0], (pt[1] + anchor[1]) * factor[1]]);
/**
face.mesh = box.landmarks.map((pt) => [
((box.startPoint[0] + box.endPoint[0]) / 2) + (pt[0] * input.shape[2] / blazeface.size()),
((box.startPoint[1] + box.endPoint[1]) / 2) + (pt[1] * input.shape[1] / blazeface.size()),
]);
*/
return { startPoint, endPoint, landmarks, confidence: box.confidence };
return { startPoint, endPoint, landmarks: box.landmarks, confidence: box.confidence };
};
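The new scaleBoxCoordinates signature above takes an additional anchor parameter and offsets each raw landmark by it before scaling back to image coordinates. A standalone restatement of just that landmark mapping, with illustrative names and types:

// Hypothetical plain-TypeScript version of the anchored landmark scaling introduced above.
type Pt = [number, number];
function scaleLandmarksSketch(landmarks: Pt[], factor: Pt, anchor: Pt): Pt[] {
  // each raw landmark is shifted by its detector anchor, then scaled to image coordinates
  return landmarks.map((pt) => [(pt[0] + anchor[0]) * factor[0], (pt[1] + anchor[1]) * factor[1]] as Pt);
}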
export const cutAndResize = (box, image, cropSize) => {
@@ -60,36 +51,20 @@ export const enlargeBox = (box, factor) => {
const center = getBoxCenter(box);
const size = getBoxSize(box);
const halfSize: [number, number] = [factor * size[0] / 2, factor * size[1] / 2];
return {
startPoint: [center[0] - halfSize[0], center[1] - halfSize[1]] as Point,
endPoint: [center[0] + halfSize[0], center[1] + halfSize[1]] as Point,
landmarks: box.landmarks,
confidence: box.confidence,
size,
};
return { startPoint: [center[0] - halfSize[0], center[1] - halfSize[1]] as Point, endPoint: [center[0] + halfSize[0], center[1] + halfSize[1]] as Point, landmarks: box.landmarks, confidence: box.confidence };
};
export const squarifyBox = (box) => {
const centers = getBoxCenter(box);
const size = getBoxSize(box);
const halfSize = Math.max(...size) / 2;
return {
startPoint: [Math.round(centers[0] - halfSize), Math.round(centers[1] - halfSize)] as Point,
endPoint: [Math.round(centers[0] + halfSize), Math.round(centers[1] + halfSize)] as Point,
landmarks: box.landmarks,
confidence: box.confidence,
size: [Math.round(size[0]), Math.round(size[1])] as [number, number],
};
return { startPoint: [Math.round(centers[0] - halfSize), Math.round(centers[1] - halfSize)] as Point, endPoint: [Math.round(centers[0] + halfSize), Math.round(centers[1] + halfSize)] as Point, landmarks: box.landmarks, confidence: box.confidence };
};
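enlargeBox and squarifyBox above are reformatted and now also return a size field. A compact sketch of the combined geometry (center plus half-size), assuming the same getBoxCenter and getBoxSize semantics as in the source; the function name is illustrative only:

// Hypothetical standalone sketch of the enlarge-then-squarify geometry shown above.
type P2 = [number, number];
function enlargeAndSquarifySketch(center: P2, size: P2, factor: number): { startPoint: P2, endPoint: P2, size: P2 } {
  const enlarged: P2 = [factor * size[0], factor * size[1]]; // enlargeBox: scale the box size by factor
  const half = Math.max(enlarged[0], enlarged[1]) / 2;       // squarifyBox: larger side used for both axes
  return {
    startPoint: [Math.round(center[0] - half), Math.round(center[1] - half)],
    endPoint: [Math.round(center[0] + half), Math.round(center[1] + half)],
    size: [Math.round(enlarged[0]), Math.round(enlarged[1])],
  };
}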
export const calculateLandmarksBoundingBox = (landmarks) => {
const x = landmarks.map((d) => d[0]);
const y = landmarks.map((d) => d[1]);
return {
startPoint: [Math.min(...x), Math.min(...y)] as Point,
endPoint: [Math.max(...x), Math.max(...y)] as Point,
landmarks,
};
return { startPoint: [Math.min(...x), Math.min(...y)] as Point, endPoint: [Math.max(...x), Math.max(...y)] as Point, landmarks };
};
export const fixedRotationMatrix = [[1, 0, 0], [0, 1, 0], [0, 0, 1]];
@@ -200,7 +175,7 @@ export function correctFaceRotation(rotate, box, input, inputSize) {
if (largeAngle) { // perform rotation only if angle is sufficiently high
const center: Point = getBoxCenter(box);
const centerRaw: Point = [center[0] / input.shape[2], center[1] / input.shape[1]];
const rotated = tf.image.rotateWithOffset(input, angle, 0, [centerRaw[0], centerRaw[1]]);
const rotated = tf.image.rotateWithOffset(input, angle, 0, centerRaw);
rotationMatrix = buildRotationMatrix(-angle, center);
face = cutAndResize(box, rotated, [inputSize, inputSize]);
tf.dispose(rotated);
@@ -2,17 +2,17 @@
* FaceRes model implementation
*
* Returns Age, Gender, Descriptor
* Implements Face similarity function
* Implements Face simmilarity function
*
* Based on: [**HSE-FaceRes**](https://github.com/HSE-asavchenko/HSE_FaceRec_tf)
*/
import * as tf from 'dist/tfjs.esm.js';
import { log, now } from '../util/util';
import { env } from '../util/env';
import * as tf from '../../dist/tfjs.esm.js';
import { loadModel } from '../tfjs/load';
import { constants } from '../tfjs/constants';
import type { Tensor, GraphModel, Tensor4D, Tensor1D } from '../tfjs/types';
import type { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';
import type { Gender, Race } from '../result';
@@ -32,17 +32,10 @@ export async function load(config: Config): Promise<GraphModel> {
return model;
}
export function enhance(input, config: Config): Tensor {
const tensor = (input.image || input.tensor || input) as Tensor4D; // input received from detector is already normalized to 0..1, input is also assumed to be straightened
export function enhance(input): Tensor {
const tensor = (input.image || input.tensor || input) as Tensor; // input received from detector is already normalized to 0..1, input is also assumed to be straightened
if (!model?.inputs[0].shape) return tensor; // model has no shape so no point continuing
let crop: Tensor;
if (config.face.description?.['crop'] > 0) { // optional crop
const cropval = config.face.description?.['crop'];
const box = [[cropval, cropval, 1 - cropval, 1 - cropval]];
crop = tf.image.cropAndResize(tensor, box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
} else {
crop = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
}
const crop: Tensor = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
const norm: Tensor = tf.mul(crop, constants.tf255);
tf.dispose(crop);
return norm;
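The reworked enhance above optionally center-crops the face tensor before resizing, driven by the crop value under config.face.description. The crop box it builds is in the normalized [y1, x1, y2, x2] form expected by tf.image.cropAndResize; a one-line restatement with an illustrative name:

// Hypothetical helper mirroring the crop box construction in enhance() above;
// crop is the fraction trimmed from each edge of the normalized face tensor.
function centerCropBoxSketch(crop: number): [number, number, number, number][] {
  return [[crop, crop, 1 - crop, 1 - crop]];
}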
@@ -65,7 +58,7 @@ export function enhance(input, config: Config): Tensor {
*/
}
export async function predict(image: Tensor4D, config: Config, idx: number, count: number): Promise<FaceRes> {
export async function predict(image: Tensor, config: Config, idx: number, count: number): Promise<FaceRes> {
const obj: FaceRes = {
age: 0 as number,
gender: 'unknown' as Gender,
@@ -82,7 +75,7 @@ export async function predict(image: Tensor4D, config: Config, idx: number, coun
skipped = 0;
return new Promise(async (resolve) => {
if (config.face.description?.enabled) {
const enhanced = enhance(image, config);
const enhanced = enhance(image);
const resT = model?.execute(enhanced) as Tensor[];
lastTime = now();
tf.dispose(enhanced);
@@ -93,7 +86,7 @@ export async function predict(image: Tensor4D, config: Config, idx: number, coun
obj.gender = gender[0] <= 0.5 ? 'female' : 'male';
obj.genderScore = Math.min(0.99, confidence);
}
const argmax = tf.argMax(resT.find((t) => t.shape[1] === 100) as Tensor1D, 1);
const argmax = tf.argMax(resT.find((t) => t.shape[1] === 100), 1);
const ageIdx: number = (await argmax.data())[0];
tf.dispose(argmax);
const ageT = resT.find((t) => t.shape[1] === 100) as Tensor;