Compare commits


Comparing main ... 2.11.1 (520 commits)

Author SHA1 Message Date
Vladimir Mandic a6bd39f166 update release 2022-10-09 14:34:58 -04:00
Vladimir Mandic 6f713e6d05 2.11.1 2022-10-09 14:32:15 -04:00
Vladimir Mandic e1eaff4b31 update tfjs 2022-10-09 13:59:59 -04:00
Vladimir Mandic d362e040e9 add rvm segmentation model 2022-10-02 15:09:00 -04:00
Vladimir Mandic 2bb6d94b93 update wiki 2022-09-30 10:20:20 -04:00
Vladimir Mandic a4e7e065ee update typedefs and typedocs 2022-09-30 10:20:08 -04:00
Vladimir Mandic b2c89d3bb5 add human.webcam methods 2022-09-29 21:28:13 -04:00
Vladimir Mandic 339b34bcf3 update dependencies 2022-09-27 11:51:55 -04:00
Vladimir Mandic 77194de344 update faceid 2022-09-25 10:15:47 -04:00
Vladimir Mandic 2666181b3b update readme 2022-09-25 08:28:29 -04:00
Vladimir Mandic 62f642a245 Create FUNDING.yml 2022-09-25 08:17:26 -04:00
Vladimir Mandic c2bf0c43db update readme 2022-09-24 11:43:29 -04:00
Vladimir Mandic 2e5ca3b954 update demos 2022-09-21 15:31:17 -04:00
Vladimir Mandic 515274800b fix rotation interpolation 2022-09-21 13:51:49 -04:00
Vladimir Mandic edad35484f 2.10.3 2022-09-21 13:49:11 -04:00
Vladimir Mandic e7ca53ab6b update samples 2022-09-19 10:46:11 -04:00
Vladimir Mandic f098b5659c add human.video method 2022-09-17 17:19:51 -04:00
Vladimir Mandic ed5f093ff8 update readme 2022-09-14 11:39:12 -04:00
Vladimir Mandic 90a5489e34 update readme 2022-09-13 16:31:23 -04:00
Vladimir Mandic 552da54cb1 update todo 2022-09-12 09:39:39 -04:00
Vladimir Mandic 3699b147b8 update node resolver 2022-09-11 12:26:24 -04:00
Vladimir Mandic 91f3d7644c 2.10.2 2022-09-11 11:43:13 -04:00
George Bougakov a2dc24ecce Add Node.js ESM compatibility (#292) 2022-09-11 11:40:46 -04:00
Vladimir Mandic b89123b9b2 update 2022-09-08 08:02:26 -04:00
Vladimir Mandic 632b405ec5 update todo 2022-09-07 12:46:19 -04:00
Vladimir Mandic d39ce5e8a4 release 2022-09-07 12:42:49 -04:00
Vladimir Mandic 928ee524f3 2.10.1 2022-09-07 12:34:08 -04:00
Vladimir Mandic cd0d39a8e3 release candidate 2022-09-07 10:54:01 -04:00
Vladimir Mandic be328a3707 update 2022-09-06 13:38:39 -04:00
Vladimir Mandic c9120d5c67 add config flags 2022-09-06 10:28:54 -04:00
Vladimir Mandic ad7463c7d3 test update 2022-09-03 17:17:46 -04:00
Vladimir Mandic 4da18dda84 update settings 2022-09-03 07:15:34 -04:00
Vladimir Mandic 668fdec9a6 release preview 2022-09-03 07:13:08 -04:00
Vladimir Mandic 9cc61ae65f optimize startup sequence 2022-09-02 14:07:10 -04:00
Vladimir Mandic 94b7253642 update 2022-09-02 12:04:26 -04:00
Vladimir Mandic 3c324033ab reorder backend init code 2022-09-02 11:57:47 -04:00
Vladimir Mandic b1cb0684e4 test embedding 2022-09-02 11:11:51 -04:00
Vladimir Mandic e4b12ddff0 update backend 2022-09-02 10:22:24 -04:00
Vladimir Mandic 0c8567bb1f embedding test 2022-09-02 08:08:21 -04:00
Vladimir Mandic e3e2d3267c update tests 2022-09-01 09:27:29 -04:00
Vladimir Mandic 55846ab4e1 add browser iife tests 2022-08-31 18:30:47 -04:00
Vladimir Mandic 94207aad4f minor bug fixes and increased test coverage 2022-08-31 11:29:19 -04:00
Vladimir Mandic da3ad3033f extend release tests 2022-08-30 11:42:38 -04:00
Vladimir Mandic 1d00e154b0 add model load exception handling 2022-08-30 10:34:56 -04:00
Vladimir Mandic 22755355f7 add softwareKernels config option 2022-08-30 10:28:33 -04:00
Vladimir Mandic 79775267bc update typescript 2022-08-28 13:12:27 -04:00
Vladimir Mandic 497ca46006 update todo 2022-08-24 08:18:34 -04:00
Vladimir Mandic 04766d9693 update tfjs 2022-08-24 08:10:36 -04:00
Vladimir Mandic 32556d5954 update todo 2022-08-21 15:24:19 -04:00
Vladimir Mandic 5b4a693429 expand type safety 2022-08-21 15:23:03 -04:00
Vladimir Mandic 369026c2b1 full eslint rule rewrite 2022-08-21 13:34:51 -04:00
Vladimir Mandic bcb26f3a2e update demo notes 2022-08-20 09:38:08 -04:00
Vladimir Mandic 202c808f5b 2.9.4 2022-08-20 09:29:22 -04:00
Vladimir Mandic 3db85dd8cc add browser test 2022-08-19 09:15:29 -04:00
Vladimir Mandic 7f688743fd update wiki 2022-08-15 11:48:55 -04:00
Vladimir Mandic a8c613c903 add tensorflow library detection 2022-08-15 11:40:15 -04:00
Vladimir Mandic cc1b1ae5e6 fix wasm detection 2022-08-15 11:29:56 -04:00
Vladimir Mandic 58d4390735 update build pipeline 2022-08-12 09:51:45 -04:00
Vladimir Mandic 8ec586e2b8 enumerate additional models 2022-08-12 09:13:48 -04:00
Vladimir Mandic 9a80bbc39d release refresh 2022-08-10 13:50:33 -04:00
Vladimir Mandic a83f07a972 2.9.3 2022-08-10 13:45:19 -04:00
Vladimir Mandic 11bc5c63b4 rehault testing framework 2022-08-10 13:44:38 -04:00
Vladimir Mandic 60b0669206 release refresh 2022-08-08 15:15:57 -04:00
Vladimir Mandic 6c4c3dacca update pending todo notes 2022-08-08 15:10:34 -04:00
Vladimir Mandic 24a03b04ab update wiki 2022-08-08 15:09:39 -04:00
Vladimir Mandic 65cda56e3e add insightface 2022-08-08 15:09:26 -04:00
Vladimir Mandic bfe63d6955 2.9.2 2022-08-08 13:38:16 -04:00
Vladimir Mandic fd9f8b4e95 update profiling methods 2022-08-04 09:15:13 -04:00
Vladimir Mandic c425cfe6e0 update build platform 2022-07-29 09:24:04 -04:00
Vladimir Mandic 7be582acae update packages definitions 2022-07-26 07:36:57 -04:00
Vladimir Mandic 7b8b843771 release rebuild 2022-07-25 08:33:07 -04:00
Vladimir Mandic 8a62a3a87d 2.9.1 2022-07-25 08:30:38 -04:00
Vladimir Mandic 4d0b9fff98 update tfjs 2022-07-25 08:30:34 -04:00
Vladimir Mandic 0ea4edf39c update tfjs 2022-07-23 14:45:40 -04:00
Vladimir Mandic 4ce7fa22a0 full rebuild 2022-07-21 13:06:13 -04:00
Vladimir Mandic 789f6770e7 release cleanup 2022-07-21 12:53:10 -04:00
Vladimir Mandic 3f54aa8c3e tflite experiments 2022-07-19 17:49:58 -04:00
Vladimir Mandic e51a58211d update wiki 2022-07-18 08:22:42 -04:00
Vladimir Mandic 5f03ae3053 add load monitor test 2022-07-18 08:22:19 -04:00
Vladimir Mandic f543c82ec9 beta for upcoming major release 2022-07-17 21:31:08 -04:00
Vladimir Mandic c625892e18 swtich to release version of tfjs 2022-07-16 09:08:58 -04:00
Vladimir Mandic 0abc7ce124 update method signatures 2022-07-14 10:41:52 -04:00
Vladimir Mandic b341aadd0f update demo 2022-07-14 10:02:23 -04:00
Vladimir Mandic 8480468869 update typedocs 2022-07-14 09:36:08 -04:00
Vladimir Mandic c08395433e placeholder for face contours 2022-07-13 12:08:23 -04:00
Vladimir Mandic ccfd5ef49e improve face compare in main demo 2022-07-13 09:26:00 -04:00
Vladimir Mandic bd110857f8 add webview support 2022-07-13 08:53:37 -04:00
Vladimir Mandic 5c375117d7 update dependencies 2022-07-13 08:23:18 -04:00
FaeronGaming ad4b23aa34 fix(gear): ensure gear.modelPath is used for loadModel() 2022-07-13 08:22:28 -04:00
Vladimir Mandic 50d117c22e npm default install should be prod only 2022-07-07 12:11:05 +02:00
Vladimir Mandic 88f3b6eeda fix npm v7 compatibility 2022-07-05 05:03:31 -04:00
Vladimir Mandic bc009e165e add getModelStats method 2022-07-02 03:39:40 -04:00
Vladimir Mandic b201435660 rebuild 2022-06-21 13:26:58 -04:00
Vladimir Mandic 22bcd417fd update 2022-06-10 08:47:22 -04:00
Vladimir Mandic c39e13e60a release build 2022-06-08 08:52:19 -04:00
Vladimir Mandic 71729677be 2.8.1 2022-06-08 08:44:52 -04:00
Vladimir Mandic f23aab9866 webgpu and wasm optimizations 2022-06-02 10:39:53 -04:00
Vladimir Mandic 53f5edbe26 add faceboxes prototype 2022-05-30 08:58:54 -04:00
Vladimir Mandic 2b0b7bbb7b updated facemesh and attention models 2022-05-29 21:12:18 -04:00
Vladimir Mandic 694f08708a full rebuild 2022-05-24 07:28:51 -04:00
Vladimir Mandic 2a60b0787f 2.7.4 2022-05-24 07:28:43 -04:00
Vladimir Mandic 35c2d2452b 2.7.3 2022-05-24 07:19:38 -04:00
Vladimir Mandic 465176e2dd add face.mesh.keepInvalid config flag 2022-05-22 08:50:51 -04:00
Vladimir Mandic cc18f16b2e initial work for new facemesh model 2022-05-18 17:42:40 -04:00
Vladimir Mandic 2596b82580 update changelog 2022-05-18 08:35:06 -04:00
Vladimir Mandic 4ce4a92b6f update tfjs 2022-05-18 08:33:33 -04:00
Vladimir Mandic 6b21638372 2.7.2 2022-05-12 16:47:41 -04:00
Vladimir Mandic 8ab1256e11 fix demo when used with video files 2022-05-12 16:47:21 -04:00
Vladimir Mandic 5275475c26 major release 2022-05-09 08:16:00 -04:00
Vladimir Mandic eae60a2693 2.7.1 2022-05-09 08:14:00 -04:00
Vladimir Mandic 08cf2be7cf update wiki 2022-04-23 13:02:00 -04:00
Vladimir Mandic 0a8f194e02 update todo 2022-04-21 09:58:13 -04:00
Vladimir Mandic ea4a99a1a1 support 4k input 2022-04-21 09:39:40 -04:00
Vladimir Mandic 95f278b72e update tfjs 2022-04-21 09:38:36 -04:00
Vladimir Mandic e239386fd8 add attention draw methods 2022-04-18 12:26:05 -04:00
Vladimir Mandic 365ce35f2d fix coloring function 2022-04-18 11:29:45 -04:00
Vladimir Mandic 90b38664ec enable precompile as part of warmup 2022-04-15 07:54:27 -04:00
Vladimir Mandic e8156f4adb prepare release beta 2022-04-14 11:55:49 -04:00
Vladimir Mandic b155cae9bb change default face crop 2022-04-14 11:47:08 -04:00
Vladimir Mandic 55aa98817c update wiki 2022-04-11 11:55:30 -04:00
Vladimir Mandic e7871bc79c face attention model is available in human-models 2022-04-11 11:51:04 -04:00
Vladimir Mandic daa89efd39 beta release 2.7 2022-04-11 11:46:35 -04:00
Vladimir Mandic d347babe11 refactor draw methods 2022-04-11 11:46:00 -04:00
Vladimir Mandic 346e722839 implement face attention model 2022-04-11 11:45:24 -04:00
Vladimir Mandic 0cb473dc0d add electronjs demo 2022-04-10 11:00:41 -04:00
Vladimir Mandic a3d890119c rebuild 2022-04-10 10:13:13 -04:00
Vladimir Mandic 7835126870 rebuild 2022-04-05 12:25:41 -04:00
Vladimir Mandic 0c0fff6a99 update tfjs 2022-04-01 12:38:05 -04:00
Vladimir Mandic 13a78e477b update 2022-04-01 09:13:32 -04:00
Vladimir Mandic 1125a79a8c 2.6.5 2022-04-01 09:12:13 -04:00
Vladimir Mandic 4b3edc5f78 bundle offscreencanvas types 2022-04-01 09:12:04 -04:00
Vladimir Mandic 0086a9169d prototype precompile pass 2022-03-19 11:02:30 -04:00
Vladimir Mandic 738ee9a7e5 fix changelog generation 2022-03-16 11:38:57 -04:00
Vladimir Mandic f350f01fb0 fix indexdb config check 2022-03-16 11:19:56 -04:00
Vladimir Mandic f7be4a4c1f update typescript and tensorflow 2022-03-07 13:24:06 -05:00
Vladimir Mandic 9da67575fe 2.6.4 2022-02-27 07:25:45 -05:00
Vladimir Mandic 9452cf097c fix types typo 2022-02-17 08:15:57 -05:00
Vladimir Mandic 3bc8a5a1b2 refresh 2022-02-14 07:53:28 -05:00
Vladimir Mandic 51246d43f8 add config option wasmPlatformFetch 2022-02-10 15:35:32 -05:00
Vladimir Mandic 7ce0aaf8b4 2.6.3 2022-02-10 15:32:53 -05:00
Vladimir Mandic 3df1c8c8b8 rebuild 2022-02-10 12:27:21 -05:00
Vladimir Mandic c83b8d9485 update toolkit 2022-02-07 10:12:59 -05:00
Vladimir Mandic c20a8102c4 2.6.2 2022-02-07 09:47:17 -05:00
Vladimir Mandic d1ae5dc761 update todo 2022-01-20 08:24:23 -05:00
Vladimir Mandic b586c0d998 release rebuild 2022-01-20 08:17:06 -05:00
Vladimir Mandic bfcc6b63e8 2.6.1 2022-01-20 07:54:56 -05:00
Vladimir Mandic d96d275061 implement model caching using indexdb 2022-01-17 11:03:21 -05:00
Vladimir Mandic 1c2032fd89 prototype global fetch handler 2022-01-16 09:49:55 -05:00
Vladimir Mandic 4471881930 update samples 2022-01-15 09:18:14 -05:00
Vladimir Mandic 086f30c3f5 update samples 2022-01-15 09:11:04 -05:00
Vladimir Mandic 1e666d7a27 update samples with images under cc licence only 2022-01-14 16:10:32 -05:00
Vladimir Mandic ea03e67601 fix face box and hand tracking when in front of face 2022-01-14 09:46:16 -05:00
Vladimir Mandic ca1b609eb8 2.5.8 2022-01-14 09:42:57 -05:00
Vladimir Mandic beafe9bfef update 2022-01-08 12:43:44 -05:00
Vladimir Mandic 60e13020ea update wiki 2022-01-05 11:49:10 -05:00
Vladimir Mandic 9a6ad885f4 update wiki 2022-01-05 09:55:07 -05:00
Vladimir Mandic 5a3bbd0d4a update 2022-01-05 08:34:31 -05:00
Vladimir Mandic ac1522386b update demos 2022-01-01 08:13:04 -05:00
Vladimir Mandic 3a3e2eb067 update blazepose 2021-12-31 13:58:03 -05:00
Vladimir Mandic 1059c9c3de update dependencies 2021-12-30 12:39:29 -05:00
Vladimir Mandic 77f7d6cce2 update hand annotations 2021-12-30 12:14:09 -05:00
Vladimir Mandic a8aec4e610 update blazepose 2021-12-29 12:37:46 -05:00
Vladimir Mandic 4fd18fa5fa update 2021-12-28 11:39:54 -05:00
Vladimir Mandic 4c8a12512a update demos 2021-12-28 09:40:32 -05:00
Vladimir Mandic d2ce0331e8 fix samples 2021-12-28 07:03:05 -05:00
libowen.eric 3788c82ffc fix(src): typo 2021-12-28 06:59:16 -05:00
Vladimir Mandic daffddd057 change on how face box is calculated 2021-12-27 10:59:56 -05:00
Vladimir Mandic bbe68c3c55 2.5.7 2021-12-27 09:29:15 -05:00
Vladimir Mandic 48a0c36d9e update 2021-12-22 10:04:41 -05:00
Vladimir Mandic ff3d9df22e fix posenet 2021-12-18 12:24:01 -05:00
Vladimir Mandic 56f6c54ee5 release refresh 2021-12-15 09:30:26 -05:00
Vladimir Mandic 7aff9ad28d 2.5.6 2021-12-15 09:26:40 -05:00
Vladimir Mandic 31de3e2d86 strong type for string enums 2021-12-15 09:26:32 -05:00
Vladimir Mandic 1afe7f7777 update 2021-12-14 15:45:43 -05:00
Vladimir Mandic abac67389b rebuild 2021-12-13 21:38:55 -05:00
Vladimir Mandic 03b2a9f4ea update tfjs 2021-12-09 14:44:26 -05:00
Vladimir Mandic 48bb49dd8f fix node detection in electron environment 2021-12-07 17:02:33 -05:00
Vladimir Mandic ab1f44ff30 update 2021-12-01 08:27:05 -05:00
Vladimir Mandic 5a7553aa14 2.5.5 2021-12-01 08:21:55 -05:00
Vladimir Mandic b7d1dca089 update readme 2021-11-26 12:14:40 -05:00
Vladimir Mandic 9d798cb073 added human-motion 2021-11-26 12:12:46 -05:00
Vladimir Mandic 268f2602d1 add offscreencanvas typedefs 2021-11-26 11:55:52 -05:00
Vladimir Mandic 5cf64c1803 update blazepose and extend hand annotations 2021-11-24 16:17:03 -05:00
Vladimir Mandic 690a94f5ea release preview 2021-11-23 10:40:40 -05:00
Vladimir Mandic ec41b72710 fix face box scaling on detection 2021-11-23 08:36:32 -05:00
Vladimir Mandic 40429cedda cleanup 2021-11-22 14:44:25 -05:00
Vladimir Mandic 39c3f4ea1d 2.5.4 2021-11-22 14:33:46 -05:00
Vladimir Mandic c53be42d11 prototype blazepose detector 2021-11-22 14:33:40 -05:00
Vladimir Mandic 1162f6e06b minor fixes 2021-11-21 16:55:17 -05:00
Vladimir Mandic 3132f298c3 add body 3d interpolation 2021-11-19 18:30:57 -05:00
Vladimir Mandic 96e5399991 edit blazepose keypoints 2021-11-19 16:11:03 -05:00
Vladimir Mandic 5da11225b6 new build process 2021-11-18 10:10:06 -05:00
Vladimir Mandic 6e60ec8d22 2.5.3 2021-11-18 10:06:07 -05:00
Vladimir Mandic 1a53bb14de update typescript 2021-11-17 16:50:21 -05:00
Vladimir Mandic 15fb4981c9 create typedef rollup 2021-11-17 15:45:49 -05:00
Vladimir Mandic a3359460e2 optimize centernet 2021-11-16 20:16:49 -05:00
Vladimir Mandic fb81d557e4 cache frequent tf constants 2021-11-16 18:31:07 -05:00
Vladimir Mandic 8cc5c938f4 add extra face rotation prior to mesh 2021-11-16 13:07:44 -05:00
Vladimir Mandic 0420a5d144 release 2.5.2 2021-11-15 09:26:38 -05:00
Vladimir Mandic 6d580a0b4f improve error handling 2021-11-14 11:22:52 -05:00
Vladimir Mandic 4f641380ce 2.5.2 2021-11-14 10:43:00 -05:00
Vladimir Mandic 39fa2396a1 fix mobilefacenet module 2021-11-13 17:26:19 -05:00
Vladimir Mandic 4b526ad53c fix gear and ssrnet modules 2021-11-13 12:23:32 -05:00
Vladimir Mandic 76830567f7 fix for face crop when mesh is disabled 2021-11-12 15:17:08 -05:00
Vladimir Mandic 61a7de3c0f implement optional face masking 2021-11-12 15:07:23 -05:00
Vladimir Mandic 9d026ea950 update todo 2021-11-11 17:02:32 -05:00
Vladimir Mandic 8cc810bb69 add similarity score range normalization 2021-11-11 17:01:10 -05:00
Vladimir Mandic 6b69f38d55 add faceid demo 2021-11-11 11:30:55 -05:00
Vladimir Mandic a019176a15 documentation overhaul 2021-11-10 12:21:45 -05:00
Vladimir Mandic d6acf90013 auto tensor shape and channels handling 2021-11-09 19:39:18 -05:00
Vladimir Mandic 485c10ab0a disable use of path2d in node 2021-11-09 18:10:54 -05:00
Vladimir Mandic 34c52df4d6 update wiki 2021-11-09 14:45:45 -05:00
Vladimir Mandic f8a51deae4 add liveness module and facerecognition demo 2021-11-09 14:37:50 -05:00
Vladimir Mandic cbc1fddbf7 initial version of facerecognition demo 2021-11-09 10:39:23 -05:00
Vladimir Mandic 13a71485cd rebuild 2021-11-08 16:41:30 -05:00
Vladimir Mandic a0654dc231 add type defs when working with relative path imports 2021-11-08 16:36:20 -05:00
Vladimir Mandic 419dd219a6 disable humangl backend if webgl 1.0 is detected 2021-11-08 11:35:35 -05:00
Vladimir Mandic 7343d2e8ec add additional hand gestures 2021-11-08 07:36:26 -05:00
Vladimir Mandic 391812d251 2.5.1 2021-11-08 06:25:07 -05:00
Vladimir Mandic e0d66fffd9 update automated tests 2021-11-07 10:10:23 -05:00
Vladimir Mandic 588c9c08f0 new human.compare api 2021-11-07 10:03:33 -05:00
Vladimir Mandic 2b1f5a2b3f added links to release notes 2021-11-07 08:14:14 -05:00
Vladimir Mandic d937953f4d update readme 2021-11-06 10:26:04 -04:00
Vladimir Mandic 3200460055 new frame change detection algorithm 2021-11-06 10:21:51 -04:00
Vladimir Mandic 018e61f2bb add histogram equalization 2021-11-05 15:35:53 -04:00
Vladimir Mandic 6c75c26a51 add histogram equalization 2021-11-05 15:09:54 -04:00
Vladimir Mandic 065349c085 implement wasm missing ops 2021-11-05 13:36:53 -04:00
Vladimir Mandic 52769ef2e9 performance and memory optimizations 2021-11-05 11:28:06 -04:00
Vladimir Mandic 91440273f2 fix react compatibility issues 2021-11-04 06:34:13 -04:00
Vladimir Mandic d30a571a9c improve box rescaling for all modules 2021-11-03 16:32:07 -04:00
Vladimir Mandic 8634ec7fd9 improve precision using wasm backend 2021-11-02 11:42:15 -04:00
Vladimir Mandic cff58d6384 refactor predict with execute 2021-11-02 11:07:11 -04:00
Vladimir Mandic e45e7ebd55 update tests 2021-10-31 09:58:48 -04:00
Vladimir Mandic eeadb396fa update hand landmarks model 2021-10-31 09:06:33 -04:00
Vladimir Mandic 8ba083651f patch tfjs type defs 2021-10-31 08:03:42 -04:00
Vladimir Mandic a109f3b051 start 2.5 major version 2021-10-30 12:21:54 -04:00
Vladimir Mandic bcb6e34a1e build and docs cleanup 2021-10-29 15:55:20 -04:00
Vladimir Mandic 2c2688023e fix firefox bug 2021-10-28 17:25:50 -04:00
Vladimir Mandic 792930fc91 update tfjs 2021-10-28 14:40:31 -04:00
Vladimir Mandic 5bf74a53ad 2.4.3 2021-10-28 13:59:57 -04:00
Vladimir Mandic 74cf335523 additional human.performance counters 2021-10-27 09:45:38 -04:00
Vladimir Mandic ff48422c13 2.4.2 2021-10-27 09:44:17 -04:00
Vladimir Mandic 8892886734 add ts demo 2021-10-27 08:16:06 -04:00
Vladimir Mandic 4357fbc0ba switch from es2018 to es2020 for main build 2021-10-26 19:38:23 -04:00
Vladimir Mandic e2866b2bd6 switch to custom tfjs for demos 2021-10-26 15:08:05 -04:00
Vladimir Mandic 3b449dbfe3 update todo 2021-10-25 13:45:04 -04:00
Vladimir Mandic 38b581373e release 2.4 2021-10-25 13:29:29 -04:00
Vladimir Mandic f0f8a8ddff 2.4.1 2021-10-25 13:09:41 -04:00
Vladimir Mandic 3d4c12fec3 refactoring plus jsdoc comments 2021-10-25 13:09:00 -04:00
Vladimir Mandic 1f03270e76 increase face similarity match resolution 2021-10-25 09:44:13 -04:00
Vladimir Mandic 90ce714446 update todo 2021-10-23 09:42:41 -04:00
Vladimir Mandic c38773bf26 time based caching 2021-10-23 09:38:52 -04:00
Vladimir Mandic 267c87536a turn on minification 2021-10-22 20:14:13 -04:00
Vladimir Mandic 3d81a19a66 update todo 2021-10-22 16:11:02 -04:00
Vladimir Mandic b517dc7f1b initial work on skipTime 2021-10-22 16:09:52 -04:00
Vladimir Mandic 1534b58235 added generic types 2021-10-22 14:46:19 -04:00
Vladimir Mandic 129365e52f enhanced typing exports 2021-10-22 13:49:40 -04:00
Vladimir Mandic b2b07e4b36 update tfjs to 3.10.0 2021-10-22 09:48:27 -04:00
Vladimir Mandic fa0a93e9b2 add optional autodetected custom wasm path 2021-10-21 12:42:08 -04:00
Vladimir Mandic 7e1b2840a2 2.3.6 2021-10-21 11:31:46 -04:00
Vladimir Mandic dc5e46adde fix for human.draw labels and typedefs 2021-10-21 10:54:51 -04:00
Vladimir Mandic 01316a4c2d refactor human.env to a class type 2021-10-21 10:26:44 -04:00
Vladimir Mandic 7f87d2633a add human.custom.esm using custom tfjs build 2021-10-20 17:49:00 -04:00
Vladimir Mandic 7a05cf3743 update handtrack boxes and refactor handpose 2021-10-20 09:10:57 -04:00
Vladimir Mandic a741ad95cb update demos 2021-10-19 11:28:59 -04:00
Vladimir Mandic 3b9fc2e8a7 2.3.5 2021-10-19 11:25:05 -04:00
Jimmy Nyström eaeb02592e Removed direct usage of performance.now (switched to using the utility function that works in both nodejs and browser environments) 2021-10-19 09:58:14 -04:00
Vladimir Mandic ff156bc413 update 2021-10-19 08:09:46 -04:00
Vladimir Mandic 8bd987a7b3 2.3.4 2021-10-19 08:05:19 -04:00
Vladimir Mandic 6291d779ef update dependencies and refresh release 2021-10-19 07:58:51 -04:00
Vladimir Mandic 131cc2609a minor blazepose optimizations 2021-10-15 09:34:40 -04:00
Vladimir Mandic 209506611a compress samples 2021-10-15 07:25:51 -04:00
Vladimir Mandic 2463b16e85 remove posenet from default package 2021-10-15 06:49:41 -04:00
Vladimir Mandic d046513c92 enhanced movenet postprocessing 2021-10-14 12:26:59 -04:00
Vladimir Mandic 84df7f885f update handtrack skip algorithm 2021-10-13 14:49:41 -04:00
Vladimir Mandic 515fbf76e7 use transferrable buffer for worker messages 2021-10-13 11:53:54 -04:00
Vladimir Mandic d47557cfdd update todo 2021-10-13 11:02:44 -04:00
Vladimir Mandic 203fcfa904 add optional anti-spoofing module 2021-10-13 10:56:56 -04:00
Vladimir Mandic ff894a1ee7 update todo 2021-10-13 08:36:20 -04:00
Vladimir Mandic ab453f69df add node-match advanced example using worker thread pool 2021-10-13 08:06:11 -04:00
Vladimir Mandic df53d373e1 package updates 2021-10-12 14:17:33 -04:00
Vladimir Mandic 2930255757 optimize image preprocessing 2021-10-12 11:39:18 -04:00
Vladimir Mandic c430d0d99d update imagefx 2021-10-12 09:48:00 -04:00
Vladimir Mandic 67afa5952f set webgpu optimized flags 2021-10-11 09:22:39 -04:00
Vladimir Mandic ce37a0f716 major precision improvements to movenet and handtrack 2021-10-10 22:29:20 -04:00
Vladimir Mandic 90ec92bbe2 image processing fixes 2021-10-10 17:52:43 -04:00
Vladimir Mandic 1b7ee44659 redesign body and hand caching and interpolation 2021-10-08 18:39:04 -04:00
Vladimir Mandic a7b06eafb2 demo default config cleanup 2021-10-08 07:48:48 -04:00
Vladimir Mandic fd7f7c2195 improve gaze and face angle visualizations in draw 2021-10-07 10:33:10 -04:00
Vladimir Mandic d4322fc0f9 release 2.3.1 2021-10-06 11:33:58 -04:00
Vladimir Mandic b2d5b8322d 2.3.1 2021-10-06 11:30:44 -04:00
Vladimir Mandic 02afd6c54f workaround for chrome offscreencanvas bug 2021-10-06 11:30:34 -04:00
Vladimir Mandic 12644a3e06 fix backend conflict in webworker 2021-10-04 17:03:36 -04:00
Vladimir Mandic 8a50618e9a add blazepose v2 and add annotations to body results 2021-10-04 16:29:15 -04:00
Vladimir Mandic ab3cda4f51 fix backend order initialization 2021-10-03 08:12:26 -04:00
Vladimir Mandic a76ebdaf00 added docker notes 2021-10-02 11:41:51 -04:00
Vladimir Mandic 429df55ac5 update dependencies 2021-10-02 07:46:07 -04:00
Vladimir Mandic ab69d5414a updated hint rules 2021-10-01 12:07:14 -04:00
Vladimir Mandic 8b2225d737 updated facematch demo 2021-10-01 11:40:57 -04:00
Vladimir Mandic 03fd6378c4 update wiki 2021-09-30 14:29:14 -04:00
Vladimir Mandic 8579766d5f breaking change: new similarity and match methods 2021-09-30 14:28:16 -04:00
Vladimir Mandic 23b937e5e2 update facematch demo 2021-09-29 08:02:23 -04:00
Vladimir Mandic 44a5c30e0d update movenet-multipose and samples 2021-09-28 17:07:34 -04:00
Vladimir Mandic c1af3888f9 tweaked default values 2021-09-28 13:48:29 -04:00
Vladimir Mandic eed3d67928 update todo 2021-09-28 12:02:47 -04:00
Vladimir Mandic 61c8ab9b2c enable handtrack as default model 2021-09-28 12:02:17 -04:00
Vladimir Mandic 49cbbb387e redesign face processing 2021-09-28 12:01:48 -04:00
Vladimir Mandic b6f7d683e1 update types and dependencies 2021-09-27 14:39:54 -04:00
Vladimir Mandic 0643fb50df refactoring 2021-09-27 13:58:13 -04:00
Vladimir Mandic ae0e7533eb define app specific types 2021-09-27 09:19:43 -04:00
Vladimir Mandic 3f8db964ba implement box caching for movenet 2021-09-27 08:53:41 -04:00
Vladimir Mandic 8864b5f7c1 update todo 2021-09-26 10:09:30 -04:00
Vladimir Mandic ab93b7ffb2 update todo 2021-09-26 10:03:39 -04:00
Vladimir Mandic a250db9042 update wiki 2021-09-26 06:53:06 -04:00
Vladimir Mandic b5f307f49b autodetect number of bodies and hands 2021-09-25 19:14:03 -04:00
Vladimir Mandic 6f0c0e77b8 upload new samples 2021-09-25 16:31:44 -04:00
Vladimir Mandic 16f993c266 new samples gallery and major code folder restructure 2021-09-25 11:51:15 -04:00
Vladimir Mandic e3f477a50d update todo 2021-09-24 09:57:03 -04:00
Vladimir Mandic 79822d3e01 new release 2021-09-24 09:55:27 -04:00
Vladimir Mandic a8f37111ad 2.2.3 2021-09-24 09:46:35 -04:00
Vladimir Mandic a2b52b3f52 optimize model loading 2021-09-23 14:09:41 -04:00
Vladimir Mandic 43d3c6ce80 support segmentation for nodejs 2021-09-22 19:27:12 -04:00
Vladimir Mandic 7d636c8522 update todo and docs 2021-09-22 16:00:43 -04:00
Vladimir Mandic 182136fbfb redo segmentation and handtracking 2021-09-22 15:16:14 -04:00
Vladimir Mandic 70181b53e9 prototype handtracking 2021-09-21 16:48:16 -04:00
Vladimir Mandic cb9af0a48c automated browser tests 2021-09-20 22:06:49 -04:00
Vladimir Mandic 0d681e4908 support for dynamic backend switching 2021-09-20 21:59:49 -04:00
Vladimir Mandic e0c43098dd initial automated browser tests 2021-09-20 17:17:13 -04:00
Vladimir Mandic 2695b215df enhanced automated test coverage 2021-09-20 09:42:34 -04:00
Vladimir Mandic ddf5d5f0f2 more automated tests 2021-09-19 14:20:22 -04:00
Vladimir Mandic d7cdda2e1b added configuration validation 2021-09-19 14:07:53 -04:00
Vladimir Mandic f59250bde6 updated build platform and typedoc theme 2021-09-18 19:09:02 -04:00
Vladimir Mandic 6ced256a42 prevent validation failed on some model combinations 2021-09-17 14:30:57 -04:00
Vladimir Mandic ab53f1cde7 webgl exception handling 2021-09-17 14:07:44 -04:00
Vladimir Mandic bee6f3b651 2.2.2 2021-09-17 14:07:32 -04:00
Vladimir Mandic 06ab47f941 experimental webgl status monitoring 2021-09-17 11:23:00 -04:00
Vladimir Mandic a261b7bd99 major release 2021-09-16 10:49:42 -04:00
Vladimir Mandic ca56407d49 2.2.1 2021-09-16 10:46:24 -04:00
Vladimir Mandic a50c3e2103 add vr model demo 2021-09-16 10:15:20 -04:00
Vladimir Mandic b0b0702208 update readme 2021-09-15 19:12:05 -04:00
Vladimir Mandic ee0b4af220 all tests passing 2021-09-15 19:02:51 -04:00
Vladimir Mandic 1e8a932a2b redefine draw helpers interface 2021-09-15 18:58:54 -04:00
Vladimir Mandic 4febbc1d3e add simple webcam and webrtc demo 2021-09-15 13:59:18 -04:00
Vladimir Mandic 35583d3a04 added visual results browser to demo 2021-09-15 11:15:38 -04:00
Vladimir Mandic 9ace39fdab reorganize tfjs bundle 2021-09-14 22:07:13 -04:00
Vladimir Mandic 155c7c2a00 experimental custom tfjs bundle - disabled 2021-09-14 20:07:08 -04:00
Vladimir Mandic 71b08aa8f8 add platform and backend capabilities detection 2021-09-13 23:24:04 -04:00
Vladimir Mandic c94e5f86c2 update changelog and todo 2021-09-13 13:54:42 -04:00
Vladimir Mandic 6569b62f70 update dependencies 2021-09-13 13:34:41 -04:00
Vladimir Mandic b39f0d55bb enhanced automated tests 2021-09-13 13:30:46 -04:00
Vladimir Mandic 36150c0730 enable canvas patching for nodejs 2021-09-13 13:30:08 -04:00
Vladimir Mandic 9d4955cb8a full ts strict typechecks 2021-09-13 13:29:14 -04:00
Vladimir Mandic 6796a9a1ba fix multiple memory leaks 2021-09-13 13:28:35 -04:00
Vladimir Mandic edc719cc9e modularize human class and add model validation 2021-09-12 18:37:06 -04:00
Vladimir Mandic 0374ecdcc3 update todo 2021-09-12 13:18:33 -04:00
Vladimir Mandic fd0df97d94 add dynamic kernel op detection 2021-09-12 13:17:33 -04:00
Vladimir Mandic b492ce5c40 added human.env diagnostic class 2021-09-12 12:42:17 -04:00
Vladimir Mandic c03b7b3f4c minor typos 2021-09-12 08:49:56 -04:00
Vladimir Mandic 259036e8ca release candidate 2021-09-12 00:30:11 -04:00
Vladimir Mandic f423bfe9e3 parametrize face config 2021-09-12 00:05:06 -04:00
Vladimir Mandic 6c8faf7de2 mark all config items as optional 2021-09-11 23:59:41 -04:00
Vladimir Mandic eef2b43852 redefine config and result interfaces 2021-09-11 23:54:35 -04:00
Vladimir Mandic 7b284f63f6 fix usge of string enums 2021-09-11 23:08:18 -04:00
Vladimir Mandic 4d2ca7dd2d start using partial definitions 2021-09-11 16:11:00 -04:00
Vladimir Mandic cc864891bf implement event emitters 2021-09-11 16:00:16 -04:00
Vladimir Mandic f4d73e46b9 fix iife loader 2021-09-11 11:42:48 -04:00
Vladimir Mandic b36cd175b0 update sourcemaps 2021-09-11 11:17:13 -04:00
Vladimir Mandic fbd04f8555 simplify dependencies 2021-09-11 10:29:31 -04:00
Vladimir Mandic f42f64427a change build process 2021-09-10 21:21:29 -04:00
Vladimir Mandic 589511f1f3 updated wiki 2021-09-06 08:17:48 -04:00
Vladimir Mandic 2c05cfea24 update lint exceptions 2021-09-05 17:05:46 -04:00
Vladimir Mandic 14164dc603 update wiki 2021-09-05 16:48:57 -04:00
Vladimir Mandic 43074a660c add benchmark info 2021-09-05 16:42:11 -04:00
Vladimir Mandic e6f004dbf9 update hand detector processing algorithm 2021-09-02 08:50:16 -04:00
Vladimir Mandic 76cdfa88a6 update 2021-08-31 18:24:30 -04:00
Vladimir Mandic 90a21de40e simplify canvas handling in nodejs 2021-08-31 18:22:16 -04:00
Vladimir Mandic 2be4c00d3a full rebuild 2021-08-31 14:50:16 -04:00
Vladimir Mandic 6f6577c3b0 2.1.5 2021-08-31 14:49:07 -04:00
Vladimir Mandic eaaa71df07 added demo node-canvas 2021-08-31 14:48:55 -04:00
Vladimir Mandic de89111e70 update node-fetch 2021-08-31 13:29:29 -04:00
Vladimir Mandic 9cd8a433fe dynamically generate default wasm path 2021-08-31 13:00:06 -04:00
Vladimir Mandic 21d4af9235 updated wiki 2021-08-23 08:41:50 -04:00
Vladimir Mandic e5821563a1 implement finger poses in hand detection and gestures 2021-08-20 20:43:03 -04:00
Vladimir Mandic 49c547d698 implemented movenet-multipose model 2021-08-20 09:05:07 -04:00
Vladimir Mandic 6764b0f5be update todo 2021-08-19 17:28:07 -04:00
Vladimir Mandic 6b6a991cf2 2.1.4 2021-08-19 16:17:03 -04:00
Vladimir Mandic 5d871f1dd9 add static type definitions to main class 2021-08-19 16:16:56 -04:00
Vladimir Mandic 58fc62e646 fix interpolation overflow 2021-08-18 14:28:31 -04:00
Vladimir Mandic f460eca131 rebuild full 2021-08-17 18:49:49 -04:00
Vladimir Mandic a559d05378 update angle calculations 2021-08-17 18:46:50 -04:00
Vladimir Mandic cf7ba1ef30 improve face box caching 2021-08-17 09:15:47 -04:00
Vladimir Mandic cf64b3259e strict type checks 2021-08-17 08:51:17 -04:00
Vladimir Mandic 4d3a112985 add webgu checks 2021-08-15 08:09:40 -04:00
Vladimir Mandic 0b0e63f183 update todo 2021-08-14 18:02:39 -04:00
Vladimir Mandic f40eb5d380 experimental webgpu support 2021-08-14 18:00:26 -04:00
Vladimir Mandic d0d0aba8fa add experimental webgu demo 2021-08-14 13:39:26 -04:00
Vladimir Mandic 00d7efa724 add backend initialization checks 2021-08-14 11:17:51 -04:00
Vladimir Mandic 4fd37dfd56 complete async work 2021-08-14 11:16:26 -04:00
Vladimir Mandic af5ab60495 update node-webcam 2021-08-13 18:47:37 -04:00
Vladimir Mandic 6e06695780 list detect cameras 2021-08-13 10:34:09 -04:00
Vladimir Mandic af28fff323 switch to async data reads 2021-08-12 09:31:16 -04:00
Vladimir Mandic f877c2515b 2.1.3 2021-08-12 09:29:48 -04:00
Vladimir Mandic 0fd7683a56 fix centernet & update blazeface 2021-08-11 18:59:02 -04:00
Vladimir Mandic cafb7732ab update todo 2021-08-09 10:46:03 -04:00
Vladimir Mandic 7dad79933e update model list 2021-08-06 08:50:50 -04:00
Vladimir Mandic c6ec8dec76 minor update 2021-08-06 08:29:41 -04:00
Vladimir Mandic 958f898d4b minor update 2021-08-05 10:38:04 -04:00
Vladimir Mandic 5c529d5889 update build process to remove warnings 2021-07-31 20:42:28 -04:00
Vladimir Mandic cd8861b6bb update todo 2021-07-31 07:43:50 -04:00
Vladimir Mandic b1b9cc2954 update typedoc links 2021-07-31 07:29:37 -04:00
Vladimir Mandic 0868aeb350 replace movenet with lightning-v4 2021-07-30 07:18:54 -04:00
Vladimir Mandic 173fa35b7c update eslint rules 2021-07-30 06:49:41 -04:00
Vladimir Mandic db1502829a enable webgl uniform support for faster warmup 2021-07-29 16:35:16 -04:00
Vladimir Mandic c3b95f452e 2.1.2 2021-07-29 16:34:03 -04:00
Vladimir Mandic a3dea5a01f fix unregistered ops in tfjs 2021-07-29 16:06:03 -04:00
Vladimir Mandic 1dccff181b update build 2021-07-29 12:50:06 -04:00
Vladimir Mandic 45d4095bff fix typo 2021-07-29 11:26:19 -04:00
Vladimir Mandic 80299c8600 updated wiki 2021-07-29 11:06:34 -04:00
Vladimir Mandic e3470dc2f1 rebuild new release 2021-07-29 11:03:21 -04:00
Vladimir Mandic 190c3a60b4 2.1.1 2021-07-29 11:02:02 -04:00
Vladimir Mandic 80526ee02a updated gesture types 2021-07-29 11:01:50 -04:00
Vladimir Mandic 3e26e91340 update tfjs and typescript 2021-07-29 09:53:13 -04:00
Vladimir Mandic 46f2426621 updated minimum version of nodejs to v14 2021-07-29 09:41:17 -04:00
Vladimir Mandic 1de80c9e36 add note on manually disping tensor 2021-06-18 13:39:20 -04:00
Vladimir Mandic b506f67e91 update todo 2021-06-18 09:19:34 -04:00
Vladimir Mandic b408c47847 modularize model loading 2021-06-18 09:16:21 -04:00
Vladimir Mandic 20f9ab4519 update typedoc 2021-06-18 07:25:33 -04:00
Vladimir Mandic 70510e9a2f 2.0.3 2021-06-18 07:20:33 -04:00
Vladimir Mandic 07412090d8 update 2021-06-16 15:47:01 -04:00
Vladimir Mandic 65c4d2581f update 2021-06-16 15:46:05 -04:00
Vladimir Mandic a558dd8870 fix demo paths 2021-06-16 15:40:35 -04:00
Vladimir Mandic 66615cac76 added multithreaded demo 2021-06-14 10:23:06 -04:00
Vladimir Mandic 1491561ad2 2.0.2 2021-06-14 10:20:49 -04:00
Vladimir Mandic 1d7227b02a reorganize demos 2021-06-14 08:16:10 -04:00
Vladimir Mandic f0ed4d2cd5 fix centernet box width & height 2021-06-11 16:12:24 -04:00
Vladimir Mandic e205509a39 update todo 2021-06-09 07:27:19 -04:00
Vladimir Mandic c3ac335771 update 2021-06-09 07:19:03 -04:00
Vladimir Mandic 53f960f821 update demo menu documentation 2021-06-09 07:17:54 -04:00
Vladimir Mandic 7944953ab7 update 2021-06-08 07:37:15 -04:00
Vladimir Mandic 6d91ea3f53 add body segmentation sample 2021-06-08 07:29:08 -04:00
Vladimir Mandic 8d3aacea46 add release notes 2021-06-08 07:09:37 -04:00
Vladimir Mandic b08afe09c0 release 2.0 2021-06-08 07:06:16 -04:00
Vladimir Mandic f2abd3c069 2.0.1 2021-06-08 07:02:11 -04:00
Vladimir Mandic 496d09aab7 add video drag&drop capability 2021-06-07 08:38:16 -04:00
Vladimir Mandic 69330e6335 update readme 2021-06-06 20:49:48 -04:00
Vladimir Mandic c7cd2f8a59 update packages 2021-06-06 20:47:59 -04:00
Vladimir Mandic af3699c769 modularize build platform 2021-06-06 20:34:29 -04:00
Vladimir Mandic 5b81690dc9 custom build tfjs from sources 2021-06-06 19:00:34 -04:00
Vladimir Mandic 7a76f9c065 update wasm to tfjs 3.7.0 2021-06-06 12:58:06 -04:00
Vladimir Mandic af31b5b9ef update defaults 2021-06-05 20:06:36 -04:00
Vladimir Mandic 6d728b4e5c modularize build platform 2021-06-05 17:51:46 -04:00
Vladimir Mandic 22c849593e enable body segmentation and background replacement in demo 2021-06-05 16:13:41 -04:00
Vladimir Mandic 60b7143da4 minor git corruption 2021-06-05 15:23:17 -04:00
Vladimir Mandic 3e73fd8742 update 2021-06-05 15:10:28 -04:00
Vladimir Mandic ac7d1c3e12 update 2021-06-05 13:02:01 -04:00
Vladimir Mandic 4be7fc9294 unified build 2021-06-05 12:59:11 -04:00
Vladimir Mandic 5d42b85084 enable body segmentation and background replacement 2021-06-05 11:54:49 -04:00
Vladimir Mandic 13d82a176a work on body segmentation 2021-06-04 20:22:05 -04:00
Vladimir Mandic f6e91fb47d added experimental body segmentation module 2021-06-04 13:52:40 -04:00
Vladimir Mandic f85356843d add meet and selfie models 2021-06-04 13:51:01 -04:00
Vladimir Mandic 47e2f78324 update for tfjs 3.7.0 2021-06-04 09:20:59 -04:00
Vladimir Mandic b4a9934f92 update 2021-06-04 07:03:34 -04:00
Vladimir Mandic 835fd8d184 update gaze strength calculations 2021-06-03 09:53:11 -04:00
Vladimir Mandic e4cdd3ffca update build with automatic linter 2021-06-03 09:41:53 -04:00
Vladimir Mandic 852f22d786 add live hints to demo 2021-06-02 17:29:50 -04:00
Vladimir Mandic c100d03405 switch worker from module to iife importscripts 2021-06-02 16:46:07 -04:00
Vladimir Mandic 7b19e5d246 release candidate 2021-06-02 13:39:02 -04:00
Vladimir Mandic 2cc6f380ba update wiki 2021-06-02 13:35:59 -04:00
Vladimir Mandic 9d7b7dcdce update tests and demos 2021-06-02 13:35:33 -04:00
Vladimir Mandic 32bb8eba58 added samples to git 2021-06-02 12:44:12 -04:00
Vladimir Mandic 375c6f65fc implemented drag & drop for image processing 2021-06-02 12:43:43 -04:00
Vladimir Mandic 92f225d3df release candidate 2021-06-01 08:59:09 -04:00
Vladimir Mandic 47c5c6c822 breaking changes to results.face output properties 2021-06-01 07:37:17 -04:00
Vladimir Mandic c4df466a9a breaking changes to results.object output properties 2021-06-01 07:07:01 -04:00
Vladimir Mandic 616aad6add breaking changes to results.hand output properties 2021-06-01 07:01:59 -04:00
Vladimir Mandic 00f5bd6fde breaking changes to results.body output properties 2021-06-01 06:55:40 -04:00
Vladimir Mandic dc2b68532f update wiki 2021-05-31 10:40:24 -04:00
Vladimir Mandic 23276d522c implemented human.next global interpolation method 2021-05-31 10:40:07 -04:00
Vladimir Mandic f218f96749 update wiki 2021-05-30 23:22:21 -04:00
Vladimir Mandic 0b62a4f9a3 finished draw buffering and smoothing and enabled by default 2021-05-30 23:21:48 -04:00
Vladimir Mandic d29a47e5a2 update wiki 2021-05-30 18:46:23 -04:00
Vladimir Mandic 25c45b20c6 update typedoc definitions 2021-05-30 18:45:39 -04:00
Vladimir Mandic 52f1fccb28 update pwa scope 2021-05-30 18:00:51 -04:00
Vladimir Mandic 168ad14fda implemented service worker 2021-05-30 17:56:40 -04:00
Vladimir Mandic 168fd473c6 update todo 2021-05-30 12:05:27 -04:00
Vladimir Mandic fa33b660af quantized centernet 2021-05-30 12:03:52 -04:00
Vladimir Mandic 60f6f75d35 release candidate 2021-05-30 12:03:34 -04:00
Vladimir Mandic 55d2848336 added usage restrictions 2021-05-30 09:51:23 -04:00
Vladimir Mandic 5f08806e8f update security policy 2021-05-30 09:41:24 -04:00
Vladimir Mandic e1ba7ef942 quantize handdetect model 2021-05-29 18:29:57 -04:00
Vladimir Mandic dafaca5b3d update todo list 2021-05-29 09:24:09 -04:00
Vladimir Mandic 30df35ec7c added experimental movenet-lightning and removed blazepose from default dist 2021-05-29 09:20:01 -04:00
Vladimir Mandic 0c4574a5a3 update 2021-05-28 15:54:29 -04:00
Vladimir Mandic b9a8d27d9c added experimental face.rotation.gaze 2021-05-28 15:53:51 -04:00
Vladimir Mandic ef22c94d62 fix and optimize for mobile platform 2021-05-28 10:43:48 -04:00
Vladimir Mandic 96ef4a4805 lock typescript to 4.2 due to typedoc incompatibility with 4.3 2021-05-27 16:07:02 -04:00
Vladimir Mandic 53a8d12d7b 1.9.4 2021-05-27 16:05:20 -04:00
Vladimir Mandic 9f0621ba99 fix demo facecompare 2021-05-26 08:52:31 -04:00
Vladimir Mandic 59a1fb3855 webhint and lighthouse optimizations 2021-05-26 08:47:31 -04:00
Vladimir Mandic 92e611e735 update 2021-05-26 07:59:52 -04:00
Vladimir Mandic 3c1111a831 add camera startup diag messages 2021-05-26 07:57:51 -04:00
Vladimir Mandic 2903adbf37 update all box calculations 2021-05-25 08:58:20 -04:00
Vladimir Mandic 7b4a90cfb5 implemented unified result.persons that combines face, body and hands for each person 2021-05-24 11:10:13 -04:00
Vladimir Mandic 02ba8016e2 update iris distance docs 2021-05-24 07:18:03 -04:00
Vladimir Mandic 33e8a92cd8 update iris distance calculations 2021-05-24 07:16:38 -04:00
Vladimir Mandic 19b17acbf5 added experimental results interpolation for smooth draw operations 2021-05-23 13:55:33 -04:00
Vladimir Mandic dbd18e0344 1.9.3 2021-05-23 13:54:44 -04:00
Vladimir Mandic 17ae986665 use green weighted for input diff calculation 2021-05-23 13:54:22 -04:00
Vladimir Mandic c70e9817ef implement experimental drawOptions.bufferedOutput and bufferedFactor 2021-05-23 13:52:49 -04:00
Vladimir Mandic 76f624f78f use explicit tensor interface 2021-05-22 21:54:18 -04:00
Vladimir Mandic 2307767161 add tfjs types and remove all instances of any 2021-05-22 21:47:59 -04:00
Vladimir Mandic 24ec73e037 enhance strong typing 2021-05-22 14:53:51 -04:00
Vladimir Mandic f2ac34f4a9 rebuild all for release 2021-05-22 13:17:07 -04:00
Vladimir Mandic 01e7855578 1.9.2 2021-05-22 13:15:11 -04:00
Vladimir Mandic 15925dd75d add id and boxraw on missing objects 2021-05-22 12:41:29 -04:00
Vladimir Mandic f0420232c6 restructure results strong typing 2021-05-22 12:33:19 -04:00
Vladimir Mandic 21d37c0f31 update dependencies 2021-05-21 06:54:02 -04:00
Vladimir Mandic c71475af90 1.9.1 2021-05-21 06:51:31 -04:00
Vladimir Mandic 5566c145ea caching improvements 2021-05-20 19:14:07 -04:00
Vladimir Mandic 6b736ebb0d add experimental mb3-centernet object detection 2021-05-19 08:27:28 -04:00
291 changed files with 164756 additions and 69246 deletions

api-extractor.json

@@ -1,8 +1,9 @@
 {
   "$schema": "https://developer.microsoft.com/json-schemas/api-extractor/v7/api-extractor.schema.json",
   "mainEntryPointFilePath": "types/lib/src/human.d.ts",
+  "bundledPackages": ["@tensorflow/tfjs-core", "@tensorflow/tfjs-converter", "@tensorflow/tfjs-data", "@tensorflow/tfjs-layers"],
   "compiler": {
-    "skipLibCheck": true
+    "skipLibCheck": false
   },
   "newlineKind": "lf",
   "dtsRollup": {

build.json

@@ -12,10 +12,10 @@
     "clean": ["clean"]
   },
   "clean": {
-    "locations": ["dist/*", "types/*", "typedoc/*"]
+    "locations": ["dist/*", "types/lib/*", "typedoc/*"]
   },
   "lint": {
-    "locations": [ "**/*.json", "src/**/*.ts", "test/**/*.js", "demo/**/*.js", "**/*.md" ],
+    "locations": [ "*.json", "src/**/*.ts", "test/**/*.js", "demo/**/*.js" ],
     "rules": { }
   },
   "changelog": {
@@ -24,8 +24,8 @@
   "serve": {
     "sslKey": "node_modules/@vladmandic/build/cert/https.key",
     "sslCrt": "node_modules/@vladmandic/build/cert/https.crt",
-    "httpPort": 8000,
-    "httpsPort": 8001,
+    "httpPort": 10030,
+    "httpsPort": 10031,
     "documentRoot": ".",
     "defaultFolder": "demo",
     "defaultFile": "index.html"
@@ -39,13 +39,6 @@
     "banner": { "js": "/*\n Human\n homepage: <https://github.com/vladmandic/human>\n author: <https://github.com/vladmandic>'\n*/\n" }
   },
   "targets": [
-    {
-      "name": "tfjs/browser/version",
-      "platform": "browser",
-      "format": "esm",
-      "input": "tfjs/tf-version.ts",
-      "output": "dist/tfjs.version.js"
-    },
     {
       "name": "tfjs/nodejs/cpu",
       "platform": "node",
@@ -84,7 +77,6 @@
       "format": "cjs",
       "input": "tfjs/tf-node-wasm.ts",
       "output": "dist/tfjs.esm.js",
-      "minify": false,
       "external": ["@tensorflow"]
     },
     {
@@ -95,6 +87,13 @@
       "output": "dist/human.node-wasm.js",
       "external": ["@tensorflow"]
     },
+    {
+      "name": "tfjs/browser/version",
+      "platform": "browser",
+      "format": "esm",
+      "input": "tfjs/tf-version.ts",
+      "output": "dist/tfjs.version.js"
+    },
     {
       "name": "tfjs/browser/esm/nobundle",
       "platform": "browser",
@@ -113,13 +112,13 @@
       "external": ["@tensorflow"]
     },
     {
-      "name": "tfjs/browser/esm/bundle",
+      "name": "tfjs/browser/esm/custom",
       "platform": "browser",
       "format": "esm",
-      "input": "tfjs/tf-browser.ts",
+      "input": "tfjs/tf-custom.ts",
       "output": "dist/tfjs.esm.js",
       "sourcemap": false,
-      "minify": true
+      "minify": false
     },
     {
       "name": "human/browser/iife/bundle",
@@ -160,15 +159,6 @@
       "output": "demo/faceid/index.js",
       "sourcemap": true,
       "external": ["*/human.esm.js"]
-    },
-    {
-      "name": "demo/tracker",
-      "platform": "browser",
-      "format": "esm",
-      "input": "demo/tracker/index.ts",
-      "output": "demo/tracker/index.js",
-      "sourcemap": true,
-      "external": ["*/human.esm.js"]
     }
   ]
 },

.eslintrc.json

@@ -1,28 +1,29 @@
 {
-  "globals": {
-  },
-  "rules": {
-    "@typescript-eslint/no-require-imports":"off"
-  },
+  "globals": {},
   "overrides": [
     {
       "files": ["**/*.ts"],
-      "parser": "@typescript-eslint/parser",
-      "parserOptions": { "ecmaVersion": "latest", "project": ["./tsconfig.json"] },
-      "plugins": ["@typescript-eslint"],
       "env": {
         "browser": true,
         "commonjs": false,
         "node": false,
         "es2021": true
       },
+      "parser": "@typescript-eslint/parser",
+      "parserOptions": {
+        "ecmaVersion": "latest",
+        "project": ["./tsconfig.json"]
+      },
+      "plugins": [
+        "@typescript-eslint"
+      ],
       "extends": [
         "airbnb-base",
         "eslint:recommended",
-        "plugin:@typescript-eslint/strict",
+        "plugin:@typescript-eslint/eslint-recommended",
         "plugin:@typescript-eslint/recommended",
         "plugin:@typescript-eslint/recommended-requiring-type-checking",
-        "plugin:@typescript-eslint/eslint-recommended",
+        "plugin:@typescript-eslint/strict",
         "plugin:import/recommended",
         "plugin:promise/recommended"
       ],
@@ -30,12 +31,9 @@
       "@typescript-eslint/ban-ts-comment":"off",
       "@typescript-eslint/dot-notation":"off",
       "@typescript-eslint/no-empty-interface":"off",
-      "@typescript-eslint/no-empty-object-type":"off",
       "@typescript-eslint/no-inferrable-types":"off",
       "@typescript-eslint/no-misused-promises":"off",
-      "@typescript-eslint/no-require-imports":"off",
       "@typescript-eslint/no-unnecessary-condition":"off",
-      "@typescript-eslint/no-unnecessary-type-assertion":"off",
       "@typescript-eslint/no-unsafe-argument":"off",
       "@typescript-eslint/no-unsafe-assignment":"off",
       "@typescript-eslint/no-unsafe-call":"off",
@@ -74,15 +72,20 @@
     },
     {
       "files": ["**/*.d.ts"],
-      "parser": "@typescript-eslint/parser",
-      "parserOptions": { "ecmaVersion": "latest", "project": ["./tsconfig.json"] },
-      "plugins": ["@typescript-eslint"],
       "env": {
         "browser": true,
         "commonjs": false,
         "node": false,
         "es2021": true
       },
+      "parser": "@typescript-eslint/parser",
+      "parserOptions": {
+        "ecmaVersion": "latest",
+        "project": ["./tsconfig.json"]
+      },
+      "plugins": [
+        "@typescript-eslint"
+      ],
       "extends": [
         "airbnb-base",
         "eslint:recommended",
@@ -117,14 +120,18 @@
     },
     {
       "files": ["**/*.js"],
-      "parserOptions": { "sourceType": "module", "ecmaVersion": "latest" },
-      "plugins": [],
       "env": {
         "browser": true,
         "commonjs": true,
         "node": true,
         "es2021": true
       },
+      "parserOptions": {
+        "sourceType": "module",
+        "ecmaVersion": "latest"
+      },
+      "plugins": [
+      ],
       "extends": [
         "airbnb-base",
         "eslint:recommended",
@@ -154,40 +161,42 @@
     },
     {
       "files": ["**/*.json"],
-      "parserOptions": { "ecmaVersion": "latest" },
-      "plugins": ["json"],
       "env": {
         "browser": false,
         "commonjs": false,
         "node": false,
         "es2021": false
       },
-      "extends": []
+      "parserOptions": {
+        "ecmaVersion": "latest"
+      },
+      "plugins": [
+        "json"
+      ],
+      "extends": [
+        "plugin:json/recommended"
+      ]
     },
     {
-      "files": ["**/*.md"],
-      "plugins": ["markdown"],
-      "processor": "markdown/markdown",
+      "files": ["**/*.html"],
+      "env": {
+        "browser": true,
+        "commonjs": false,
+        "node": false,
+        "es2021": false
+      },
+      "parserOptions": {
+        "sourceType": "module",
+        "ecmaVersion": "latest"
+      },
+      "parser": "@html-eslint/parser",
+      "extends": ["plugin:@html-eslint/recommended"],
+      "plugins": [
+        "html", "@html-eslint"
+      ],
       "rules": {
-        "no-undef":"off"
-      }
-    },
-    {
-      "files": ["**/*.md/*.js"],
-      "rules": {
-        "@typescript-eslint/no-unused-vars":"off",
-        "@typescript-eslint/triple-slash-reference":"off",
-        "import/newline-after-import":"off",
-        "import/no-unresolved":"off",
-        "no-console":"off",
-        "no-global-assign":"off",
-        "no-multi-spaces":"off",
-        "no-restricted-globals":"off",
-        "no-undef":"off",
-        "no-unused-vars":"off",
-        "node/no-missing-import":"off",
-        "node/no-missing-require":"off",
-        "promise/catch-or-return":"off"
-      }
+        "@html-eslint/element-newline":"off",
+        "@html-eslint/indent": ["error", 2]
+      }
     }
   ],
@@ -198,7 +207,6 @@
     "demo/helpers/*.js",
     "demo/typescript/*.js",
     "demo/faceid/*.js",
-    "demo/tracker/*.js",
     "typedoc"
   ]
 }

.gitignore (5 lines changed)

@@ -1,9 +1,4 @@
 node_modules/
 types/lib
 pnpm-lock.yaml
-package-lock.json
 *.swp
-samples/**/*.mp4
-samples/**/*.webm
-temp
-tmp

.markdownlint.json

@@ -1,7 +1,6 @@
 {
   "MD012": false,
   "MD013": false,
-  "MD029": false,
   "MD033": false,
   "MD036": false,
   "MD041": false

.npmrc (3 lines changed)

@@ -1,5 +1,4 @@
 force=true
-omit=dev
+production=true
 legacy-peer-deps=true
 strict-peer-dependencies=false
-node-options='--no-deprecation'

CHANGELOG.md

@@ -1,6 +1,6 @@
 # @vladmandic/human
-Version: **3.3.6**
+Version: **2.11.1**
 Description: **Human: AI-powered 3D Face Detection & Rotation Tracking, Face Description & Recognition, Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction, Gesture Recognition**
 Author: **Vladimir Mandic <mandic00@live.com>**
@@ -9,132 +9,11 @@
 ## Changelog
-### **3.3.6** 2025/08/26 mandic00@live.com
-### **origin/main** 2025/02/05 mandic00@live.com
+### **HEAD -> main** 2022/10/09 mandic00@live.com
+### **origin/main** 2022/10/02 mandic00@live.com
-- full rebuild
-### **3.3.5** 2025/02/05 mandic00@live.com
-- rebuild
-- add human.draw.tensor method
-### **3.3.4** 2024/10/24 mandic00@live.com
-### **3.3.3** 2024/10/14 mandic00@live.com
-- add loaded property to model stats and mark models not loaded correctly.
-- release build
-### **3.3.2** 2024/09/11 mandic00@live.com
-- full rebuild
-### **3.3.1** 2024/09/11 mandic00@live.com
-- add config.face.detector.square option
-- human 3.3 alpha test run
-- human 3.3 alpha with new build environment
-- release rebuild
-- fix flazeface tensor scale and update build platform
-### **3.2.2** 2024/04/17 mandic00@live.com
-### **release: 3.2.1** 2024/02/15 mandic00@live.com
-### **3.2.1** 2024/02/15 mandic00@live.com
-### **3.2.0** 2023/12/06 mandic00@live.com
-- set browser false when navigator object is empty
-- https://github.com/vladmandic/human/issues/402
-### **release: 3.1.2** 2023/09/18 mandic00@live.com
-- full rebuild
-### **3.1.2** 2023/09/18 mandic00@live.com
-- major toolkit upgrade
-- full rebuild
-- major toolkit upgrade
-### **3.1.1** 2023/08/05 mandic00@live.com
-- fixes plus tfjs upgrade for new release
-### **3.0.7** 2023/06/12 mandic00@live.com
-- full rebuild
-- fix memory leak in histogramequalization
-- initial work on tracker
-### **3.0.6** 2023/03/21 mandic00@live.com
-- add optional crop to multiple models
-- fix movenet-multipose
-- add electron detection
-- fix gender-ssrnet-imdb
-- add movenet-multipose workaround
-- rebuild and publish
-- add face.detector.minsize configurable setting
-- add affectnet
-### **3.0.5** 2023/02/02 mandic00@live.com
-- add gear-e models
-- detect react-native
-- redo blazeface annotations
-### **3.0.4** 2023/01/29 mandic00@live.com
-- make naviator calls safe
-- fix facedetector-only configs
-### **3.0.3** 2023/01/07 mandic00@live.com
-- full rebuild
-### **3.0.2** 2023/01/06 mandic00@live.com
-- default face.rotation disabled
-### **release: 3.0.1** 2022/11/22 mandic00@live.com
-### **3.0.1** 2022/11/22 mandic00@live.com
-- support dynamic loads
-- polish demos
-- add facedetect demo and fix model async load
-- enforce markdown linting
-- cleanup git history
-- default empty result
-- refactor draw and models namespaces
-- refactor distance
-- add basic anthropometry
-- added webcam id specification
-- include external typedefs
-- prepare external typedefs
-- rebuild all
-- include project files for types
-- architectural improvements
-- refresh dependencies
-- add named exports
-- add draw label templates
-- reduce dev dependencies
-- tensor rank strong typechecks
-- rebuild dependencies
-### **2.11.1** 2022/10/09 mandic00@live.com
-- add rvm segmentation model
 - add human.webcam methods
 - create funding.yml
 - fix rotation interpolation
@@ -146,7 +25,9 @@
 ### **2.10.2** 2022/09/11 mandic00@live.com
 - add node.js esm compatibility (#292)
+- release
+### **release: 2.10.1** 2022/09/07 mandic00@live.com
 ### **2.10.1** 2022/09/07 mandic00@live.com
@@ -182,7 +63,9 @@
 ### **2.9.2** 2022/08/08 mandic00@live.com
+- release rebuild
+### **release: 2.9.1** 2022/07/25 mandic00@live.com
 ### **2.9.1** 2022/07/25 mandic00@live.com
@@ -229,6 +112,7 @@
 - enable precompile as part of warmup
 - prepare release beta
 - change default face crop
+- face attention model is available in human-models
 - beta release 2.7
 - refactor draw methods
 - implement face attention model
@@ -540,6 +424,7 @@
 - implemented human.next global interpolation method
 - finished draw buffering and smoothing and enabled by default
 - implemented service worker
+- quantized centernet
 - release candidate
 - added usage restrictions
 - quantize handdetect model

README.md (170 lines changed)

@ -4,6 +4,7 @@
![Last Commit](https://img.shields.io/github/last-commit/vladmandic/human?style=flat-square&svg=true) ![Last Commit](https://img.shields.io/github/last-commit/vladmandic/human?style=flat-square&svg=true)
![License](https://img.shields.io/github/license/vladmandic/human?style=flat-square&svg=true) ![License](https://img.shields.io/github/license/vladmandic/human?style=flat-square&svg=true)
![GitHub Status Checks](https://img.shields.io/github/checks-status/vladmandic/human/main?style=flat-square&svg=true) ![GitHub Status Checks](https://img.shields.io/github/checks-status/vladmandic/human/main?style=flat-square&svg=true)
![Vulnerabilities](https://img.shields.io/snyk/vulnerabilities/github/vladmandic/human?style=flat-square&svg=true)
# Human Library # Human Library
@ -11,8 +12,7 @@
**Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis,** **Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis,**
**Age & Gender & Emotion Prediction, Gaze Tracking, Gesture Recognition, Body Segmentation** **Age & Gender & Emotion Prediction, Gaze Tracking, Gesture Recognition, Body Segmentation**
<br> JavaScript module using TensorFlow/JS Machine Learning library
## Highlights ## Highlights
- Compatible with most server-side and client-side environments and frameworks - Compatible with most server-side and client-side environments and frameworks
@ -24,26 +24,19 @@
- Simple unified API - Simple unified API
- Built-in Image, Video and WebCam handling - Built-in Image, Video and WebCam handling
[*Jump to Quick Start*](#quick-start)
<br> <br>
## Compatibility ## Compatibility
**Browser**: - **Browser**:
- Compatible with both desktop and mobile platforms Compatible with both desktop and mobile platforms
- Compatible with *WebGPU*, *WebGL*, *WASM*, *CPU* backends Compatible with *CPU*, *WebGL*, *WASM* backends
- Compatible with *WebWorker* execution Compatible with *WebWorker* execution
- Compatible with *WebView* Compatible with *WebView*
- Primary platform: *Chromium*-based browsers - **NodeJS**:
- Secondary platform: *Firefox*, *Safari* Compatibile with *WASM* backend for executions on architectures where *tensorflow* binaries are not available
Compatible with *tfjs-node* using software execution via *tensorflow* shared libraries
**NodeJS**: Compatible with *tfjs-node* using GPU-accelerated execution via *tensorflow* shared libraries and nVidia CUDA
- Compatible with *WASM* backend for execution on architectures where *tensorflow* binaries are not available
- Compatible with *tfjs-node* using software execution via *tensorflow* shared libraries
- Compatible with *tfjs-node* using GPU-accelerated execution via *tensorflow* shared libraries and nVidia CUDA
- Supported versions are from **14.x** to **22.x**
- NodeJS version **23.x** is not supported due to breaking changes and issues with `@tensorflow/tfjs`
<br> <br>
@ -73,9 +66,8 @@
- **Full** [[*Live*]](https://vladmandic.github.io/human/demo/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo): Main browser demo app that showcases all Human capabilities - **Full** [[*Live*]](https://vladmandic.github.io/human/demo/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo): Main browser demo app that showcases all Human capabilities
- **Simple** [[*Live*]](https://vladmandic.github.io/human/demo/typescript/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/typescript): Simple WebCam processing demo in TypeScript - **Simple** [[*Live*]](https://vladmandic.github.io/human/demo/typescript/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/typescript): Simple WebCam processing demo in TypeScript
- **Embedded** [[*Live*]](https://vladmandic.github.io/human/demo/video/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/video/index.html): Even simpler demo with tiny code embedded in HTML file - **Embedded** [[*Live*]](https://vladmandic.github.io/human/demo/video/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/video/index.html): Even simpler demo with tiny code embedded in HTML file
- **Face Detect** [[*Live*]](https://vladmandic.github.io/human/demo/facedetect/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facedetect): Extract faces from images and processes details - **Face Match** [[*Live*]](https://vladmandic.github.io/human/demo/facematch/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Extract faces from images, calculates face descriptors and simmilarities and matches them to known database
- **Face Match** [[*Live*]](https://vladmandic.github.io/human/demo/facematch/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Extract faces from images, calculates face descriptors and similarities and matches them to known database
- **Face ID** [[*Live*]](https://vladmandic.github.io/human/demo/faceid/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/faceid): Runs multiple checks to validate webcam input before performing face match to faces in IndexDB - **Face ID** [[*Live*]](https://vladmandic.github.io/human/demo/faceid/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/faceid): Runs multiple checks to validate webcam input before performing face match to faces in IndexDB
- **Multi-thread** [[*Live*]](https://vladmandic.github.io/human/demo/multithread/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread): Runs each Human module in a separate web worker for highest possible performance - **Multi-thread** [[*Live*]](https://vladmandic.github.io/human/demo/multithread/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread): Runs each Human module in a separate web worker for highest possible performance
- **NextJS** [[*Live*]](https://vladmandic.github.io/human-next/out/index.html) [[*Details*]](https://github.com/vladmandic/human-next): Use Human with TypeScript, NextJS and ReactJS - **NextJS** [[*Live*]](https://vladmandic.github.io/human-next/out/index.html) [[*Details*]](https://github.com/vladmandic/human-next): Use Human with TypeScript, NextJS and ReactJS
@ -89,15 +81,14 @@
*NodeJS demos may require extra dependencies which are used to decode inputs* *NodeJS demos may require extra dependencies which are used to decode inputs*
*See header of each demo to see its dependencies as they are not automatically installed with `Human`* *See header of each demo to see its dependencies as they are not automatically installed with `Human`*
- **Main** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node.js): Process images from files, folders or URLs using native methods - **Main** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Process images from files, folders or URLs using native methods
- **Canvas** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-canvas.js): Process image from file or URL and draw results to a new image file using `node-canvas` - **Canvas** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Process image from file or URL and draw results to a new image file using `node-canvas`
- **Video** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-video.js): Processing of video input using `ffmpeg` - **Video** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Processing of video input using `ffmpeg`
- **WebCam** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-webcam.js): Processing of webcam screenshots using `fswebcam` - **WebCam** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Processing of webcam screenshots using `fswebcam`
- **Events** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-event.js): Showcases usage of `Human` eventing to get notifications on processing - **Events** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Showcases usage of `Human` eventing to get notifications on processing
- **Similarity** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs/node-similarity.js): Compares two input images for similarity of detected faces - **Similarity** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Compares two input images for similarity of detected faces
- **Face Match** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch/node-match.js): Parallel processing of face **match** in multiple child worker threads - **Face Match** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Parallel processing of face **match** in multiple child worker threads
- **Multiple Workers** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread/node-multiprocess.js): Runs multiple parallel `human` by dispatching them to a pool of pre-created worker processes - **Multiple Workers** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Runs multiple parallel `human` by dispatching them to a pool of pre-created worker processes
- **Dynamic Load** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Loads Human dynamically with multiple different desired backends
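For orientation, a minimal sketch of the NodeJS flow these demos share is shown below; the require form, model location and image path are assumptions and not taken from any specific demo, so check each demo header for its exact dependencies.

```js
// a minimal NodeJS sketch, assuming `npm install @vladmandic/human @tensorflow/tfjs-node`
// the model path and image file name below are placeholders
const fs = require('fs');
const tf = require('@tensorflow/tfjs-node');
const { Human } = require('@vladmandic/human');

async function main() {
  const human = new Human({ modelBasePath: 'file://models/' }); // assumed local model folder
  const buffer = fs.readFileSync('input.jpg');       // read image from disk
  const tensor = tf.node.decodeImage(buffer, 3);     // decode into a 3-channel RGB tensor
  const result = await human.detect(tensor);         // run detection on the tensor input
  console.log({ faces: result.face.length, bodies: result.body.length });
  tf.dispose(tensor);                                // release tensor memory when done
}

main();
```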
## Project pages ## Project pages
@ -116,7 +107,6 @@
- [**Usage & Functions**](https://github.com/vladmandic/human/wiki/Usage) - [**Usage & Functions**](https://github.com/vladmandic/human/wiki/Usage)
- [**Configuration Details**](https://github.com/vladmandic/human/wiki/Config) - [**Configuration Details**](https://github.com/vladmandic/human/wiki/Config)
- [**Result Details**](https://github.com/vladmandic/human/wiki/Result) - [**Result Details**](https://github.com/vladmandic/human/wiki/Result)
- [**Customizing Draw Methods**](https://github.com/vladmandic/human/wiki/Draw)
- [**Caching & Smoothing**](https://github.com/vladmandic/human/wiki/Caching) - [**Caching & Smoothing**](https://github.com/vladmandic/human/wiki/Caching)
- [**Input Processing**](https://github.com/vladmandic/human/wiki/Image) - [**Input Processing**](https://github.com/vladmandic/human/wiki/Image)
- [**Face Recognition & Face Description**](https://github.com/vladmandic/human/wiki/Embedding) - [**Face Recognition & Face Description**](https://github.com/vladmandic/human/wiki/Embedding)
@ -151,21 +141,24 @@
## App Examples ## App Examples
Visit [Examples gallery](https://vladmandic.github.io/human/samples/index.html) for more examples Visit [Examples gallery](https://vladmandic.github.io/human/samples/index.html) for more examples
[<img src="assets/samples.jpg" width="640"/>](assets/samples.jpg) <https://vladmandic.github.io/human/samples/index.html>
![samples](assets/samples.jpg)
<br> <br>
## Options ## Options
All options as presented in the demo application... All options as presented in the demo application...
[demo/index.html](demo/index.html) > [demo/index.html](demo/index.html)
[<img src="assets/screenshot-menu.png"/>](assets/screenshot-menu.png)
![Options visible in demo](assets/screenshot-menu.png)
<br> <br>
**Results Browser:** **Results Browser:**
[ *Demo -> Display -> Show Results* ]<br> [ *Demo -> Display -> Show Results* ]<br>
[<img src="assets/screenshot-results.png"/>](assets/screenshot-results.png) ![Results](assets/screenshot-results.png)
<br> <br>
@ -177,39 +170,33 @@ sorts them by similarity to selected face
and optionally matches detected face with database of known people to guess their names and optionally matches detected face with database of known people to guess their names
> [demo/facematch](demo/facematch/index.html) > [demo/facematch](demo/facematch/index.html)
[<img src="assets/screenshot-facematch.jpg" width="640"/>](assets/screenshot-facematch.jpg) ![Face Matching](assets/screenshot-facematch.jpg)
2. **Face Detect:** 2. **Face ID:**
Extracts all detected faces from loaded images on-demand and highlights face details on a selected face
> [demo/facedetect](demo/facedetect/index.html)
[<img src="assets/screenshot-facedetect.jpg" width="640"/>](assets/screenshot-facedetect.jpg)
3. **Face ID:**
Performs validation check on a webcam input to detect a real face and matches it to known faces stored in database Performs validation check on a webcam input to detect a real face and matches it to known faces stored in database
> [demo/faceid](demo/faceid/index.html) > [demo/faceid](demo/faceid/index.html)
[<img src="assets/screenshot-faceid.jpg" width="640"/>](assets/screenshot-faceid.jpg) ![Face Matching](assets/screenshot-faceid.jpg)
<br> <br>
4. **3D Rendering:** 3. **3D Rendering:**
> [human-motion](https://github.com/vladmandic/human-motion) > [human-motion](https://github.com/vladmandic/human-motion)
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-face.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-face.jpg) ![Face3D](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-face.jpg)
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-body.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-body.jpg) ![Body3D](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-body.jpg)
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-hand.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-hand.jpg) ![Hand3D](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-hand.jpg)
<br> <br>
5. **VR Model Tracking:** 4. **VR Model Tracking:**
> [human-three-vrm](https://github.com/vladmandic/human-three-vrm) > [human-three-vrm](https://github.com/vladmandic/human-three-vrm)
> [human-bjs-vrm](https://github.com/vladmandic/human-bjs-vrm) > [human-bjs-vrm](https://github.com/vladmandic/human-bjs-vrm)
[<img src="https://github.com/vladmandic/human-three-vrm/raw/main/assets/human-vrm-screenshot.jpg" width="640"/>](https://github.com/vladmandic/human-three-vrm/raw/main/assets/human-vrm-screenshot.jpg) ![ThreeVRM](https://github.com/vladmandic/human-three-vrm/raw/main/assets/human-vrm-screenshot.jpg)
6. **Human as OS native application:** 5. **Human as OS native application:**
> [human-electron](https://github.com/vladmandic/human-electron) > [human-electron](https://github.com/vladmandic/human-electron)
<br> <br>
@ -217,7 +204,7 @@ Performs validation check on a webcam input to detect a real face and matches it
**468-Point Face Mesh Details:** **468-Point Face Mesh Details:**
(view in full resolution to see keypoints) (view in full resolution to see keypoints)
[<img src="assets/facemesh.png" width="400"/>](assets/facemesh.png) ![FaceMesh](assets/facemesh.png)
<br><hr><br> <br><hr><br>
@ -227,16 +214,33 @@ Simply load `Human` (*IIFE version*) directly from a cloud CDN in your HTML file
(pick one: `jsdelivr`, `unpkg` or `cdnjs`) (pick one: `jsdelivr`, `unpkg` or `cdnjs`)
```html ```html
<!DOCTYPE HTML>
<script src="https://cdn.jsdelivr.net/npm/@vladmandic/human/dist/human.js"></script> <script src="https://cdn.jsdelivr.net/npm/@vladmandic/human/dist/human.js"></script>
<script src="https://unpkg.dev/@vladmandic/human/dist/human.js"></script> <script src="https://unpkg.dev/@vladmandic/human/dist/human.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/human/3.0.0/human.js"></script> <script src="https://cdnjs.cloudflare.com/ajax/libs/human/2.1.5/human.js"></script>
``` ```
For details, including how to use `Browser ESM` version or `NodeJS` version of `Human`, see [**Installation**](https://github.com/vladmandic/human/wiki/Install) For details, including how to use `Browser ESM` version or `NodeJS` version of `Human`, see [**Installation**](https://github.com/vladmandic/human/wiki/Install)
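As a hedged illustration only, a minimal Browser ESM sketch follows; it assumes the package is installed from npm and that a bundler or import map resolves the bare module specifier, and the model path shown is an assumption to be adjusted to wherever the models are hosted.

```js
// a minimal Browser ESM sketch, assuming `npm install @vladmandic/human`
// (the IIFE build above exposes the same API on the global `Human` namespace instead)
import { Human } from '@vladmandic/human';

const human = new Human({ modelBasePath: 'https://vladmandic.github.io/human-models/models/' }); // assumed model location
await human.load();    // optional: pre-load configured models
await human.warmup();  // optional: pre-compile models so the first detection is fast
```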
<br> <br>
## Inputs
`Human` library can process all known input types:
- `Image`, `ImageData`, `ImageBitmap`, `Canvas`, `OffscreenCanvas`, `Tensor`,
- `HTMLImageElement`, `HTMLCanvasElement`, `HTMLVideoElement`, `HTMLMediaElement`
Additionally, `HTMLVideoElement`, `HTMLMediaElement` can be a standard `<video>` tag that links to:
- WebCam on user's system
- Any supported video type
e.g. `.mp4`, `.avi`, etc.
- Additional video types supported via *HTML5 Media Source Extensions*
e.g.: **HLS** (*HTTP Live Streaming*) using `hls.js` or **DASH** (*Dynamic Adaptive Streaming over HTTP*) using `dash.js`
- **WebRTC** media track using built-in support
<br>
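A short sketch of feeding different input types to detection follows; the element ids and the `OffscreenCanvas` size are placeholders, and `human` is assumed to be an already-created instance (see the code examples below).

```js
// hedged sketch: any of the listed input types can be passed to human.detect()
const video = document.getElementById('video-id');   // <video> bound to a webcam or media stream
const image = document.getElementById('image-id');   // plain <img> element
const offscreen = new OffscreenCanvas(512, 512);     // canvas-like inputs are accepted as well

const fromVideo = await human.detect(video);
const fromImage = await human.detect(image);
const fromCanvas = await human.detect(offscreen);
console.log(fromVideo.face.length, fromImage.face.length, fromCanvas.face.length);
```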
## Code Examples ## Code Examples
Simple app that uses Human to process video input and Simple app that uses Human to process video input and
@ -245,7 +249,7 @@ draw output on screen using internal draw helper functions
```js ```js
// create instance of human with simple configuration using default values // create instance of human with simple configuration using default values
const config = { backend: 'webgl' }; const config = { backend: 'webgl' };
const human = new Human.Human(config); const human = new Human(config);
// select input HTMLVideoElement and output HTMLCanvasElement from page // select input HTMLVideoElement and output HTMLCanvasElement from page
const inputVideo = document.getElementById('video-id'); const inputVideo = document.getElementById('video-id');
const outputCanvas = document.getElementById('canvas-id'); const outputCanvas = document.getElementById('canvas-id');
@ -264,7 +268,6 @@ function detectVideo() {
human.draw.gesture(outputCanvas, result.gesture); human.draw.gesture(outputCanvas, result.gesture);
// and loop immediate to the next frame // and loop immediate to the next frame
requestAnimationFrame(detectVideo); requestAnimationFrame(detectVideo);
return result;
}); });
} }
@ -304,7 +307,7 @@ human.events.addEventListener('detect', () => { // event gets triggered when det
function detectVideo() { function detectVideo() {
human.detect(inputVideo) // run detection human.detect(inputVideo) // run detection
.then(() => requestAnimationFrame(detectVideo)); // upon detect complete start processing of the next frame .then(() => requestAnimationFrame(detectVideo)); // upon detect complete start processing of the next frame
} }
detectVideo(); // start loop detectVideo(); // start loop
@ -372,53 +375,6 @@ drawResults(); // start draw loop
And for even better results, you can run detection in a separate web worker thread And for even better results, you can run detection in a separate web worker thread
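A hedged sketch of such a worker setup is below; the file names, the choice of `wasm` backend, and the use of `ImageData` messaging are assumptions rather than the project's exact approach (see `demo/multithread` for the real implementation).

```js
// worker.js -- assumes the IIFE bundle human.js is served next to this script
importScripts('human.js');                            // exposes the global `Human` namespace
const human = new Human.Human({ backend: 'wasm' });   // wasm chosen since WebGL needs OffscreenCanvas support
onmessage = async (msg) => {
  const result = await human.detect(msg.data.image);  // ImageData posted from the main thread
  postMessage({ faces: result.face.length });
};

// main thread -- capture a frame as ImageData and hand it to the worker
// const worker = new Worker('worker.js');
// worker.onmessage = (msg) => console.log('worker result', msg.data);
// const ctx = canvas.getContext('2d');
// ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
// const image = ctx.getImageData(0, 0, canvas.width, canvas.height);
// worker.postMessage({ image }); // an ImageBitmap from createImageBitmap() can be transferred instead for zero-copy
```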
<br><hr><br>
## Inputs
`Human` library can process all known input types:
- `Image`, `ImageData`, `ImageBitmap`, `Canvas`, `OffscreenCanvas`, `Tensor`,
- `HTMLImageElement`, `HTMLCanvasElement`, `HTMLVideoElement`, `HTMLMediaElement`
Additionally, `HTMLVideoElement`, `HTMLMediaElement` can be a standard `<video>` tag that links to:
- WebCam on user's system
- Any supported video type
e.g. `.mp4`, `.avi`, etc.
- Additional video types supported via *HTML5 Media Source Extensions*
e.g.: **HLS** (*HTTP Live Streaming*) using `hls.js` or **DASH** (*Dynamic Adaptive Streaming over HTTP*) using `dash.js`
- **WebRTC** media track using built-in support
<br><hr><br>
## Detailed Usage
- [**Wiki Home**](https://github.com/vladmandic/human/wiki)
- [**List of all available methods, properties and namespaces**](https://github.com/vladmandic/human/wiki/Usage)
- [**TypeDoc API Specification - Main class**](https://vladmandic.github.io/human/typedoc/classes/Human.html)
- [**TypeDoc API Specification - Full**](https://vladmandic.github.io/human/typedoc/)
![typedoc](assets/screenshot-typedoc.png)
<br><hr><br>
## TypeDefs
`Human` is written using TypeScript strong typing and ships with full **TypeDefs** for all classes defined by the library bundled in `types/human.d.ts` and enabled by default
*Note*: This does not include embedded `tfjs`
If you want to use embedded `tfjs` inside `Human` (`human.tf` namespace) and still get full **typedefs**, add this code:
> import type * as tfjs from '@vladmandic/human/dist/tfjs.esm';
> const tf = human.tf as typeof tfjs;
This is not enabled by default as `Human` does not ship with full **TFJS TypeDefs** due to size considerations
Enabling `tfjs` TypeDefs as above creates additional project (dev-only as only types are required) dependencies as defined in `@vladmandic/human/dist/tfjs.esm.d.ts`:
> @tensorflow/tfjs-core, @tensorflow/tfjs-converter, @tensorflow/tfjs-backend-wasm, @tensorflow/tfjs-backend-webgl
<br><hr><br> <br><hr><br>
## Default models ## Default models
@ -448,9 +404,9 @@ For more info, see [**Configuration Details**](https://github.com/vladmandic/hum
<br><hr><br> <br><hr><br>
`Human` library is written in [TypeScript](https://www.typescriptlang.org/docs/handbook/intro.html) **5.1** using [TensorFlow/JS](https://www.tensorflow.org/js/) **4.10** and conforming to latest `JavaScript` [ECMAScript version 2022](https://262.ecma-international.org/) standard `Human` library is written in `TypeScript` [4.8](https://www.typescriptlang.org/docs/handbook/intro.html)
Conforming to latest `JavaScript` [ECMAScript version 2022](https://262.ecma-international.org/) standard
Build target for distributables is `JavaScript` [ECMAScript version 2018](https://262.ecma-international.org/9.0/) Build target is `JavaScript` [ECMAScript version 2018](https://262.ecma-international.org/11.0/)
<br> <br>

51
TODO.md
View File

@ -2,6 +2,21 @@
## Work-in-Progress ## Work-in-Progress
<hr><br>
## Exploring
- **Optical flow** for intelligent temporal interpolation
<https://docs.opencv.org/3.3.1/db/d7f/tutorial_js_lucas_kanade.html>
- **CLAHE** advanced histogram equalization for optimization of badly lit scenes
- **TFLite** models
<https://js.tensorflow.org/api_tflite/0.0.1-alpha.4/>
- **Principal Components Analysis** for reduction of descriptor complexity
<https://github.com/vladmandic/human-match/blob/main/src/pca.js>
- **Temporal guidance** for face/body segmentation
<https://github.com/PeterL1n/RobustVideoMatting>
<hr><br> <hr><br>
## Known Issues & Limitations ## Known Issues & Limitations
@ -16,9 +31,10 @@ No issues with default model `FaceMesh`
`NanoDet` model is not supported when using `WASM` backend due to missing kernel op in **TFJS** `NanoDet` model is not supported when using `WASM` backend due to missing kernel op in **TFJS**
No issues with default model `MB3-CenterNet` No issues with default model `MB3-CenterNet`
## Body Detection using MoveNet-MultiPose ### WebGPU
Model does not return valid detection scores (all other functionality is not impacted) Experimental support only until support is officially added in Chromium
Enable via <chrome://flags/#enable-unsafe-webgpu>
### Firefox ### Firefox
@ -31,8 +47,31 @@ Enable via `about:config` -> `gfx.offscreencanvas.enabled`
No support for running in **web workers** as Safari still does not support `OffscreenCanvas` No support for running in **web workers** as Safari still does not support `OffscreenCanvas`
[Details](https://developer.mozilla.org/en-US/docs/Web/API/OffscreenCanvas#browser_compatibility) [Details](https://developer.mozilla.org/en-US/docs/Web/API/OffscreenCanvas#browser_compatibility)
## React-Native
`Human` support for **React-Native** is best-effort, but not part of the main development focus
<hr><br> <hr><br>
## Pending Release Changes
- New methods [`human.webcam.*`](https://vladmandic.github.io/human/typedoc/classes/WebCam.html)
Enables built-in configuration and control of **WebCam** streams
- New method [`human.video()`](https://vladmandic.github.io/human/typedoc/classes/Human.html#video)
Runs continuous detection of an input **video**
instead of processing each frame manually using `human.detect()` (see the sketch after this list)
- New demo for **webcam** and **video** methods [*Live*](https://vladmandic.github.io/human/demo/video/index.html) | [*Code*](https://github.com/vladmandic/human/blob/main/demo/video/index.html)
*Full HTML and JavaScript code in less than a screen*
- Redesigned [`human.segmentation`](https://vladmandic.github.io/human/typedoc/classes/Human.html#segmentation)
*Breaking changes*
- New model `rvm` for high-quality body segmentation in real-time
*Not part of default deployment, download from [human-models](https://github.com/vladmandic/human-models/tree/main/models)*
- New demo for **segmentation** methods [*Live*](https://vladmandic.github.io/human/demo/segmentation/index.html) | [*Code*](https://github.com/vladmandic/human/blob/main/demo/segmentation/index.html)
*Full HTML and JavaScript code in less than a screen*
- New advanced demo using **BabylonJS and VRM** [*Live*](https://vladmandic.github.io/human-bjs-vrm) | [*Code*](https://github.com/vladmandic/human-bjs-vrm)
- Update **TypeDoc** generation [*Link*](https://vladmandic.github.io/human/typedoc)
- Update **TypeDefs** bundle generation [*Link*](https://github.com/vladmandic/human/blob/main/types/human.d.ts)
No external dependencies
- Fix model caching when using web workers
- Fix `face.rotation` when using interpolation
- Improve NodeJS resolver when using ESM
- Update demo `demo/typescript`
- Update demo `demo/faceid`
- Update demo `demo/nodejs/process-folder.js`
and re-process `/samples` [*Link*](https://vladmandic.github.io/human/samples)
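The new webcam and video helpers listed above could be wired together roughly as follows; this is only a sketch, and the `element` option name plus the drawing calls are assumptions, so consult the linked TypeDoc pages for the actual signatures.

```js
// hedged sketch of human.webcam.* and human.video(); option names are assumptions, see TypeDoc
const video = document.getElementById('video-id');
const canvas = document.getElementById('canvas-id');
await human.webcam.start({ element: video });  // start and attach a webcam stream (assumed option name)
await human.video(video);                      // run continuous detection on the video input
setInterval(() => {                            // draw interpolated results independently of detection
  const interpolated = human.next();
  human.draw.canvas(video, canvas);
  human.draw.all(canvas, interpolated);
}, 30);
```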

Binary files not shown (image assets; filenames not listed). Size changes: 70 KiB (before only), 22 KiB → 41 KiB, 14 KiB → 34 KiB, 56 KiB (after only), 38 KiB (before only).

103
build.js
View File

@ -31,42 +31,25 @@ const apiExtractorIgnoreList = [ // eslint-disable-line no-unused-vars
'tsdoc-unnecessary-backslash', 'tsdoc-unnecessary-backslash',
]; ];
const regEx = [ function copy(src, dst) {
{ search: 'types="@webgpu/types/dist"', replace: 'path="../src/types/webgpu.d.ts"' }, if (!fs.existsSync(src)) return;
{ search: 'types="offscreencanvas"', replace: 'path="../src/types/offscreencanvas.d.ts"' },
];
function copyFile(src, dst) {
if (!fs.existsSync(src)) {
log.warn('Copy:', { input: src, output: dst });
return;
}
log.state('Copy:', { input: src, output: dst });
const buffer = fs.readFileSync(src); const buffer = fs.readFileSync(src);
fs.writeFileSync(dst, buffer); fs.writeFileSync(dst, buffer);
} }
function writeFile(str, dst) { function write(str, dst) {
log.state('Write:', { output: dst });
fs.writeFileSync(dst, str); fs.writeFileSync(dst, str);
} }
function regExFile(src, entries) { function filter(str, src) {
if (!fs.existsSync(src)) { if (!fs.existsSync(src)) return;
log.warn('Filter:', { src }); const buffer = fs.readFileSync(src, 'UTF-8');
return; const lines = buffer.split(/\r?\n/);
} const out = [];
log.state('Filter:', { input: src }); for (const line of lines) {
for (const entry of entries) { if (!line.includes(str)) out.push(line);
const buffer = fs.readFileSync(src, 'UTF-8');
const lines = buffer.split(/\r?\n/);
const out = [];
for (const line of lines) {
if (line.includes(entry.search)) out.push(line.replace(entry.search, entry.replace));
else out.push(line);
}
fs.writeFileSync(src, out.join('\n'));
} }
fs.writeFileSync(src, out.join('\n'));
} }
async function analyzeModels() { async function analyzeModels() {
@ -106,44 +89,44 @@ async function analyzeModels() {
async function main() { async function main() {
log.logFile(logFile); log.logFile(logFile);
log.data('Build', { name: packageJSON.name, version: packageJSON.version }); log.data('Build', { name: packageJSON.name, version: packageJSON.version });
// run production build // run production build
const build = new Build(); const build = new Build();
await build.run('production'); await build.run('production');
// patch tfjs typedefs // patch tfjs typedefs
copyFile('node_modules/@vladmandic/tfjs/types/tfjs-core.d.ts', 'types/tfjs-core.d.ts'); log.state('Copy:', { input: 'tfjs/tfjs.esm.d.ts' });
copyFile('node_modules/@vladmandic/tfjs/types/tfjs.d.ts', 'types/tfjs.esm.d.ts'); copy('tfjs/tfjs.esm.d.ts', 'types/lib/dist/tfjs.esm.d.ts');
copyFile('src/types/tsconfig.json', 'types/tsconfig.json');
copyFile('src/types/eslint.json', 'types/.eslintrc.json');
copyFile('src/types/tfjs.esm.d.ts', 'dist/tfjs.esm.d.ts');
regExFile('types/tfjs-core.d.ts', regEx);
// run api-extractor to create typedef rollup // run api-extractor to create typedef rollup
const extractorConfig = APIExtractor.ExtractorConfig.loadFileAndPrepare('.api-extractor.json'); const extractorConfig = APIExtractor.ExtractorConfig.loadFileAndPrepare('.api-extractor.json');
try { const extractorResult = APIExtractor.Extractor.invoke(extractorConfig, {
const extractorResult = APIExtractor.Extractor.invoke(extractorConfig, { localBuild: true,
localBuild: true, showVerboseMessages: false,
showVerboseMessages: false, messageCallback: (msg) => {
messageCallback: (msg) => { msg.handled = true;
msg.handled = true; if (msg.logLevel === 'none' || msg.logLevel === 'verbose' || msg.logLevel === 'info') return;
if (msg.logLevel === 'none' || msg.logLevel === 'verbose' || msg.logLevel === 'info') return; if (msg.sourceFilePath?.includes('/node_modules/')) return;
if (msg.sourceFilePath?.includes('/node_modules/')) return; // if (apiExtractorIgnoreList.reduce((prev, curr) => prev || msg.messageId.includes(curr), false)) return; // those are external issues outside of human control
// if (apiExtractorIgnoreList.reduce((prev, curr) => prev || msg.messageId.includes(curr), false)) return; // those are external issues outside of human control log.data('API', { level: msg.logLevel, category: msg.category, id: msg.messageId, file: msg.sourceFilePath, line: msg.sourceFileLine, text: msg.text });
log.data('API', { level: msg.logLevel, category: msg.category, id: msg.messageId, file: msg.sourceFilePath, line: msg.sourceFileLine, text: msg.text }); },
}, });
}); log.state('API-Extractor:', { succeeeded: extractorResult.succeeded, errors: extractorResult.errorCount, warnings: extractorResult.warningCount });
log.state('API-Extractor:', { succeeeded: extractorResult.succeeded, errors: extractorResult.errorCount, warnings: extractorResult.warningCount }); // distribute typedefs
} catch (err) { // log.state('Copy:', { input: 'types/human.d.ts' });
log.error('API-Extractor:', err); // copy('types/human.d.ts', 'dist/human.esm-nobundle.d.ts');
} // copy('types/human.d.ts', 'dist/human.esm.d.ts');
regExFile('types/human.d.ts', regEx); // copy('types/human.d.ts', 'dist/human.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.esm-nobundle.d.ts'); // copy('types/human.d.ts', 'dist/human.node-gpu.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.esm.d.ts'); // copy('types/human.d.ts', 'dist/human.node.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.d.ts'); // copy('types/human.d.ts', 'dist/human.node-wasm.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.node-gpu.d.ts'); log.state('Filter:', { input: 'types/human.d.ts' });
writeFile('export * from \'../types/human\';', 'dist/human.node.d.ts'); filter('reference types', 'types/human.d.ts');
writeFile('export * from \'../types/human\';', 'dist/human.node-wasm.d.ts'); log.state('Link:', { input: 'types/human.d.ts' });
write('export * from \'../types/human\';', 'dist/human.esm-nobundle.d.ts');
write('export * from \'../types/human\';', 'dist/human.esm.d.ts');
write('export * from \'../types/human\';', 'dist/human.d.ts');
write('export * from \'../types/human\';', 'dist/human.node-gpu.d.ts');
write('export * from \'../types/human\';', 'dist/human.node.d.ts');
write('export * from \'../types/human\';', 'dist/human.node-wasm.d.ts');
// export * from '../types/human';
// generate model signature // generate model signature
await analyzeModels(); await analyzeModels();

View File

@ -8,7 +8,6 @@ For details on other demos see Wiki: [**Demos**](https://github.com/vladmandic/h
`index.html`: Full demo using `Human` ESM module running in Browsers, `index.html`: Full demo using `Human` ESM module running in Browsers,
Includes: Includes:
- Selectable inputs: - Selectable inputs:
- Sample images - Sample images
- Image via drag & drop - Image via drag & drop
@ -38,14 +37,12 @@ Includes:
in `index.js:ui` in `index.js:ui`
```js ```js
const ui = {
console: true, // log messages to browser console console: true, // log messages to browser console
useWorker: true, // use web workers for processing useWorker: true, // use web workers for processing
buffered: true, // should output be buffered between frames buffered: true, // should output be buffered between frames
interpolated: true, // should output be interpolated for smoothness between frames interpolated: true, // should output be interpolated for smoothness between frames
results: false, // show results tree results: false, // show results tree
useWebRTC: false, // use webrtc as camera source instead of local webcam useWebRTC: false, // use webrtc as camera source instead of local webcam
};
``` ```
Demo implements several ways to use `Human` library, Demo implements several ways to use `Human` library,

View File

@ -1,160 +0,0 @@
/**
* Human demo for browsers
*
* Demo for face detection
*/
/** @type {Human} */
import { Human } from '../../dist/human.esm.js';
let loader;
const humanConfig = { // user configuration for human, used to fine-tune behavior
cacheSensitivity: 0,
debug: true,
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
filter: { enabled: true, equalization: false, flip: false },
face: {
enabled: true,
detector: { rotation: false, maxDetected: 100, minConfidence: 0.2, return: true, square: false },
iris: { enabled: true },
description: { enabled: true },
emotion: { enabled: true },
antispoof: { enabled: true },
liveness: { enabled: true },
},
body: { enabled: false },
hand: { enabled: false },
object: { enabled: false },
gesture: { enabled: false },
segmentation: { enabled: false },
};
const human = new Human(humanConfig); // new instance of human
export const showLoader = (msg) => { loader.setAttribute('msg', msg); loader.style.display = 'block'; };
export const hideLoader = () => loader.style.display = 'none';
class ComponentLoader extends HTMLElement { // watch for attributes
message = document.createElement('div');
static get observedAttributes() { return ['msg']; }
attributeChangedCallback(_name, _prevVal, currVal) {
this.message.innerHTML = currVal;
}
connectedCallback() { // triggered on insert
this.attachShadow({ mode: 'open' });
const css = document.createElement('style');
css.innerHTML = `
.loader-container { top: 450px; justify-content: center; position: fixed; width: 100%; }
.loader-message { font-size: 1.5rem; padding: 1rem; }
.loader { width: 300px; height: 300px; border: 3px solid transparent; border-radius: 50%; border-top: 4px solid #f15e41; animation: spin 4s linear infinite; position: relative; }
.loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: 4px solid transparent; }
.loader::before { border-top-color: #bad375; animation: 3s spin linear infinite; }
.loader::after { border-top-color: #26a9e0; animation: spin 1.5s linear infinite; }
@keyframes spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } }
`;
const container = document.createElement('div');
container.id = 'loader-container';
container.className = 'loader-container';
loader = document.createElement('div');
loader.id = 'loader';
loader.className = 'loader';
this.message.id = 'loader-message';
this.message.className = 'loader-message';
this.message.innerHTML = '';
container.appendChild(this.message);
container.appendChild(loader);
this.shadowRoot?.append(css, container);
loader = this; // eslint-disable-line @typescript-eslint/no-this-alias
}
}
customElements.define('component-loader', ComponentLoader);
function addFace(face, source) {
const deg = (rad) => Math.round((rad || 0) * 180 / Math.PI);
const canvas = document.createElement('canvas');
const emotion = face.emotion?.map((e) => `${Math.round(100 * e.score)}% ${e.emotion}`) || [];
const rotation = `pitch ${deg(face.rotation?.angle.pitch)}° | roll ${deg(face.rotation?.angle.roll)}° | yaw ${deg(face.rotation?.angle.yaw)}°`;
const gaze = `direction ${deg(face.rotation?.gaze.bearing)}° strength ${Math.round(100 * (face.rotation?.gaze.strength || 0))}%`;
canvas.title = `
source: ${source}
score: ${Math.round(100 * face.boxScore)}% detection ${Math.round(100 * face.faceScore)}% analysis
age: ${face.age} years | gender: ${face.gender} score ${Math.round(100 * face.genderScore)}%
emotion: ${emotion.join(' | ')}
head rotation: ${rotation}
eyes gaze: ${gaze}
camera distance: ${face.distance}m | ${Math.round(100 * face.distance / 2.54)}in
check: ${Math.round(100 * face.real)}% real ${Math.round(100 * face.live)}% live
`.replace(/ /g, ' ');
canvas.onclick = (e) => {
e.preventDefault();
document.getElementById('description').innerHTML = canvas.title;
};
human.draw.tensor(face.tensor, canvas);
human.tf.dispose(face.tensor);
return canvas;
}
async function addFaces(imgEl) {
showLoader('human: busy');
const faceEl = document.getElementById('faces');
faceEl.innerHTML = '';
const res = await human.detect(imgEl);
console.log(res); // eslint-disable-line no-console
document.getElementById('description').innerHTML = `detected ${res.face.length} faces`;
for (const face of res.face) {
const canvas = addFace(face, imgEl.src.substring(0, 64));
faceEl.appendChild(canvas);
}
hideLoader();
}
function addImage(imageUri) {
const imgEl = new Image(256, 256);
imgEl.onload = () => {
const images = document.getElementById('images');
images.appendChild(imgEl); // add image if loaded ok
images.scroll(images?.offsetWidth, 0);
};
imgEl.onerror = () => console.error('addImage', { imageUri }); // eslint-disable-line no-console
imgEl.onclick = () => addFaces(imgEl);
imgEl.title = imageUri.substring(0, 64);
imgEl.src = encodeURI(imageUri);
}
async function initDragAndDrop() {
const reader = new FileReader();
reader.onload = async (e) => {
if (e.target.result.startsWith('data:image')) await addImage(e.target.result);
};
document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
document.body.addEventListener('dragover', (evt) => evt.preventDefault());
document.body.addEventListener('drop', async (evt) => {
evt.preventDefault();
evt.dataTransfer.dropEffect = 'copy';
for (const f of evt.dataTransfer.files) reader.readAsDataURL(f);
});
document.body.onclick = (e) => {
if (e.target.localName !== 'canvas') document.getElementById('description').innerHTML = '';
};
}
async function main() {
showLoader('loading models');
await human.load();
showLoader('compiling models');
await human.warmup();
showLoader('loading images');
const images = ['group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg', 'solvay1927.jpg', 'stock-group-1.jpg', 'stock-group-2.jpg', 'stock-models-6.jpg', 'stock-models-7.jpg'];
const imageUris = images.map((a) => `../../samples/in/${a}`);
for (let i = 0; i < imageUris.length; i++) addImage(imageUris[i]);
initDragAndDrop();
hideLoader();
}
window.onload = main;

View File

@ -1,43 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<!-- <meta http-equiv="content-type" content="text/html; charset=utf-8"> -->
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./facedetect.js" type="module"></script>
<style>
img { object-fit: contain; }
img:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 24px; font-variant: small-caps; }
body { margin: 24px; background: black; color: white; overflow: hidden; text-align: -webkit-center; width: 100vw; height: 100vh; }
::-webkit-scrollbar { height: 8px; border: 0; border-radius: 0; }
::-webkit-scrollbar-thumb { background: grey }
::-webkit-scrollbar-track { margin: 3px; }
canvas { width: 192px; height: 192px; margin: 2px; padding: 2px; cursor: grab; transform: scale(1.00); transition : all 0.3s ease; }
canvas:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
</style>
</head>
<body>
<component-loader></component-loader>
<div style="display: flex">
<div>
<div style="margin: 24px">select image to show detected faces<br>drag & drop to add your images</div>
<div id="images" style="display: flex; width: 98vw; overflow-x: auto; overflow-y: hidden; scroll-behavior: smooth"></div>
</div>
</div>
<div id="list" style="height: 10px"></div>
<div style="margin: 24px">hover or click on face to show details</div>
<div id="faces" style="overflow-y: auto"></div>
<div id="description" style="white-space: pre;"></div>
</body>
</html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -11,13 +11,12 @@ import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
import * as indexDb from './indexdb'; // methods to deal with indexdb import * as indexDb from './indexdb'; // methods to deal with indexdb
const humanConfig = { // user configuration for human, used to fine-tune behavior const humanConfig = { // user configuration for human, used to fine-tune behavior
cacheSensitivity: 0.01, cacheSensitivity: 0,
modelBasePath: '../../models', modelBasePath: '../../models',
filter: { enabled: true, equalization: true }, // let's run with histogram equalizer filter: { equalization: true }, // let's run with histogram equalizer
debug: true,
face: { face: {
enabled: true, enabled: true,
detector: { rotation: true, return: true, mask: false }, // return tensor is used to get detected face image detector: { rotation: true, return: true, cropFactor: 1.6, mask: false }, // return tensor is used to get detected face image
description: { enabled: true }, // default model for face descriptor extraction is faceres description: { enabled: true }, // default model for face descriptor extraction is faceres
// mobilefacenet: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/mobilefacenet.json' }, // alternative model // mobilefacenet: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/mobilefacenet.json' }, // alternative model
// insightface: { enabled: true, modelPath: 'https://vladmandic.github.io/insightface/models/insightface-mobilenet-swish.json' }, // alternative model // insightface: { enabled: true, modelPath: 'https://vladmandic.github.io/insightface/models/insightface-mobilenet-swish.json' }, // alternative model
@ -42,10 +41,9 @@ const options = {
blinkMin: 10, // minimum duration of a valid blink blinkMin: 10, // minimum duration of a valid blink
blinkMax: 800, // maximum duration of a valid blink blinkMax: 800, // maximum duration of a valid blink
threshold: 0.5, // minimum similarity threshold: 0.5, // minimum similarity
distanceMin: 0.4, // closest that face is allowed to be to the camera in cm
distanceMax: 1.0, // farthest that face is allowed to be to the camera in cm
mask: humanConfig.face.detector.mask, mask: humanConfig.face.detector.mask,
rotation: humanConfig.face.detector.rotation, rotation: humanConfig.face.detector.rotation,
cropFactor: humanConfig.face.detector.cropFactor,
...matchOptions, ...matchOptions,
}; };
@ -58,7 +56,6 @@ const ok: Record<string, { status: boolean | undefined, val: number }> = { // mu
faceSize: { status: false, val: 0 }, faceSize: { status: false, val: 0 },
antispoofCheck: { status: false, val: 0 }, antispoofCheck: { status: false, val: 0 },
livenessCheck: { status: false, val: 0 }, livenessCheck: { status: false, val: 0 },
distance: { status: false, val: 0 },
age: { status: false, val: 0 }, age: { status: false, val: 0 },
gender: { status: false, val: 0 }, gender: { status: false, val: 0 },
timeout: { status: true, val: 0 }, timeout: { status: true, val: 0 },
@ -76,7 +73,6 @@ const allOk = () => ok.faceCount.status
&& ok.faceConfidence.status && ok.faceConfidence.status
&& ok.antispoofCheck.status && ok.antispoofCheck.status
&& ok.livenessCheck.status && ok.livenessCheck.status
&& ok.distance.status
&& ok.descriptor.status && ok.descriptor.status
&& ok.age.status && ok.age.status
&& ok.gender.status; && ok.gender.status;
@ -191,8 +187,6 @@ async function validationLoop(): Promise<H.FaceResult> { // main screen refresh
ok.livenessCheck.status = ok.livenessCheck.val >= options.minConfidence; ok.livenessCheck.status = ok.livenessCheck.val >= options.minConfidence;
ok.faceSize.val = Math.min(human.result.face[0].box[2], human.result.face[0].box[3]); ok.faceSize.val = Math.min(human.result.face[0].box[2], human.result.face[0].box[3]);
ok.faceSize.status = ok.faceSize.val >= options.minSize; ok.faceSize.status = ok.faceSize.val >= options.minSize;
ok.distance.val = human.result.face[0].distance || 0;
ok.distance.status = (ok.distance.val >= options.distanceMin) && (ok.distance.val <= options.distanceMax);
ok.descriptor.val = human.result.face[0].embedding?.length || 0; ok.descriptor.val = human.result.face[0].embedding?.length || 0;
ok.descriptor.status = ok.descriptor.val > 0; ok.descriptor.status = ok.descriptor.val > 0;
ok.age.val = human.result.face[0].age || 0; ok.age.val = human.result.face[0].age || 0;
@ -239,8 +233,8 @@ async function detectFace() {
dom.canvas.getContext('2d')?.clearRect(0, 0, options.minSize, options.minSize); dom.canvas.getContext('2d')?.clearRect(0, 0, options.minSize, options.minSize);
if (!current?.face?.tensor || !current?.face?.embedding) return false; if (!current?.face?.tensor || !current?.face?.embedding) return false;
console.log('face record:', current.face); // eslint-disable-line no-console console.log('face record:', current.face); // eslint-disable-line no-console
log(`detected face: ${current.face.gender} ${current.face.age || 0}y distance ${100 * (current.face.distance || 0)}cm/${Math.round(100 * (current.face.distance || 0) / 2.54)}in`); log(`detected face: ${current.face.gender} ${current.face.age || 0}y distance ${current.face.iris || 0}cm/${Math.round(100 * (current.face.iris || 0) / 2.54) / 100}in`);
await human.draw.tensor(current.face.tensor, dom.canvas); human.tf.browser.toPixels(current.face.tensor as unknown as H.TensorLike, dom.canvas);
if (await indexDb.count() === 0) { if (await indexDb.count() === 0) {
log('face database is empty: nothing to compare face with'); log('face database is empty: nothing to compare face with');
document.body.style.background = 'black'; document.body.style.background = 'black';
@ -249,7 +243,7 @@ async function detectFace() {
} }
const db = await indexDb.load(); const db = await indexDb.load();
const descriptors = db.map((rec) => rec.descriptor).filter((desc) => desc.length > 0); const descriptors = db.map((rec) => rec.descriptor).filter((desc) => desc.length > 0);
const res = human.match.find(current.face.embedding, descriptors, matchOptions); const res = human.match(current.face.embedding, descriptors, matchOptions);
current.record = db[res.index] || null; current.record = db[res.index] || null;
if (current.record) { if (current.record) {
log(`best match: ${current.record.name} | id: ${current.record.id} | similarity: ${Math.round(1000 * res.similarity) / 10}%`); log(`best match: ${current.record.name} | id: ${current.record.id} | similarity: ${Math.round(1000 * res.similarity) / 10}%`);
@ -281,8 +275,8 @@ async function main() { // main entry point
await detectionLoop(); // start detection loop await detectionLoop(); // start detection loop
startTime = human.now(); startTime = human.now();
current.face = await validationLoop(); // start validation loop current.face = await validationLoop(); // start validation loop
dom.canvas.width = current.face?.tensor?.shape[1] || options.minSize; dom.canvas.width = current.face.tensor?.shape[1] || options.minSize;
dom.canvas.height = current.face?.tensor?.shape[0] || options.minSize; dom.canvas.height = current.face.tensor?.shape[0] || options.minSize;
dom.source.width = dom.canvas.width; dom.source.width = dom.canvas.width;
dom.source.height = dom.canvas.height; dom.source.height = dom.canvas.height;
dom.canvas.style.width = ''; dom.canvas.style.width = '';

View File

@ -11,7 +11,7 @@
## Browser Face Recognition Demo ## Browser Face Recognition Demo
- `demo/facematch`: Demo for Browsers that uses all face description and embedding features to - `demo/facematch`: Demo for Browsers that uses all face description and embedding features to
detect, extract and identify all faces plus calculate similarity between them detect, extract and identify all faces plus calculate simmilarity between them
It highlights functionality such as: It highlights functionality such as:
@ -72,13 +72,12 @@ Non-linear performance that increases with number of worker threads due to commu
> node node-match > node node-match
<!-- eslint-skip -->
```js ```js
INFO: options: { dbFile: './faces.json', dbMax: 10000, threadPoolSize: 6, workerSrc: './node-match-worker.js', debug: false, minThreshold: 0.9, descLength: 1024 } 2021-10-13 07:53:36 INFO: options: { dbFile: './faces.json', dbMax: 10000, threadPoolSize: 6, workerSrc: './node-match-worker.js', debug: false, minThreshold: 0.9, descLength: 1024 }
DATA: created shared buffer: { maxDescriptors: 10000, totalBytes: 40960000, totalElements: 10240000 } 2021-10-13 07:53:36 DATA: created shared buffer: { maxDescriptors: 10000, totalBytes: 40960000, totalElements: 10240000 }
DATA: db loaded: { existingRecords: 0, newRecords: 5700 } 2021-10-13 07:53:36 DATA: db loaded: { existingRecords: 0, newRecords: 5700 }
INFO: starting worker thread pool: { totalWorkers: 6, alreadyActive: 0 } 2021-10-13 07:53:36 INFO: starting worker thread pool: { totalWorkers: 6, alreadyActive: 0 }
STATE: submitted: { matchJobs: 100, poolSize: 6, activeWorkers: 6 } 2021-10-13 07:53:36 STATE: submitted: { matchJobs: 100, poolSize: 6, activeWorkers: 6 }
STATE: { matchJobsFinished: 100, totalTimeMs: 1769, averageTimeMs: 17.69 } 2021-10-13 07:53:38 STATE: { matchJobsFinished: 100, totalTimeMs: 1769, averageTimeMs: 17.69 }
INFO: closing workers: { poolSize: 6, activeWorkers: 6 } 2021-10-13 07:53:38 INFO: closing workers: { poolSize: 6, activeWorkers: 6 }
``` ```

View File

@ -1,7 +1,7 @@
/** /**
* Human demo for browsers * Human demo for browsers
* *
* Demo for face descriptor analysis and face similarity analysis * Demo for face descriptor analysis and face simmilarity analysis
*/ */
/** @type {Human} */ /** @type {Human} */
@ -11,7 +11,7 @@ const userConfig = {
backend: 'humangl', backend: 'humangl',
async: true, async: true,
warmup: 'none', warmup: 'none',
cacheSensitivity: 0.01, cacheSensitivity: 0,
debug: true, debug: true,
modelBasePath: '../../models/', modelBasePath: '../../models/',
deallocate: true, deallocate: true,
@ -22,6 +22,7 @@ const userConfig = {
}, },
face: { face: {
enabled: true, enabled: true,
// detector: { rotation: false, return: true, maxDetected: 50, iouThreshold: 0.206, minConfidence: 0.122 },
detector: { return: true, rotation: true, maxDetected: 50, iouThreshold: 0.01, minConfidence: 0.2 }, detector: { return: true, rotation: true, maxDetected: 50, iouThreshold: 0.01, minConfidence: 0.2 },
mesh: { enabled: true }, mesh: { enabled: true },
iris: { enabled: false }, iris: { enabled: false },
@ -63,16 +64,25 @@ async function loadFaceMatchDB() {
} }
} }
async function selectFaceCanvas(face) { async function SelectFaceCanvas(face) {
// if we have face image tensor, enhance it and display it // if we have face image tensor, enhance it and display it
let embedding; let embedding;
document.getElementById('orig').style.filter = 'blur(16px)'; document.getElementById('orig').style.filter = 'blur(16px)';
if (face.tensor) { if (face.tensor) {
title('Sorting Faces by Similarity'); title('Sorting Faces by Similarity');
const c = document.getElementById('orig'); const enhanced = human.enhance(face);
await human.draw.tensor(face.tensor, c); if (enhanced) {
const c = document.getElementById('orig');
const squeeze = human.tf.squeeze(enhanced);
const normalize = human.tf.div(squeeze, 255);
await human.tf.browser.toPixels(normalize, c);
human.tf.dispose([enhanced, squeeze, normalize]);
const ctx = c.getContext('2d');
ctx.font = 'small-caps 0.4rem "Lato"';
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
}
const arr = db.map((rec) => rec.embedding); const arr = db.map((rec) => rec.embedding);
const res = await human.match.find(face.embedding, arr); const res = await human.match(face.embedding, arr);
log('Match:', db[res.index].name); log('Match:', db[res.index].name);
const emotion = face.emotion[0] ? `${Math.round(100 * face.emotion[0].score)}% ${face.emotion[0].emotion}` : 'N/A'; const emotion = face.emotion[0] ? `${Math.round(100 * face.emotion[0].score)}% ${face.emotion[0].emotion}` : 'N/A';
document.getElementById('desc').innerHTML = ` document.getElementById('desc').innerHTML = `
@ -93,11 +103,11 @@ async function selectFaceCanvas(face) {
for (const canvas of canvases) { for (const canvas of canvases) {
// calculate similarity from selected face to current one in the loop // calculate similarity from selected face to current one in the loop
const current = all[canvas.tag.sample][canvas.tag.face]; const current = all[canvas.tag.sample][canvas.tag.face];
const similarity = human.match.similarity(face.embedding, current.embedding); const similarity = human.similarity(face.embedding, current.embedding);
canvas.tag.similarity = similarity; canvas.tag.similarity = similarity;
// get best match // get best match
// draw the canvas // draw the canvas
await human.draw.tensor(current.tensor, canvas); await human.tf.browser.toPixels(current.tensor, canvas);
const ctx = canvas.getContext('2d'); const ctx = canvas.getContext('2d');
ctx.font = 'small-caps 1rem "Lato"'; ctx.font = 'small-caps 1rem "Lato"';
ctx.fillStyle = 'rgba(0, 0, 0, 1)'; ctx.fillStyle = 'rgba(0, 0, 0, 1)';
@ -110,7 +120,7 @@ async function selectFaceCanvas(face) {
ctx.font = 'small-caps 1rem "Lato"'; ctx.font = 'small-caps 1rem "Lato"';
const start = human.now(); const start = human.now();
const arr = db.map((rec) => rec.embedding); const arr = db.map((rec) => rec.embedding);
const res = await human.match.find(current.embedding, arr); const res = await human.match(current.embedding, arr);
time += (human.now() - start); time += (human.now() - start);
if (res.similarity > minScore) ctx.fillText(`DB: ${(100 * res.similarity).toFixed(1)}% ${db[res.index].name}`, 4, canvas.height - 30); if (res.similarity > minScore) ctx.fillText(`DB: ${(100 * res.similarity).toFixed(1)}% ${db[res.index].name}`, 4, canvas.height - 30);
} }
@ -125,7 +135,7 @@ async function selectFaceCanvas(face) {
title('Selected Face'); title('Selected Face');
} }
async function addFaceCanvas(index, res, fileName) { async function AddFaceCanvas(index, res, fileName) {
all[index] = res.face; all[index] = res.face;
for (const i in res.face) { for (const i in res.face) {
if (!res.face[i].tensor) continue; // did not get valid results if (!res.face[i].tensor) continue; // did not get valid results
@ -144,25 +154,25 @@ async function addFaceCanvas(index, res, fileName) {
gender: ${Math.round(100 * res.face[i].genderScore)}% ${res.face[i].gender} gender: ${Math.round(100 * res.face[i].genderScore)}% ${res.face[i].gender}
emotion: ${emotion} emotion: ${emotion}
`.replace(/ /g, ' '); `.replace(/ /g, ' ');
await human.draw.tensor(res.face[i].tensor, canvas); await human.tf.browser.toPixels(res.face[i].tensor, canvas);
const ctx = canvas.getContext('2d'); const ctx = canvas.getContext('2d');
if (!ctx) return; if (!ctx) return false;
ctx.font = 'small-caps 0.8rem "Lato"'; ctx.font = 'small-caps 0.8rem "Lato"';
ctx.fillStyle = 'rgba(255, 255, 255, 1)'; ctx.fillStyle = 'rgba(255, 255, 255, 1)';
ctx.fillText(`${res.face[i].age}y ${(100 * (res.face[i].genderScore || 0)).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6); ctx.fillText(`${res.face[i].age}y ${(100 * (res.face[i].genderScore || 0)).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6);
const arr = db.map((rec) => rec.embedding); const arr = db.map((rec) => rec.embedding);
const result = human.match.find(res.face[i].embedding, arr); const result = human.match(res.face[i].embedding, arr);
ctx.font = 'small-caps 1rem "Lato"'; ctx.font = 'small-caps 1rem "Lato"';
if (result.similarity && res.similarity > minScore) ctx.fillText(`${(100 * result.similarity).toFixed(1)}% ${db[result.index].name}`, 4, canvas.height - 30); if (result.similarity && res.similarity > minScore) ctx.fillText(`${(100 * result.similarity).toFixed(1)}% ${db[result.index].name}`, 4, canvas.height - 30);
document.getElementById('faces').appendChild(canvas); document.getElementById('faces').appendChild(canvas);
canvas.addEventListener('click', (evt) => { canvas.addEventListener('click', (evt) => {
log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, 'Source:', evt.target.tag.source, all[evt.target.tag.sample][evt.target.tag.face]); log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, 'Source:', evt.target.tag.source, all[evt.target.tag.sample][evt.target.tag.face]);
selectFaceCanvas(all[evt.target.tag.sample][evt.target.tag.face]); SelectFaceCanvas(all[evt.target.tag.sample][evt.target.tag.face]);
}); });
} }
} }
async function addImageElement(index, image, length) { async function AddImageElement(index, image, length) {
const faces = all.reduce((prev, curr) => prev += curr.length, 0); const faces = all.reduce((prev, curr) => prev += curr.length, 0);
title(`Analyzing Input Images<br> ${Math.round(100 * index / length)}% [${index} / ${length}]<br>Found ${faces} Faces`); title(`Analyzing Input Images<br> ${Math.round(100 * index / length)}% [${index} / ${length}]<br>Found ${faces} Faces`);
return new Promise((resolve) => { return new Promise((resolve) => {
@ -171,7 +181,7 @@ async function addImageElement(index, image, length) {
document.getElementById('images').appendChild(img); // and finally we can add it document.getElementById('images').appendChild(img); // and finally we can add it
human.detect(img, userConfig) human.detect(img, userConfig)
.then((res) => { // eslint-disable-line promise/always-return .then((res) => { // eslint-disable-line promise/always-return
addFaceCanvas(index, res, image); // then wait until image is analyzed AddFaceCanvas(index, res, image); // then wait until image is analyzed
resolve(true); resolve(true);
}) })
.catch(() => log('human detect error')); .catch(() => log('human detect error'));
@ -212,23 +222,18 @@ async function main() {
// could not dynamically enumerate images so using static list // could not dynamically enumerate images so using static list
if (images.length === 0) { if (images.length === 0) {
images = [ images = [
'ai-face.jpg', 'ai-upper.jpg', 'ai-body.jpg', 'solvay1927.jpg', 'ai-body.jpg', 'solvay1927.jpg', 'ai-upper.jpg',
'person-carolina.jpg', 'person-celeste.jpg', 'person-leila1.jpg', 'person-leila2.jpg', 'person-lexi.jpg', 'person-linda.jpg', 'person-nicole.jpg', 'person-tasia.jpg',
'person-tetiana.jpg', 'person-vlado1.jpg', 'person-vlado5.jpg', 'person-vlado.jpg', 'person-christina.jpg', 'person-lauren.jpg',
'group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg', 'group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg',
'person-celeste.jpg', 'person-christina.jpg', 'person-lauren.jpg', 'person-lexi.jpg', 'person-linda.jpg', 'person-nicole.jpg', 'person-tasia.jpg', 'person-tetiana.jpg', 'person-vlado.jpg', 'person-vlado1.jpg', 'person-vlado5.jpg', 'daz3d-brianna.jpg', 'daz3d-chiyo.jpg', 'daz3d-cody.jpg', 'daz3d-drew-01.jpg', 'daz3d-drew-02.jpg', 'daz3d-ella-01.jpg', 'daz3d-ella-02.jpg', 'daz3d-gillian.jpg',
'stock-group-1.jpg', 'stock-group-2.jpg', 'daz3d-hye-01.jpg', 'daz3d-hye-02.jpg', 'daz3d-kaia.jpg', 'daz3d-karen.jpg', 'daz3d-kiaria-01.jpg', 'daz3d-kiaria-02.jpg', 'daz3d-lilah-01.jpg', 'daz3d-lilah-02.jpg',
'stock-models-1.jpg', 'stock-models-2.jpg', 'stock-models-3.jpg', 'stock-models-4.jpg', 'stock-models-5.jpg', 'stock-models-6.jpg', 'stock-models-7.jpg', 'stock-models-8.jpg', 'stock-models-9.jpg', 'daz3d-lilah-03.jpg', 'daz3d-lila.jpg', 'daz3d-lindsey.jpg', 'daz3d-megah.jpg', 'daz3d-selina-01.jpg', 'daz3d-selina-02.jpg', 'daz3d-snow.jpg',
'stock-teen-1.jpg', 'stock-teen-2.jpg', 'stock-teen-3.jpg', 'stock-teen-4.jpg', 'stock-teen-5.jpg', 'stock-teen-6.jpg', 'stock-teen-7.jpg', 'stock-teen-8.jpg', 'daz3d-sunshine.jpg', 'daz3d-taia.jpg', 'daz3d-tuesday-01.jpg', 'daz3d-tuesday-02.jpg', 'daz3d-tuesday-03.jpg', 'daz3d-zoe.jpg', 'daz3d-ginnifer.jpg',
'stock-models-10.jpg', 'stock-models-11.jpg', 'stock-models-12.jpg', 'stock-models-13.jpg', 'stock-models-14.jpg', 'stock-models-15.jpg', 'stock-models-16.jpg', 'daz3d-_emotions01.jpg', 'daz3d-_emotions02.jpg', 'daz3d-_emotions03.jpg', 'daz3d-_emotions04.jpg', 'daz3d-_emotions05.jpg',
'cgi-model-1.jpg', 'cgi-model-2.jpg', 'cgi-model-3.jpg', 'cgi-model-4.jpg', 'cgi-model-5.jpg', 'cgi-model-6.jpg', 'cgi-model-7.jpg', 'cgi-model-8.jpg', 'cgi-model-9.jpg',
'cgi-model-10.jpg', 'cgi-model-11.jpg', 'cgi-model-12.jpg', 'cgi-model-13.jpg', 'cgi-model-14.jpg', 'cgi-model-15.jpg', 'cgi-model-18.jpg', 'cgi-model-19.jpg',
'cgi-model-20.jpg', 'cgi-model-21.jpg', 'cgi-model-22.jpg', 'cgi-model-23.jpg', 'cgi-model-24.jpg', 'cgi-model-25.jpg', 'cgi-model-26.jpg', 'cgi-model-27.jpg', 'cgi-model-28.jpg', 'cgi-model-29.jpg',
'cgi-model-30.jpg', 'cgi-model-31.jpg', 'cgi-model-33.jpg', 'cgi-model-34.jpg',
'cgi-multiangle-1.jpg', 'cgi-multiangle-2.jpg', 'cgi-multiangle-3.jpg', 'cgi-multiangle-4.jpg', 'cgi-multiangle-6.jpg', 'cgi-multiangle-7.jpg', 'cgi-multiangle-8.jpg', 'cgi-multiangle-9.jpg', 'cgi-multiangle-10.jpg', 'cgi-multiangle-11.jpg',
'stock-emotions-a-1.jpg', 'stock-emotions-a-2.jpg', 'stock-emotions-a-3.jpg', 'stock-emotions-a-4.jpg', 'stock-emotions-a-5.jpg', 'stock-emotions-a-6.jpg', 'stock-emotions-a-7.jpg', 'stock-emotions-a-8.jpg',
'stock-emotions-b-1.jpg', 'stock-emotions-b-2.jpg', 'stock-emotions-b-3.jpg', 'stock-emotions-b-4.jpg', 'stock-emotions-b-5.jpg', 'stock-emotions-b-6.jpg', 'stock-emotions-b-7.jpg', 'stock-emotions-b-8.jpg',
]; ];
// add prefix for gitpages // add prefix for gitpages
images = images.map((a) => `../../samples/in/${a}`); images = images.map((a) => `/human/samples/in/${a}`);
log('Adding static image list:', images); log('Adding static image list:', images);
} else { } else {
log('Discovered images:', images); log('Discovered images:', images);
@ -237,7 +242,7 @@ async function main() {
// images = ['/samples/in/person-lexi.jpg', '/samples/in/person-carolina.jpg', '/samples/in/solvay1927.jpg']; // images = ['/samples/in/person-lexi.jpg', '/samples/in/person-carolina.jpg', '/samples/in/solvay1927.jpg'];
const t0 = human.now(); const t0 = human.now();
for (let i = 0; i < images.length; i++) await addImageElement(i, images[i], images.length); for (let i = 0; i < images.length; i++) await AddImageElement(i, images[i], images.length);
const t1 = human.now(); const t1 = human.now();
// print stats // print stats
@ -251,7 +256,7 @@ async function main() {
title(''); title('');
log('Ready'); log('Ready');
human.validate(userConfig); human.validate(userConfig);
human.match.similarity([], []); human.similarity([], []);
} }
window.onload = main; window.onload = main;


@ -38,8 +38,7 @@ function match(descBuffer, options = { order: 2, multiplier: 20 }) {
if (best < threshold || best === 0) break; // short circuit if (best < threshold || best === 0) break; // short circuit
} }
best = (options.order === 2) ? Math.sqrt(best) : best ** (1 / options.order); best = (options.order === 2) ? Math.sqrt(best) : best ** (1 / options.order);
const similarity = Math.round(100 * Math.max(0, 100 - best) / 100.0) / 100; return { index, distance: best, similarity: Math.max(0, 100 - best) / 100.0 };
return { index, distance: best, similarity };
} }
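
For context, the match() hunk above maps an Ln distance between face descriptors to a 0..1 similarity score. Below is a minimal sketch of that mapping, assuming descriptors are plain equal-length number arrays; the distance accumulation loop is not part of the shown hunk, so its exact form here is an assumption.

```js
// minimal sketch of the distance-to-similarity mapping shown above (accumulation loop is assumed, order defaults to 2)
function similarityFromDescriptors(desc1, desc2, order = 2) {
  let sum = 0;
  for (let i = 0; i < desc1.length; i++) sum += Math.abs(desc1[i] - desc2[i]) ** order; // accumulate Ln distance (assumption)
  const distance = (order === 2) ? Math.sqrt(sum) : sum ** (1 / order); // same root step as in the hunk above
  const similarity = Math.round(100 * Math.max(0, 100 - distance) / 100.0) / 100; // clamp to 0..1 and round to two decimals
  return { distance, similarity };
}
```

The multiplier option visible in the hunk's signature is intentionally not applied here, since the lines that use it fall outside the shown range.
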
threads.parentPort?.on('message', (msg) => { threads.parentPort?.on('message', (msg) => {
@ -61,11 +60,11 @@ threads.parentPort?.on('message', (msg) => {
} }
if (typeof msg.debug !== 'undefined') { // set verbose logging if (typeof msg.debug !== 'undefined') { // set verbose logging
debug = msg.debug; debug = msg.debug;
// if (debug) threads.parentPort?.postMessage(`debug: ${debug}`); if (debug) threads.parentPort?.postMessage(`debug: ${debug}`);
} }
if (typeof msg.threshold !== 'undefined') { // set minimum similarity threshold if (typeof msg.threshold !== 'undefined') { // set minimum similarity threshold
threshold = msg.threshold; threshold = msg.threshold;
// if (debug) threads.parentPort?.postMessage(`threshold: ${threshold}`); if (debug) threads.parentPort?.postMessage(`threshold: ${threshold}`);
} }
if (typeof msg.shutdown !== 'undefined') { // got message to close worker if (typeof msg.shutdown !== 'undefined') { // got message to close worker
if (debug) threads.parentPort?.postMessage('shutting down'); if (debug) threads.parentPort?.postMessage('shutting down');


@ -15,7 +15,7 @@ const options = {
dbMax: 10000, // maximum number of records to hold in memory dbMax: 10000, // maximum number of records to hold in memory
threadPoolSize: 12, // number of worker threads to create in thread pool threadPoolSize: 12, // number of worker threads to create in thread pool
workerSrc: './node-match-worker.js', // code that executes in the worker thread workerSrc: './node-match-worker.js', // code that executes in the worker thread
debug: true, // verbose messages debug: false, // verbose messages
minThreshold: 0.5, // match returns first record that meets the similarity threshold, set to 0 to always scan all records minThreshold: 0.5, // match returns first record that meets the similarity threshold, set to 0 to always scan all records
descLength: 1024, // descriptor length descLength: 1024, // descriptor length
}; };
@ -176,7 +176,7 @@ async function main() {
data.requestID++; // increase request id data.requestID++; // increase request id
if (testOptions.fuzDescriptors) match(fuzDescriptor(descriptor)); // fuz descriptor for harder match if (testOptions.fuzDescriptors) match(fuzDescriptor(descriptor)); // fuz descriptor for harder match
else match(descriptor); else match(descriptor);
if (options.debug) log.debug('submited job', data.requestID); // we already know what we're searching for so we can compare results if (options.debug) log.info('submited job', data.requestID); // we already know what we're searching for so we can compare results
} }
log.state('submitted:', { matchJobs: testOptions.maxJobs, poolSize: data.workers.length, activeWorkers: data.workers.filter((worker) => !!worker).length }); log.state('submitted:', { matchJobs: testOptions.maxJobs, poolSize: data.workers.length, activeWorkers: data.workers.filter((worker) => !!worker).length });
} }
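
The two files above form a small match thread pool: the worker reacts to debug, threshold, and shutdown messages, and the main script submits jobs using the options block shown earlier. Below is a minimal sketch of the main-thread side, assuming node:worker_threads and the workerSrc path from those options; the descriptor job message format is not shown in this diff, so it is omitted.

```js
// minimal sketch: create one matcher worker and configure it (assumes node:worker_threads and workerSrc from the options above)
const { Worker } = require('worker_threads');
const worker = new Worker('./node-match-worker.js');
worker.on('message', (msg) => console.log('from worker:', msg)); // worker replies via parentPort.postMessage
worker.postMessage({ debug: false });   // handled by the msg.debug branch in the worker hunk
worker.postMessage({ threshold: 0.5 }); // handled by the msg.threshold branch
// ...descriptor match jobs would be posted here; their message shape is not part of this diff...
worker.postMessage({ shutdown: true }); // handled by the msg.shutdown branch
```
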


@ -28,10 +28,10 @@ async function updateCached(req) {
.then((update) => { .then((update) => {
// update cache if request is ok // update cache if request is ok
if (update.ok) { if (update.ok) {
caches caches // eslint-disable-line promise/no-nesting
.open(cacheName) .open(cacheName)
.then((cache) => cache.put(req, update)) .then((cache) => cache.put(req, update))
.catch((err) => log('cache update error', err)); // eslint-disable-line promise/no-nesting .catch((err) => log('cache update error', err));
} }
return true; return true;
}) })
@ -76,8 +76,8 @@ async function getCached(evt) {
function cacheInit() { function cacheInit() {
caches.open(cacheName) caches.open(cacheName)
.then((cache) => cache.addAll(cacheFiles) .then((cache) => cache.addAll(cacheFiles) // eslint-disable-line promise/no-nesting
.then( // eslint-disable-line promise/no-nesting .then(
() => log('cache refresh:', cacheFiles.length, 'files'), () => log('cache refresh:', cacheFiles.length, 'files'),
(err) => log('cache error', err), (err) => log('cache error', err),
)) ))
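
The hunks above only adjust promise nesting in the demo PWA service worker's cache update and init paths. For orientation, here is a minimal sketch of how a demo page might register such a worker; the script path is an assumption, not taken from this diff.

```js
// minimal sketch: register the demo service worker (the script path is an assumption)
if ('serviceWorker' in navigator) {
  navigator.serviceWorker.register('./pwa-serviceworker.js')
    .then((reg) => console.log('pwa registered:', reg.scope))
    .catch((err) => console.log('pwa registration failed:', err));
}
```
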


@ -18,10 +18,7 @@
* ui={}: contains all variables exposed in the UI * ui={}: contains all variables exposed in the UI
*/ */
// WARNING!!! // test url <https://human.local/?worker=false&async=false&bench=false&draw=true&warmup=full&backend=humangl>
// This demo is written using older code style and a lot of manual setup
// Newer versions of Human have richer functionality allowing for much cleaner & easier usage
// It is recommended to use other demos such as `demo/typescript` for usage examples
import { Human } from '../dist/human.esm.js'; // equivalent of @vladmandic/human import { Human } from '../dist/human.esm.js'; // equivalent of @vladmandic/human
import Menu from './helpers/menu.js'; import Menu from './helpers/menu.js';
@ -86,7 +83,7 @@ const ui = {
facing: true, // camera facing front or back facing: true, // camera facing front or back
baseBackground: 'rgba(50, 50, 50, 1)', // 'grey' baseBackground: 'rgba(50, 50, 50, 1)', // 'grey'
columns: 2, // when processing sample images create this many columns columns: 2, // when processing sample images create this many columns
useWorker: false, // use web workers for processing useWorker: true, // use web workers for processing
worker: 'index-worker.js', worker: 'index-worker.js',
maxFPSframes: 10, // keep fps history for how many frames maxFPSframes: 10, // keep fps history for how many frames
modelsPreload: false, // preload human models on startup modelsPreload: false, // preload human models on startup
@ -185,7 +182,7 @@ function status(msg) {
prevStatus = msg; prevStatus = msg;
} else { } else {
const video = document.getElementById('video'); const video = document.getElementById('video');
const playing = isLive(video) && !video.paused; // eslint-disable-line no-use-before-define const playing = (video.srcObject !== null) && !video.paused;
document.getElementById('play').style.display = playing ? 'none' : 'block'; document.getElementById('play').style.display = playing ? 'none' : 'block';
document.getElementById('loader').style.display = 'none'; document.getElementById('loader').style.display = 'none';
div.innerText = ''; div.innerText = '';
@ -195,6 +192,7 @@ function status(msg) {
async function videoPlay(videoElement = document.getElementById('video')) { async function videoPlay(videoElement = document.getElementById('video')) {
document.getElementById('btnStartText').innerHTML = 'pause video'; document.getElementById('btnStartText').innerHTML = 'pause video';
await videoElement.play(); await videoElement.play();
// status();
} }
async function videoPause() { async function videoPause() {
@ -222,13 +220,21 @@ async function calcSimmilarity(result) {
compare.original = result; compare.original = result;
log('setting face compare baseline:', result.face[0]); log('setting face compare baseline:', result.face[0]);
if (result.face[0].tensor) { if (result.face[0].tensor) {
const c = document.getElementById('orig'); const enhanced = human.enhance(result.face[0]);
human.draw.tensor(result.face[0].tensor, c); if (enhanced) {
const c = document.getElementById('orig');
const squeeze = human.tf.squeeze(enhanced);
const norm = human.tf.div(squeeze, 255);
human.tf.browser.toPixels(norm, c);
human.tf.dispose(enhanced);
human.tf.dispose(squeeze);
human.tf.dispose(norm);
}
} else { } else {
document.getElementById('compare-canvas').getContext('2d').drawImage(compare.original.canvas, 0, 0, 200, 200); document.getElementById('compare-canvas').getContext('2d').drawImage(compare.original.canvas, 0, 0, 200, 200);
} }
} }
const similarity = human.match.similarity(compare.original.face[0].embedding, result.face[0].embedding); const similarity = human.similarity(compare.original.face[0].embedding, result.face[0].embedding);
document.getElementById('similarity').innerText = `similarity: ${Math.trunc(1000 * similarity) / 10}%`; document.getElementById('similarity').innerText = `similarity: ${Math.trunc(1000 * similarity) / 10}%`;
} }
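
The calcSimmilarity hunk above switches between human.similarity and the namespaced human.match.similarity depending on the Human version, and formats the result as a percentage. A minimal sketch of the same comparison outside the demo UI, assuming an async context and two inputs named image1 and image2:

```js
// minimal sketch: compare two detected faces by embedding (uses the namespaced API from one side of the hunk above)
const res1 = await human.detect(image1);
const res2 = await human.detect(image2);
const similarity = human.match.similarity(res1.face[0].embedding, res2.face[0].embedding);
console.log(`similarity: ${Math.trunc(1000 * similarity) / 10}%`); // same percentage formatting as the demo
```
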
@ -513,6 +519,17 @@ function runHumanDetect(input, canvas, timestamp) {
human.detect(input, userConfig) human.detect(input, userConfig)
.then((result) => { .then((result) => {
status(); status();
/*
setTimeout(async () => { // simulate gl context lost 2sec after initial detection
const ext = human.gl && human.gl.gl ? human.gl.gl.getExtension('WEBGL_lose_context') : {};
if (ext && ext.loseContext) {
log('simulate context lost:', human.env.webgl, human.gl, ext);
human.gl.gl.getExtension('WEBGL_lose_context').loseContext();
await videoPause();
status('Exception: WebGL');
}
}, 2000);
*/
if (result.performance && result.performance.total) ui.detectFPS.push(1000 / result.performance.total); if (result.performance && result.performance.total) ui.detectFPS.push(1000 / result.performance.total);
if (ui.detectFPS.length > ui.maxFPSframes) ui.detectFPS.shift(); if (ui.detectFPS.length > ui.maxFPSframes) ui.detectFPS.shift();
if (ui.bench) { if (ui.bench) {
@ -606,15 +623,21 @@ async function processImage(input, title) {
async function processVideo(input, title) { async function processVideo(input, title) {
status(`processing video: ${title}`); status(`processing video: ${title}`);
// const video = document.getElementById('video-file') || document.createElement('video');
const video = document.getElementById('video'); const video = document.getElementById('video');
const canvas = document.getElementById('canvas'); const canvas = document.getElementById('canvas');
// video.id = 'video-file';
// video.controls = true;
// video.loop = true;
// video.style.display = 'none';
// document.body.appendChild(video);
video.addEventListener('error', () => status(`video loading error: ${video.error.message}`)); video.addEventListener('error', () => status(`video loading error: ${video.error.message}`));
video.addEventListener('canplay', async () => { video.addEventListener('canplay', async () => {
for (const m of Object.values(menu)) m.hide(); for (const m of Object.values(menu)) m.hide();
document.getElementById('samples-container').style.display = 'none'; document.getElementById('samples-container').style.display = 'none';
canvas.style.display = 'block'; canvas.style.display = 'block';
await videoPlay(); await videoPlay();
runHumanDetect(video, canvas); if (!ui.detectThread) runHumanDetect(video, canvas);
}); });
video.srcObject = null; video.srcObject = null;
video.src = input; video.src = input;
@ -627,8 +650,9 @@ async function detectVideo() {
const canvas = document.getElementById('canvas'); const canvas = document.getElementById('canvas');
canvas.style.display = 'block'; canvas.style.display = 'block';
cancelAnimationFrame(ui.detectThread); cancelAnimationFrame(ui.detectThread);
if (isLive(video) && !video.paused) { if ((video.srcObject !== null) && !video.paused) {
await videoPause(); await videoPause();
// if (ui.drawThread) cancelAnimationFrame(ui.drawThread);
} else { } else {
const cameraError = await setupCamera(); const cameraError = await setupCamera();
if (!cameraError) { if (!cameraError) {
@ -770,7 +794,6 @@ function setupMenu() {
async function resize() { async function resize() {
window.onresize = null; window.onresize = null;
log('resize');
// best setting for mobile, ignored for desktop // best setting for mobile, ignored for desktop
// can set dynamic value such as Math.min(1, Math.round(100 * window.innerWidth / 960) / 100); // can set dynamic value such as Math.min(1, Math.round(100 * window.innerWidth / 960) / 100);
const viewportScale = 0.7; const viewportScale = 0.7;
@ -977,7 +1000,8 @@ async function main() {
if (ui.modelsPreload && !ui.useWorker) { if (ui.modelsPreload && !ui.useWorker) {
status('loading'); status('loading');
await human.load(userConfig); // this is not required, just pre-loads all models await human.load(userConfig); // this is not required, just pre-loads all models
log('demo loaded models:', human.models.loaded()); const loaded = Object.keys(human.models).filter((a) => human.models[a]);
log('demo loaded models:', loaded);
} else { } else {
await human.init(); await human.init();
} }


@ -16,7 +16,6 @@
node demo/nodejs/node-multiprocess.js node demo/nodejs/node-multiprocess.js
``` ```
<!-- eslint-skip -->
```json ```json
2021-06-01 08:54:19 INFO: @vladmandic/human version 2.0.0 2021-06-01 08:54:19 INFO: @vladmandic/human version 2.0.0
2021-06-01 08:54:19 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0 2021-06-01 08:54:19 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0


@ -9,10 +9,10 @@
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>"> <meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>"> <meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000"> <meta name="theme-color" content="#000000">
<link rel="manifest" href="../../manifest.webmanifest"> <link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon"> <link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png"> <link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="../multithread/index.js" type="module"></script> <script src="./index.js" type="module"></script>
<style> <style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') } @font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; } html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }


@ -8,7 +8,7 @@
import { Human } from '../../dist/human.esm.js'; // equivalent of @vladmandic/human import { Human } from '../../dist/human.esm.js'; // equivalent of @vladmandic/human
import GLBench from '../helpers/gl-bench.js'; import GLBench from '../helpers/gl-bench.js';
const workerJS = '../multithread/worker.js'; const workerJS = './worker.js';
const config = { const config = {
main: { // processes input and runs gesture analysis main: { // processes input and runs gesture analysis


@ -8,8 +8,8 @@
const fs = require('fs'); const fs = require('fs');
const path = require('path'); const path = require('path');
const childProcess = require('child_process'); // eslint-disable-line camelcase
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const childProcess = require('child_process'); // eslint-disable-line camelcase
// note that main process does not import human or tfjs at all, it's all done from worker process // note that main process does not import human or tfjs at all, it's all done from worker process
const workerFile = 'demo/multithread/node-multiprocess-worker.js'; const workerFile = 'demo/multithread/node-multiprocess-worker.js';


@ -28,8 +28,7 @@ or you can pass a path to image to analyze, either on local filesystem or using
node demo/nodejs/node.js node demo/nodejs/node.js
``` ```
<!-- eslint-skip --> ```json
```js
2021-06-01 08:52:15 INFO: @vladmandic/human version 2.0.0 2021-06-01 08:52:15 INFO: @vladmandic/human version 2.0.0
2021-06-01 08:52:15 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0 2021-06-01 08:52:15 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0
2021-06-01 08:52:15 INFO: Current folder: /home/vlado/dev/human 2021-06-01 08:52:15 INFO: Current folder: /home/vlado/dev/human
@ -83,7 +82,7 @@ node demo/nodejs/node.js
detector: { modelPath: 'handdetect.json' }, detector: { modelPath: 'handdetect.json' },
skeleton: { modelPath: 'handskeleton.json' } skeleton: { modelPath: 'handskeleton.json' }
}, },
object: { enabled: true, modelPath: 'centernet.json', minConfidence: 0.2, iouThreshold: 0.4, maxDetected: 10, skipFrames: 19 } object: { enabled: true, modelPath: 'mb3-centernet.json', minConfidence: 0.2, iouThreshold: 0.4, maxDetected: 10, skipFrames: 19 }
} }
08:52:15.673 Human: version: 2.0.0 08:52:15.673 Human: version: 2.0.0
08:52:15.674 Human: tfjs version: 3.6.0 08:52:15.674 Human: tfjs version: 3.6.0
@ -97,7 +96,7 @@ node demo/nodejs/node.js
08:52:15.847 Human: load model: file://models/handdetect.json 08:52:15.847 Human: load model: file://models/handdetect.json
08:52:15.847 Human: load model: file://models/handskeleton.json 08:52:15.847 Human: load model: file://models/handskeleton.json
08:52:15.914 Human: load model: file://models/movenet-lightning.json 08:52:15.914 Human: load model: file://models/movenet-lightning.json
08:52:15.957 Human: load model: file://models/centernet.json 08:52:15.957 Human: load model: file://models/mb3-centernet.json
08:52:16.015 Human: load model: file://models/faceres.json 08:52:16.015 Human: load model: file://models/faceres.json
08:52:16.015 Human: tf engine state: 50796152 bytes 1318 tensors 08:52:16.015 Human: tf engine state: 50796152 bytes 1318 tensors
2021-06-01 08:52:16 INFO: Loaded: [ 'face', 'movenet', 'handpose', 'emotion', 'centernet', 'faceres', [length]: 6 ] 2021-06-01 08:52:16 INFO: Loaded: [ 'face', 'movenet', 'handpose', 'emotion', 'centernet', 'faceres', [length]: 6 ]


@ -1,70 +0,0 @@
const fs = require('fs');
const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
const Human = require('../../dist/human.node.js');
const humanConfig = {
debug: false,
face: {
enabled: true,
detector: { enabled: true, modelPath: 'blazeface.json' },
description: { enabled: true, modelPath: 'faceres.json' },
// gear: { enabled: true, modelPath: '/home/vlado/dev/human-models/models/gear.json' },
// ssrnet: { enabled: true, modelPathAge: '/home/vlado/dev/human-models/models/age.json', modelPathGender: '/home/vlado/dev/human-models/models/gender.json' },
emotion: { enabled: false },
mesh: { enabled: false },
iris: { enabled: false },
antispoof: { enabled: false },
liveness: { enabled: false },
},
body: { enabled: false },
hand: { enabled: false },
gesture: { enabled: false },
};
const human = new Human.Human(humanConfig);
const ageThreshold = 18;
async function detect(inputFile) {
try {
const buffer = fs.readFileSync(inputFile);
const tensor = human.tf.node.decodeImage(buffer);
const result = await human.detect(tensor);
human.tf.dispose(tensor);
if (!result || !result.face || result.face.length === 0) return false;
let msg = ` file=${inputFile} resolution=${tensor.shape}`;
for (const face of result.face) {
msg = ` file=${inputFile} resolution=${tensor.shape} age=${face.age} gender=${face.gender} confidence=${face.genderScore}`;
if (face.age < ageThreshold) {
log.warn('fail:' + msg);
return true;
}
}
log.info('pass: ' + msg);
return false;
} catch (err) {
log.error(`error: file=${inputFile}: ${err}`);
}
return false;
}
async function main() {
log.info(`Human: version=${human.version} tf=${tf.version_core}`);
process.noDeprecation = true;
if (process.argv.length < 3) return;
await human.load();
await human.warmup();
const t0 = performance.now();
const args = process.argv.slice(2);
let pass = 0;
let fail = 0;
for (const arg of args) {
const ok = await detect(arg);
if (ok) pass++;
else fail++;
}
const t1 = performance.now();
log.info(`Human: files=${args.length} pass=${pass} fail=${fail} time=${Math.round(t1 - t0)} fps=${Math.round(10000 * args.length / (t1 - t0)) / 10}`);
}
main();


@ -1,66 +0,0 @@
/**
* Human simple demo for NodeJS
*/
const childProcess = require('child_process'); // eslint-disable-line camelcase
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require
const config = {
cacheSensitivity: 0.01,
wasmPlatformFetch: true,
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
};
const count = 10;
async function loadImage(input) {
const inputImage = await canvas.loadImage(input);
const inputCanvas = new canvas.Canvas(inputImage.width, inputImage.height);
const inputCtx = inputCanvas.getContext('2d');
inputCtx.drawImage(inputImage, 0, 0);
const imageData = inputCtx.getImageData(0, 0, inputCanvas.width, inputCanvas.height);
process.send({ input, resolution: [inputImage.width, inputImage.height] });
return imageData;
}
async function runHuman(module, backend) {
if (backend === 'wasm') require('@tensorflow/tfjs-backend-wasm'); // eslint-disable-line node/no-unpublished-require, global-require
const Human = require('../../dist/' + module); // eslint-disable-line global-require, import/no-dynamic-require
config.backend = backend;
const human = new Human.Human(config);
human.env.Canvas = canvas.Canvas;
human.env.Image = canvas.Image;
human.env.ImageData = canvas.ImageData;
process.send({ human: human.version, module });
await human.init();
process.send({ desired: human.config.backend, wasm: human.env.wasm, tfjs: human.tf.version.tfjs, tensorflow: human.env.tensorflow });
const imageData = await loadImage('samples/in/ai-body.jpg');
const t0 = human.now();
await human.load();
const t1 = human.now();
await human.warmup();
const t2 = human.now();
for (let i = 0; i < count; i++) await human.detect(imageData);
const t3 = human.now();
process.send({ backend: human.tf.getBackend(), load: Math.round(t1 - t0), warmup: Math.round(t2 - t1), detect: Math.round(t3 - t2), count, memory: human.tf.memory().numBytes });
}
async function executeWorker(args) {
return new Promise((resolve) => {
const worker = childProcess.fork(process.argv[1], args);
worker.on('message', (msg) => log.data(msg));
worker.on('exit', () => resolve(true));
});
}
async function main() {
if (process.argv[2]) {
await runHuman(process.argv[2], process.argv[3]);
} else {
await executeWorker(['human.node.js', 'tensorflow']);
await executeWorker(['human.node-gpu.js', 'tensorflow']);
await executeWorker(['human.node-wasm.js', 'wasm']);
}
}
main();


@ -24,7 +24,6 @@ const config = { // just enable all and leave default settings
async function main() { async function main() {
log.header(); log.header();
process.noDeprecation = true;
globalThis.Canvas = canvas.Canvas; // patch global namespace with canvas library globalThis.Canvas = canvas.Canvas; // patch global namespace with canvas library
globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library
@ -36,13 +35,12 @@ async function main() {
log.info('Human:', human.version, 'TF:', tf.version_core); log.info('Human:', human.version, 'TF:', tf.version_core);
await human.load(); // pre-load models await human.load(); // pre-load models
log.info('Loaded models:', human.models.loaded()); log.info('Loaded models:', Object.keys(human.models).filter((a) => human.models[a]));
log.info('Memory state:', human.tf.engine().memory()); log.info('Memory state:', human.tf.engine().memory());
// parse cmdline // parse cmdline
const input = process.argv[2]; const input = process.argv[2];
let output = process.argv[3]; const output = process.argv[3];
if (!output.toLowerCase().endsWith('.jpg')) output += '.jpg';
if (process.argv.length !== 4) log.error('Parameters: <input-image> <output-image> missing'); if (process.argv.length !== 4) log.error('Parameters: <input-image> <output-image> missing');
else if (!fs.existsSync(input) && !input.startsWith('http')) log.error(`File not found: ${process.argv[2]}`); else if (!fs.existsSync(input) && !input.startsWith('http')) log.error(`File not found: ${process.argv[2]}`);
else { else {


@ -5,6 +5,8 @@
const fs = require('fs'); const fs = require('fs');
const process = require('process'); const process = require('process');
let fetch; // fetch is dynamically imported later
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human // in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
@ -36,13 +38,13 @@ async function detect(input) {
let buffer; let buffer;
log.info('Loading image:', input); log.info('Loading image:', input);
if (input.startsWith('http:') || input.startsWith('https:')) { if (input.startsWith('http:') || input.startsWith('https:')) {
fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-unpublished-import
const res = await fetch(input); const res = await fetch(input);
if (res && res.ok) buffer = Buffer.from(await res.arrayBuffer()); if (res && res.ok) buffer = await res.buffer();
else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type')); else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type'));
} else { } else {
buffer = fs.readFileSync(input); buffer = fs.readFileSync(input);
} }
log.data('Image bytes:', buffer?.length, 'buffer:', buffer?.slice(0, 32));
// decode image using tfjs-node so we don't need external depenencies // decode image using tfjs-node so we don't need external depenencies
if (!buffer) return; if (!buffer) return;
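
The hunk above reflects the node-fetch v2 vs v3 API split: v2 exposed res.buffer(), while v3 is ESM-only and returns an ArrayBuffer. A minimal sketch of loading an image into a Buffer either way, assuming node-fetch v3 loaded via dynamic import:

```js
// minimal sketch: load an image into a Buffer from a URL or local path (assumes node-fetch v3, imported dynamically)
const fs = require('fs');
async function loadBuffer(input) {
  if (input.startsWith('http:') || input.startsWith('https:')) {
    const fetch = (await import('node-fetch')).default; // node-fetch v3 is ESM-only, hence the dynamic import
    const res = await fetch(input);
    if (!res.ok) throw new Error(`invalid image url: ${input} status: ${res.status}`);
    return Buffer.from(await res.arrayBuffer()); // res.buffer() was the node-fetch v2 equivalent
  }
  return fs.readFileSync(input); // local file path
}
```
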
@ -65,7 +67,8 @@ async function main() {
}); });
human.events.addEventListener('load', () => { human.events.addEventListener('load', () => {
log.info('Event Loaded:', human.models.loaded(), human.tf.engine().memory()); const loaded = Object.keys(human.models).filter((a) => human.models[a]);
log.info('Event Loaded:', loaded, human.tf.engine().memory());
}); });
human.events.addEventListener('image', () => { human.events.addEventListener('image', () => {
@ -77,7 +80,7 @@ async function main() {
const persons = human.result.persons; const persons = human.result.persons;
for (let i = 0; i < persons.length; i++) { for (let i = 0; i < persons.length; i++) {
const face = persons[i].face; const face = persons[i].face;
const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.distance}` : null; const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.iris}` : null;
const body = persons[i].body; const body = persons[i].body;
const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints?.length}` : null; const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints?.length}` : null;
log.data(` #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`); log.data(` #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`);


@ -16,7 +16,8 @@ const humanConfig = {
}; };
async function main(inputFile) { async function main(inputFile) {
global.fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-unpublished-import, import/no-unresolved, node/no-missing-import, node/no-extraneous-import // @ts-ignore
global.fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-unpublished-import
const human = new Human.Human(humanConfig); // create instance of human using default configuration const human = new Human.Human(humanConfig); // create instance of human using default configuration
log.info('Human:', human.version, 'TF:', tf.version_core); log.info('Human:', human.version, 'TF:', tf.version_core);
await human.load(); // optional as models would be loaded on-demand first time they are required await human.load(); // optional as models would be loaded on-demand first time they are required


@ -27,7 +27,8 @@ async function init() {
await human.tf.ready(); await human.tf.ready();
log.info('Human:', human.version, 'TF:', tf.version_core); log.info('Human:', human.version, 'TF:', tf.version_core);
await human.load(); await human.load();
log.info('Loaded:', human.models.loaded()); const loaded = Object.keys(human.models).filter((a) => human.models[a]);
log.info('Loaded:', loaded);
log.info('Memory state:', human.tf.engine().memory()); log.info('Memory state:', human.tf.engine().memory());
} }
@ -45,12 +46,10 @@ async function detect(input) {
} }
async function main() { async function main() {
process.noDeprecation = true;
log.configure({ inspect: { breakLength: 265 } }); log.configure({ inspect: { breakLength: 265 } });
log.header(); log.header();
if (process.argv.length !== 4) { if (process.argv.length !== 4) {
log.error('Parameters: <first image> <second image> missing'); throw new Error('Parameters: <first image> <second image> missing');
return;
} }
await init(); await init();
const res1 = await detect(process.argv[2]); const res1 = await detect(process.argv[2]);
@ -58,7 +57,7 @@ async function main() {
if (!res1 || !res1.face || res1.face.length === 0 || !res2 || !res2.face || res2.face.length === 0) { if (!res1 || !res1.face || res1.face.length === 0 || !res2 || !res2.face || res2.face.length === 0) {
throw new Error('Could not detect face descriptors'); throw new Error('Could not detect face descriptors');
} }
const similarity = human.match.similarity(res1.face[0].embedding, res2.face[0].embedding, { order: 2 }); const similarity = human.similarity(res1.face[0].embedding, res2.face[0].embedding, { order: 2 });
log.data('Similarity: ', similarity); log.data('Similarity: ', similarity);
} }


@ -13,11 +13,9 @@ const Human = require('../../dist/human.node.js'); // use this when using human
const humanConfig = { const humanConfig = {
// add any custom config here // add any custom config here
debug: true, debug: true,
body: { enabled: false },
}; };
async function detect(inputFile) { async function detect(inputFile) {
process.noDeprecation = true;
const human = new Human.Human(humanConfig); // create instance of human using default configuration const human = new Human.Human(humanConfig); // create instance of human using default configuration
console.log('Human:', human.version, 'TF:', tf.version_core); // eslint-disable-line no-console console.log('Human:', human.version, 'TF:', tf.version_core); // eslint-disable-line no-console
await human.load(); // optional as models would be loaded on-demand first time they are required await human.load(); // optional as models would be loaded on-demand first time they are required


@ -11,11 +11,10 @@
* Working version of `ffmpeg` must be present on the system * Working version of `ffmpeg` must be present on the system
*/ */
const process = require('process');
const spawn = require('child_process').spawn; const spawn = require('child_process').spawn;
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
// in nodejs environments tfjs-node is required to be loaded before human // in nodejs environments tfjs-node is required to be loaded before human
// const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases) // const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
const Pipe2Jpeg = require('pipe2jpeg'); // eslint-disable-line node/no-missing-require, import/no-unresolved const Pipe2Jpeg = require('pipe2jpeg'); // eslint-disable-line node/no-missing-require, import/no-unresolved
// const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases) // const human = require('@vladmandic/human'); // use this when human is installed as module (majority of use cases)
@ -23,8 +22,7 @@ const Human = require('../../dist/human.node.js'); // use this when using human
let count = 0; // counter let count = 0; // counter
let busy = false; // busy flag let busy = false; // busy flag
let inputFile = './test.mp4'; const inputFile = './test.mp4';
if (process.argv.length === 3) inputFile = process.argv[2];
const humanConfig = { const humanConfig = {
modelBasePath: 'file://models/', modelBasePath: 'file://models/',
@ -61,27 +59,24 @@ const ffmpegParams = [
'pipe:1', // output to unix pipe that is then captured by pipe2jpeg 'pipe:1', // output to unix pipe that is then captured by pipe2jpeg
]; ];
async function detect(jpegBuffer) { async function process(jpegBuffer) {
if (busy) return; // skip processing if busy if (busy) return; // skip processing if busy
busy = true; busy = true;
const tensor = human.tf.node.decodeJpeg(jpegBuffer, 3); // decode jpeg buffer to raw tensor const tensor = human.tf.node.decodeJpeg(jpegBuffer, 3); // decode jpeg buffer to raw tensor
log.state('input frame:', ++count, 'size:', jpegBuffer.length, 'decoded shape:', tensor.shape);
const res = await human.detect(tensor); const res = await human.detect(tensor);
human.tf.dispose(tensor); // must dispose tensor log.data('gesture', JSON.stringify(res.gesture));
// start custom processing here // do processing here
log.data('frame', { frame: ++count, size: jpegBuffer.length, shape: tensor.shape, face: res?.face?.length, body: res?.body?.length, hand: res?.hand?.length, gesture: res?.gesture?.length }); tf.dispose(tensor); // must dispose tensor
if (res?.face?.[0]) log.data('person', { score: [res.face[0].boxScore, res.face[0].faceScore], age: res.face[0].age || 0, gender: [res.face[0].genderScore || 0, res.face[0].gender], emotion: res.face[0].emotion?.[0] });
// at the of processing mark loop as not busy so it can process next frame
busy = false; busy = false;
} }
async function main() { async function main() {
log.header(); log.header();
process.noDeprecation = true;
await human.tf.ready(); await human.tf.ready();
// pre-load models // pre-load models
log.info({ human: human.version, tf: human.tf.version_core }); log.info('human:', human.version, 'tf:', tf.version_core);
log.info({ input: inputFile }); pipe2jpeg.on('jpeg', (jpegBuffer) => process(jpegBuffer));
pipe2jpeg.on('data', (jpegBuffer) => detect(jpegBuffer));
const ffmpeg = spawn('ffmpeg', ffmpegParams, { stdio: ['ignore', 'pipe', 'ignore'] }); const ffmpeg = spawn('ffmpeg', ffmpegParams, { stdio: ['ignore', 'pipe', 'ignore'] });
ffmpeg.on('error', (error) => log.error('ffmpeg error:', error)); ffmpeg.on('error', (error) => log.error('ffmpeg error:', error));


@ -1,11 +1,15 @@
/** /**
* Human demo for NodeJS * Human demo for NodeJS
*
* Requires [node-fetch](https://www.npmjs.com/package/node-fetch) to provide `fetch` functionality in NodeJS environment
*/ */
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const fs = require('fs'); const fs = require('fs');
const path = require('path'); const path = require('path');
const process = require('process'); const process = require('process');
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
let fetch; // fetch is dynamically imported later
// in nodejs environments tfjs-node is required to be loaded before human // in nodejs environments tfjs-node is required to be loaded before human
const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require const tf = require('@tensorflow/tfjs-node'); // eslint-disable-line node/no-unpublished-require
@ -49,7 +53,8 @@ async function init() {
log.info('Human:', human.version); log.info('Human:', human.version);
// log.info('Active Configuration', human.config); // log.info('Active Configuration', human.config);
await human.load(); await human.load();
log.info('Loaded:', human.models.loaded()); const loaded = Object.keys(human.models).filter((a) => human.models[a]);
log.info('Loaded:', loaded);
// log.info('Memory state:', human.tf.engine().memory()); // log.info('Memory state:', human.tf.engine().memory());
log.data(tf.backend().binding ? tf.backend().binding.TF_Version : null); log.data(tf.backend().binding ? tf.backend().binding.TF_Version : null);
} }
@ -60,12 +65,11 @@ async function detect(input) {
log.info('Loading image:', input); log.info('Loading image:', input);
if (input.startsWith('http:') || input.startsWith('https:')) { if (input.startsWith('http:') || input.startsWith('https:')) {
const res = await fetch(input); const res = await fetch(input);
if (res && res.ok) buffer = Buffer.from(await res.arrayBuffer()); if (res && res.ok) buffer = await res.buffer();
else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type')); else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type'));
} else { } else {
buffer = fs.readFileSync(input); buffer = fs.readFileSync(input);
} }
log.data('Image bytes:', buffer?.length, 'buffer:', buffer?.slice(0, 32));
// decode image using tfjs-node so we don't need external depenencies // decode image using tfjs-node so we don't need external depenencies
// can also be done using canvas.js or some other 3rd party image library // can also be done using canvas.js or some other 3rd party image library
@ -92,7 +96,7 @@ async function detect(input) {
try { try {
result = await human.detect(tensor, myConfig); result = await human.detect(tensor, myConfig);
} catch (err) { } catch (err) {
log.error('caught', err); log.error('caught');
} }
// dispose image tensor as we no longer need it // dispose image tensor as we no longer need it
@ -104,7 +108,7 @@ async function detect(input) {
for (let i = 0; i < result.face.length; i++) { for (let i = 0; i < result.face.length; i++) {
const face = result.face[i]; const face = result.face[i];
const emotion = face.emotion.reduce((prev, curr) => (prev.score > curr.score ? prev : curr)); const emotion = face.emotion.reduce((prev, curr) => (prev.score > curr.score ? prev : curr));
log.data(` Face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} distance:${face.distance}`); log.data(` Face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} iris:${face.iris}`);
} }
} else { } else {
log.data(' Face: N/A'); log.data(' Face: N/A');
@ -187,8 +191,8 @@ async function test() {
async function main() { async function main() {
log.configure({ inspect: { breakLength: 265 } }); log.configure({ inspect: { breakLength: 265 } });
log.header(); log.header();
process.noDeprecation = true;
log.info('Current folder:', process.env.PWD); log.info('Current folder:', process.env.PWD);
fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-unpublished-import
await init(); await init();
const f = process.argv[2]; const f = process.argv[2];
if (process.argv.length !== 3) { if (process.argv.length !== 3) {


@ -20,7 +20,7 @@ const config = { // just enable all and leave default settings
modelBasePath: 'file://models', modelBasePath: 'file://models',
debug: true, debug: true,
softwareKernels: true, // slower but enhanced precision since face rotation can work in software mode in nodejs environments softwareKernels: true, // slower but enhanced precision since face rotation can work in software mode in nodejs environments
cacheSensitivity: 0.01, cacheSensitivity: 0,
face: { enabled: true, detector: { maxDetected: 100, minConfidence: 0.1 } }, face: { enabled: true, detector: { maxDetected: 100, minConfidence: 0.1 } },
object: { enabled: true, maxDetected: 100, minConfidence: 0.1 }, object: { enabled: true, maxDetected: 100, minConfidence: 0.1 },
gesture: { enabled: true }, gesture: { enabled: true },
@ -77,11 +77,10 @@ async function main() {
globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library
log.info('Human:', human.version, 'TF:', tf.version_core); log.info('Human:', human.version, 'TF:', tf.version_core);
process.noDeprecation = true;
const configErrors = await human.validate(); const configErrors = await human.validate();
if (configErrors.length > 0) log.error('Configuration errors:', configErrors); if (configErrors.length > 0) log.error('Configuration errors:', configErrors);
await human.load(); // pre-load models await human.load(); // pre-load models
log.info('Loaded models:', human.models.loaded()); log.info('Loaded models:', Object.keys(human.models).filter((a) => human.models[a]));
const inDir = process.argv[2]; const inDir = process.argv[2];
const outDir = process.argv[3]; const outDir = process.argv[3];


@ -20,7 +20,7 @@
body { font-size: 1rem; font-family: "CenturyGothic", "Segoe UI", sans-serif; font-variant: small-caps; width: -webkit-fill-available; height: 100%; background: black; color: white; overflow: hidden; margin: 0; } body { font-size: 1rem; font-family: "CenturyGothic", "Segoe UI", sans-serif; font-variant: small-caps; width: -webkit-fill-available; height: 100%; background: black; color: white; overflow: hidden; margin: 0; }
select { font-size: 1rem; font-family: "CenturyGothic", "Segoe UI", sans-serif; font-variant: small-caps; background: gray; color: white; border: none; } select { font-size: 1rem; font-family: "CenturyGothic", "Segoe UI", sans-serif; font-variant: small-caps; background: gray; color: white; border: none; }
</style> </style>
<script src="../segmentation/index.js" type="module"></script> <script src="index.js" type="module"></script>
</head> </head>
<body> <body>
<noscript><h1>javascript is required</h1></noscript> <noscript><h1>javascript is required</h1></noscript>
@ -46,9 +46,9 @@
<main> <main>
<div id="main" class="main"> <div id="main" class="main">
<video id="webcam" style="position: fixed; top: 0; left: 0; width: 50vw; height: 50vh"></video> <video id="webcam" style="position: fixed; top: 0; left: 0; width: 50vw; height: 50vh"></video>
<img id="background" alt="background" style="position: fixed; top: 0; right: 0; width: 50vw; height: 50vh" controls></img> <video id="video" style="position: fixed; top: 0; right: 0; width: 50vw; height: 50vh" controls></video>
<canvas id="output" style="position: fixed; bottom: 0; left: 0; height: 50vh"></canvas> <canvas id="output" style="position: fixed; bottom: 0; left: 0; width: 50vw; height: 50vh"></canvas>
<canvas id="merge" style="position: fixed; bottom: 0; right: 0; height: 50vh"></canvas> <canvas id="merge" style="position: fixed; bottom: 0; right: 0; width: 50vw; height: 50vh"></canvas>
</div> </div>
</main> </main>
<footer> <footer>


@ -25,8 +25,6 @@ const humanConfig = { // user configuration for human, used to fine-tune behavio
}, },
}; };
const backgroundImage = '../../samples/in/background.jpg';
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
const log = (...msg) => console.log(...msg); // eslint-disable-line no-console const log = (...msg) => console.log(...msg); // eslint-disable-line no-console
@ -34,7 +32,7 @@ const log = (...msg) => console.log(...msg); // eslint-disable-line no-console
async function main() { async function main() {
// gather dom elements // gather dom elements
const dom = { const dom = {
background: document.getElementById('background'), video: document.getElementById('video'),
webcam: document.getElementById('webcam'), webcam: document.getElementById('webcam'),
output: document.getElementById('output'), output: document.getElementById('output'),
merge: document.getElementById('merge'), merge: document.getElementById('merge'),
@ -46,7 +44,7 @@ async function main() {
// set defaults // set defaults
dom.fps.innerText = 'initializing'; dom.fps.innerText = 'initializing';
dom.ratio.valueAsNumber = human.config.segmentation.ratio; dom.ratio.valueAsNumber = human.config.segmentation.ratio;
dom.background.src = backgroundImage; dom.video.src = '../assets/rijeka.mp4';
dom.composite.innerHTML = ['source-atop', 'color', 'color-burn', 'color-dodge', 'copy', 'darken', 'destination-atop', 'destination-in', 'destination-out', 'destination-over', 'difference', 'exclusion', 'hard-light', 'hue', 'lighten', 'lighter', 'luminosity', 'multiply', 'overlay', 'saturation', 'screen', 'soft-light', 'source-in', 'source-out', 'source-over', 'xor'].map((gco) => `<option value="${gco}">${gco}</option>`).join(''); // eslint-disable-line max-len dom.composite.innerHTML = ['source-atop', 'color', 'color-burn', 'color-dodge', 'copy', 'darken', 'destination-atop', 'destination-in', 'destination-out', 'destination-over', 'difference', 'exclusion', 'hard-light', 'hue', 'lighten', 'lighter', 'luminosity', 'multiply', 'overlay', 'saturation', 'screen', 'soft-light', 'source-in', 'source-out', 'source-over', 'xor'].map((gco) => `<option value="${gco}">${gco}</option>`).join(''); // eslint-disable-line max-len
const ctxMerge = dom.merge.getContext('2d'); const ctxMerge = dom.merge.getContext('2d');
@ -54,8 +52,8 @@ async function main() {
log('platform:', human.env.platform, '| agent:', human.env.agent); log('platform:', human.env.platform, '| agent:', human.env.agent);
await human.load(); // preload all models await human.load(); // preload all models
log('backend:', human.tf.getBackend(), '| available:', human.env.backends); log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
log('models stats:', human.models.stats()); log('models stats:', human.getModelStats());
log('models loaded:', human.models.loaded()); log('models loaded:', Object.values(human.models).filter((model) => model !== null).length);
await human.warmup(); // warmup function to initialize backend for future faster detection await human.warmup(); // warmup function to initialize backend for future faster detection
const numTensors = human.tf.engine().state.numTensors; const numTensors = human.tf.engine().state.numTensors;
@ -68,8 +66,7 @@ async function main() {
dom.merge.height = human.webcam.height; dom.merge.height = human.webcam.height;
loop(); // eslint-disable-line no-use-before-define loop(); // eslint-disable-line no-use-before-define
}; };
await human.webcam.start({ element: dom.webcam, crop: true, width: 960, height: 720 }); // use human webcam helper methods and associate webcam stream with a dom element
await human.webcam.start({ element: dom.webcam, crop: true, width: window.innerWidth / 2, height: window.innerHeight / 2 }); // use human webcam helper methods and associate webcam stream with a dom element
if (!human.webcam.track) dom.fps.innerText = 'webcam error'; if (!human.webcam.track) dom.fps.innerText = 'webcam error';
// processing loop // processing loop
@ -85,10 +82,10 @@ async function main() {
return; return;
} }
dom.fps.innerText = `fps: ${Math.round(10000 / (t1 - t0)) / 10}`; // mark performance dom.fps.innerText = `fps: ${Math.round(10000 / (t1 - t0)) / 10}`; // mark performance
human.draw.tensor(rgba, dom.output); // draw raw output human.tf.browser.toPixels(rgba, dom.output); // draw raw output
human.tf.dispose(rgba); // dispose tensors human.tf.dispose(rgba); // dispose tensors
ctxMerge.globalCompositeOperation = 'source-over'; ctxMerge.globalCompositeOperation = 'source-over';
ctxMerge.drawImage(dom.background, 0, 0); // draw original video to first stacked canvas ctxMerge.drawImage(dom.video, 0, 0); // draw original video to first stacked canvas
ctxMerge.globalCompositeOperation = dom.composite.value; ctxMerge.globalCompositeOperation = dom.composite.value;
ctxMerge.drawImage(dom.output, 0, 0); // draw processed output to second stacked canvas ctxMerge.drawImage(dom.output, 0, 0); // draw processed output to second stacked canvas
if (numTensors !== human.tf.engine().state.numTensors) log({ leak: human.tf.engine().state.numTensors - numTensors }); // check for memory leaks if (numTensors !== human.tf.engine().state.numTensors) log({ leak: human.tf.engine().state.numTensors - numTensors }); // check for memory leaks


@ -1,28 +0,0 @@
## Tracker
### Based on
<https://github.com/opendatacam/node-moving-things-tracker>
### Build
- remove reference to `lodash`:
> `isEqual` in <tracker.js>
- replace external lib:
> curl https://raw.githubusercontent.com/ubilabs/kd-tree-javascript/master/kdTree.js -o lib/kdTree-min.js
- build with `esbuild`:
> node_modules/.bin/esbuild --bundle tracker.js --format=esm --platform=browser --target=esnext --keep-names --tree-shaking=false --analyze --outfile=/home/vlado/dev/human/demo/tracker/tracker.js --banner:js="/* eslint-disable */"
### Usage
computeDistance(item1, item2)
disableKeepInMemory()
enableKeepInMemory()
getAllTrackedItems()
getJSONDebugOfTrackedItems(roundInt = true)
getJSONOfAllTrackedItems()
getJSONOfTrackedItems(roundInt = true)
getTrackedItemsInMOTFormat(frameNb)
reset()
setParams(newParams)
updateTrackedItemsWithNewFrame(detectionsOfThisFrame, frameNb)
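
The removed tracker notes above list the exported functions but give no example. Below is a minimal sketch of driving that API frame by frame; the import form and the detection object shape are assumptions, since neither is documented in the notes, while the parameter names match the demo form controls further below.

```js
// minimal sketch: feed per-frame detections to the tracker and read back tracked items
// both the import form and the detection shape below are assumptions
import * as tracker from './tracker.js';
const detectionsPerFrame = [ // assumed shape: one array of detections per video frame
  [{ x: 10, y: 10, w: 50, h: 50, confidence: 0.9, name: 'face' }],
  [{ x: 12, y: 11, w: 50, h: 50, confidence: 0.9, name: 'face' }],
];
tracker.setParams({ unMatchedFramesTolerance: 60, iouLimit: 0.1 }); // names taken from the demo form controls
for (let frame = 0; frame < detectionsPerFrame.length; frame++) {
  tracker.updateTrackedItemsWithNewFrame(detectionsPerFrame[frame], frame);
  const tracked = tracker.getJSONOfTrackedItems(true); // rounded output, per the signature listed above
  console.log(frame, tracked);
}
```
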


@ -1,65 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<meta name="viewport" content="width=device-width" id="viewport">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="theme-color" content="#000000">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<script src="./index.js" type="module"></script>
<style>
html { font-family: 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
body::-webkit-scrollbar { display: none; }
input[type="file"] { font-family: 'Segoe UI'; font-size: 14px; font-variant: small-caps; }
::-webkit-file-upload-button { background: #333333; color: white; border: 0; border-radius: 0; padding: 6px 16px; box-shadow: 4px 4px 4px #222222; font-family: 'Segoe UI'; font-size: 14px; font-variant: small-caps; }
</style>
</head>
<body>
<div style="display: flex">
<video id="video" playsinline style="width: 25vw" controls controlslist="nofullscreen nodownload noremoteplayback" disablepictureinpicture loop></video>
<canvas id="canvas" style="width: 75vw"></canvas>
</div>
<div class="uploader" style="padding: 8px">
<input type="file" name="inputvideo" id="inputvideo" accept="video/*"></input>
<input type="checkbox" id="interpolation" name="interpolation"></input>
<label for="tracker">interpolation</label>
</div>
<form id="config" style="padding: 8px; line-height: 1.6rem;">
tracker |
<input type="checkbox" id="tracker" name="tracker" checked></input>
<label for="tracker">enabled</label> |
<input type="checkbox" id="keepInMemory" name="keepInMemory"></input>
<label for="keepInMemory">keepInMemory</label> |
<br>
tracker source |
<input type="radio" id="box-face" name="box" value="face" checked>
<label for="box-face">face</label> |
<input type="radio" id="box-body" name="box" value="body">
<label for="box-face">body</label> |
<input type="radio" id="box-object" name="box" value="object">
<label for="box-face">object</label> |
<br>
tracker config |
<input type="range" id="unMatchedFramesTolerance" name="unMatchedFramesTolerance" min="0" max="300" step="1", value="60"></input>
<label for="unMatchedFramesTolerance">unMatchedFramesTolerance</label> |
<input type="range" id="iouLimit" name="unMatchedFramesTolerance" min="0" max="1" step="0.01", value="0.1"></input>
<label for="iouLimit">iouLimit</label> |
<input type="range" id="distanceLimit" name="unMatchedFramesTolerance" min="0" max="1" step="0.01", value="0.1"></input>
<label for="distanceLimit">distanceLimit</label> |
<input type="radio" id="matchingAlgorithm-kdTree" name="matchingAlgorithm" value="kdTree" checked>
<label for="matchingAlgorithm-kdTree">kdTree</label> |
<input type="radio" id="matchingAlgorithm-munkres" name="matchingAlgorithm" value="munkres">
<label for="matchingAlgorithm-kdTree">munkres</label> |
</form>
<pre id="status" style="position: absolute; top: 12px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
<pre id="log" style="padding: 8px"></pre>
<div id="performance" style="position: absolute; bottom: 0; width: 100%; padding: 8px; font-size: 0.8rem;"></div>
</body>
</html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -1,208 +0,0 @@
/**
* Human demo for browsers
* @default Human Library
* @summary <https://github.com/vladmandic/human>
* @author <https://github.com/vladmandic>
* @copyright <https://github.com/vladmandic>
* @license MIT
*/
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
import tracker from './tracker.js';
const humanConfig: Partial<H.Config> = { // user configuration for human, used to fine-tune behavior
debug: true,
backend: 'webgl',
// cacheSensitivity: 0,
// cacheModels: false,
// warmup: 'none',
modelBasePath: 'https://vladmandic.github.io/human-models/models',
filter: { enabled: true, equalization: false, flip: false },
face: {
enabled: true,
detector: { rotation: false, maxDetected: 10, minConfidence: 0.3 },
mesh: { enabled: true },
attention: { enabled: false },
iris: { enabled: false },
description: { enabled: false },
emotion: { enabled: false },
antispoof: { enabled: false },
liveness: { enabled: false },
},
body: { enabled: false, maxDetected: 6, modelPath: 'movenet-multipose.json' },
hand: { enabled: false },
object: { enabled: false, maxDetected: 10 },
segmentation: { enabled: false },
gesture: { enabled: false },
};
interface TrackerConfig {
unMatchedFramesTolerance: number, // number of frames an object may remain unmatched before it is considered gone; ignored if fastDelete is set
iouLimit: number, // exclude objects from being matched if their IOU is below this value; 1 means total overlap; 0 means no overlap
fastDelete: boolean, // remove new objects immediately if they could not be matched in the next frames; if set, ignores unMatchedFramesTolerance
distanceLimit: number, // distance limit for matching; if values need to be excluded from matching set their distance to something greater than the distance limit
matchingAlgorithm: 'kdTree' | 'munkres', // algorithm used to match tracks with new detections
}
interface TrackerResult {
id: number,
confidence: number,
bearing: number,
isZombie: boolean,
name: string,
x: number,
y: number,
w: number,
h: number,
}
const trackerConfig: TrackerConfig = {
unMatchedFramesTolerance: 100,
iouLimit: 0.05,
fastDelete: false,
distanceLimit: 1e4,
matchingAlgorithm: 'kdTree',
};
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
const dom = { // grab instances of dom objects so we don't have to look them up later
video: document.getElementById('video') as HTMLVideoElement,
canvas: document.getElementById('canvas') as HTMLCanvasElement,
log: document.getElementById('log') as HTMLPreElement,
fps: document.getElementById('status') as HTMLPreElement,
tracker: document.getElementById('tracker') as HTMLInputElement,
interpolation: document.getElementById('interpolation') as HTMLInputElement,
config: document.getElementById('config') as HTMLFormElement,
ctx: (document.getElementById('canvas') as HTMLCanvasElement).getContext('2d') as CanvasRenderingContext2D,
};
const timestamp = { detect: 0, draw: 0, tensors: 0, start: 0 }; // holds information used to calculate performance and possible memory leaks
const fps = { detectFPS: 0, drawFPS: 0, frames: 0, averageMs: 0 }; // holds calculated fps information for both detect and screen refresh
const log = (...msg) => { // helper method to output messages
dom.log.innerText += msg.join(' ') + '\n';
console.log(...msg); // eslint-disable-line no-console
};
const status = (msg) => dom.fps.innerText = msg; // print status element
async function detectionLoop() { // main detection loop
if (!dom.video.paused && dom.video.readyState >= 2) {
if (timestamp.start === 0) timestamp.start = human.now();
// log('profiling data:', await human.profile(dom.video));
await human.detect(dom.video, humanConfig); // actual detection; we're not capturing output in a local variable as it can also be reached via human.result
const tensors = human.tf.memory().numTensors; // check current tensor usage for memory leaks
if (tensors - timestamp.tensors !== 0) log('allocated tensors:', tensors - timestamp.tensors); // printed on start and each time there is a tensor leak
timestamp.tensors = tensors;
fps.detectFPS = Math.round(1000 * 1000 / (human.now() - timestamp.detect)) / 1000;
fps.frames++;
fps.averageMs = Math.round(1000 * (human.now() - timestamp.start) / fps.frames) / 1000;
}
timestamp.detect = human.now();
requestAnimationFrame(detectionLoop); // start new frame immediately
}
function drawLoop() { // main screen refresh loop
if (!dom.video.paused && dom.video.readyState >= 2) {
const res: H.Result = dom.interpolation.checked ? human.next(human.result) : human.result; // interpolate results if enabled
let tracking: H.FaceResult[] | H.BodyResult[] | H.ObjectResult[] = [];
if (human.config.face.enabled) tracking = res.face;
else if (human.config.body.enabled) tracking = res.body;
else if (human.config.object.enabled) tracking = res.object;
else log('unknown object type');
let data: TrackerResult[] = [];
if (dom.tracker.checked) {
const items = tracking.map((obj) => ({
x: obj.box[0] + obj.box[2] / 2,
y: obj.box[1] + obj.box[3] / 2,
w: obj.box[2],
h: obj.box[3],
name: obj.label || (human.config.face.enabled ? 'face' : 'body'),
confidence: obj.score,
}));
tracker.updateTrackedItemsWithNewFrame(items, fps.frames);
data = tracker.getJSONOfTrackedItems(true) as TrackerResult[];
}
human.draw.canvas(dom.video, dom.canvas); // copy input video frame to output canvas
for (let i = 0; i < tracking.length; i++) {
// @ts-ignore
const name = tracking[i].label || (human.config.face.enabled ? 'face' : 'body');
dom.ctx.strokeRect(tracking[i].box[0], tracking[i].box[1], tracking[i].box[2], tracking[i].box[3]);
dom.ctx.fillText(`id: ${tracking[i].id} ${Math.round(100 * tracking[i].score)}% ${name}`, tracking[i].box[0] + 4, tracking[i].box[1] + 16);
if (data[i]) {
dom.ctx.fillText(`t: ${data[i].id} ${Math.round(100 * data[i].confidence)}% ${data[i].name} ${data[i].isZombie ? 'zombie' : ''}`, tracking[i].box[0] + 4, tracking[i].box[1] + 34);
}
}
}
const now = human.now();
fps.drawFPS = Math.round(1000 * 1000 / (now - timestamp.draw)) / 1000;
timestamp.draw = now;
status(dom.video.paused ? 'paused' : `fps: ${fps.detectFPS.toFixed(1).padStart(5, ' ')} detect | ${fps.drawFPS.toFixed(1).padStart(5, ' ')} draw`); // write status
setTimeout(drawLoop, 30); // use to slow down refresh from max refresh rate to target of 30 fps
}
async function handleVideo(file: File) {
const url = URL.createObjectURL(file);
dom.video.src = url;
await dom.video.play();
log('loaded video:', file.name, 'resolution:', [dom.video.videoWidth, dom.video.videoHeight], 'duration:', dom.video.duration);
dom.canvas.width = dom.video.videoWidth;
dom.canvas.height = dom.video.videoHeight;
dom.ctx.strokeStyle = 'white';
dom.ctx.fillStyle = 'white';
dom.ctx.font = '16px Segoe UI';
dom.video.playbackRate = 0.25;
}
function initInput() {
document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
document.body.addEventListener('dragover', (evt) => evt.preventDefault());
document.body.addEventListener('drop', async (evt) => {
evt.preventDefault();
if (evt.dataTransfer) evt.dataTransfer.dropEffect = 'copy';
const file = evt.dataTransfer?.files?.[0];
if (file) await handleVideo(file);
log(dom.video.readyState);
});
(document.getElementById('inputvideo') as HTMLInputElement).onchange = async (evt) => {
evt.preventDefault();
const file = evt.target?.['files']?.[0];
if (file) await handleVideo(file);
};
dom.config.onchange = () => {
trackerConfig.distanceLimit = (document.getElementById('distanceLimit') as HTMLInputElement).valueAsNumber;
trackerConfig.iouLimit = (document.getElementById('iouLimit') as HTMLInputElement).valueAsNumber;
trackerConfig.unMatchedFramesTolerance = (document.getElementById('unMatchedFramesTolerance') as HTMLInputElement).valueAsNumber;
trackerConfig.matchingAlgorithm = (document.getElementById('matchingAlgorithm-kdTree') as HTMLInputElement).checked ? 'kdTree' : 'munkres';
tracker.setParams(trackerConfig);
if ((document.getElementById('keepInMemory') as HTMLInputElement).checked) tracker.enableKeepInMemory();
else tracker.disableKeepInMemory();
tracker.reset();
log('tracker config change', JSON.stringify(trackerConfig));
humanConfig.face!.enabled = (document.getElementById('box-face') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
humanConfig.body!.enabled = (document.getElementById('box-body') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
humanConfig.object!.enabled = (document.getElementById('box-object') as HTMLInputElement).checked; // eslint-disable-line @typescript-eslint/no-non-null-assertion
};
dom.tracker.onchange = (evt) => {
log('tracker', (evt.target as HTMLInputElement).checked ? 'enabled' : 'disabled');
tracker.setParams(trackerConfig);
tracker.reset();
};
}
async function main() { // main entry point
log('human version:', human.version, '| tfjs version:', human.tf.version['tfjs-core']);
log('platform:', human.env.platform, '| agent:', human.env.agent);
status('loading...');
await human.load(); // preload all models
log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
log('models loaded:', human.models.loaded());
status('initializing...');
await human.warmup(); // warmup function to initialize backend for future faster detection
initInput(); // initialize input
await detectionLoop(); // start detection loop
drawLoop(); // start draw loop
}
window.onload = main;

File diff suppressed because it is too large Load Diff

View File

@ -21,7 +21,7 @@
</style> </style>
</head> </head>
<body> <body>
<canvas id="canvas" style="margin: 0 auto; width: 100vw"></canvas> <canvas id="canvas" style="margin: 0 auto; width: 100%"></canvas>
<video id="video" playsinline style="display: none"></video> <video id="video" playsinline style="display: none"></video>
<pre id="status" style="position: absolute; top: 12px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre> <pre id="status" style="position: absolute; top: 12px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
<pre id="log" style="padding: 8px"></pre> <pre id="log" style="padding: 8px"></pre>

View File

@ -4,6 +4,6 @@
author: <https://github.com/vladmandic>' author: <https://github.com/vladmandic>'
*/ */
import*as m from"../../dist/human.esm.js";var v=1920,b={debug:!0,backend:"webgl",modelBasePath:"https://vladmandic.github.io/human-models/models/",filter:{enabled:!0,equalization:!1,flip:!1},face:{enabled:!0,detector:{rotation:!1},mesh:{enabled:!0},attention:{enabled:!1},iris:{enabled:!0},description:{enabled:!0},emotion:{enabled:!0},antispoof:{enabled:!0},liveness:{enabled:!0}},body:{enabled:!1},hand:{enabled:!1},object:{enabled:!1},segmentation:{enabled:!1},gesture:{enabled:!0}},e=new m.Human(b);e.env.perfadd=!1;e.draw.options.font='small-caps 18px "Lato"';e.draw.options.lineHeight=20;e.draw.options.drawPoints=!0;var a={video:document.getElementById("video"),canvas:document.getElementById("canvas"),log:document.getElementById("log"),fps:document.getElementById("status"),perf:document.getElementById("performance")},n={detect:0,draw:0,tensors:0,start:0},s={detectFPS:0,drawFPS:0,frames:0,averageMs:0},o=(...t)=>{a.log.innerText+=t.join(" ")+` import*as i from"../../dist/human.esm.js";var m={modelBasePath:"../../models",filter:{enabled:!0,equalization:!1,flip:!1},face:{enabled:!0,detector:{rotation:!1},mesh:{enabled:!0},attention:{enabled:!1},iris:{enabled:!0},description:{enabled:!0},emotion:{enabled:!0}},body:{enabled:!0},hand:{enabled:!0},object:{enabled:!1},segmentation:{enabled:!1},gesture:{enabled:!0}},e=new i.Human(m);e.env.perfadd=!1;e.draw.options.font='small-caps 18px "Lato"';e.draw.options.lineHeight=20;var a={video:document.getElementById("video"),canvas:document.getElementById("canvas"),log:document.getElementById("log"),fps:document.getElementById("status"),perf:document.getElementById("performance")},n={detect:0,draw:0,tensors:0,start:0},s={detectFPS:0,drawFPS:0,frames:0,averageMs:0},o=(...t)=>{a.log.innerText+=t.join(" ")+`
`,console.log(...t)},i=t=>a.fps.innerText=t,g=t=>a.perf.innerText="tensors:"+e.tf.memory().numTensors.toString()+" | performance: "+JSON.stringify(t).replace(/"|{|}/g,"").replace(/,/g," | ");async function f(){if(!a.video.paused){n.start===0&&(n.start=e.now()),await e.detect(a.video);let t=e.tf.memory().numTensors;t-n.tensors!==0&&o("allocated tensors:",t-n.tensors),n.tensors=t,s.detectFPS=Math.round(1e3*1e3/(e.now()-n.detect))/1e3,s.frames++,s.averageMs=Math.round(1e3*(e.now()-n.start)/s.frames)/1e3,s.frames%100===0&&!a.video.paused&&o("performance",{...s,tensors:n.tensors})}n.detect=e.now(),requestAnimationFrame(f)}async function u(){var d,r,c;if(!a.video.paused){let l=e.next(e.result),w=await e.image(a.video);e.draw.canvas(w.canvas,a.canvas);let p={bodyLabels:`person confidence [score] and ${(c=(r=(d=e.result)==null?void 0:d.body)==null?void 0:r[0])==null?void 0:c.keypoints.length} keypoints`};await e.draw.all(a.canvas,l,p),g(l.performance)}let t=e.now();s.drawFPS=Math.round(1e3*1e3/(t-n.draw))/1e3,n.draw=t,i(a.video.paused?"paused":`fps: ${s.detectFPS.toFixed(1).padStart(5," ")} detect | ${s.drawFPS.toFixed(1).padStart(5," ")} draw`),setTimeout(u,30)}async function h(){let d=(await e.webcam.enumerate())[0].deviceId,r=await e.webcam.start({element:a.video,crop:!1,width:v,id:d});o(r),a.canvas.width=e.webcam.width,a.canvas.height=e.webcam.height,a.canvas.onclick=async()=>{e.webcam.paused?await e.webcam.play():e.webcam.pause()}}async function y(){o("human version:",e.version,"| tfjs version:",e.tf.version["tfjs-core"]),o("platform:",e.env.platform,"| agent:",e.env.agent),i("loading..."),await e.load(),o("backend:",e.tf.getBackend(),"| available:",e.env.backends),o("models stats:",e.models.stats()),o("models loaded:",e.models.loaded()),o("environment",e.env),i("initializing..."),await e.warmup(),await h(),await f(),await u()}window.onload=y; `,console.log(...t)},d=t=>a.fps.innerText=t,f=t=>a.perf.innerText="tensors:"+e.tf.memory().numTensors.toString()+" | performance: "+JSON.stringify(t).replace(/"|{|}/g,"").replace(/,/g," | ");async function l(){if(!a.video.paused){n.start===0&&(n.start=e.now()),await e.detect(a.video);let t=e.tf.memory().numTensors;t-n.tensors!==0&&o("allocated tensors:",t-n.tensors),n.tensors=t,s.detectFPS=Math.round(1e3*1e3/(e.now()-n.detect))/1e3,s.frames++,s.averageMs=Math.round(1e3*(e.now()-n.start)/s.frames)/1e3,s.frames%100===0&&!a.video.paused&&o("performance",{...s,tensors:n.tensors})}n.detect=e.now(),requestAnimationFrame(l)}async function c(){if(!a.video.paused){let r=e.next(e.result);e.config.filter.flip?e.draw.canvas(r.canvas,a.canvas):e.draw.canvas(a.video,a.canvas),await e.draw.all(a.canvas,r),f(r.performance)}let t=e.now();s.drawFPS=Math.round(1e3*1e3/(t-n.draw))/1e3,n.draw=t,d(a.video.paused?"paused":`fps: ${s.detectFPS.toFixed(1).padStart(5," ")} detect | ${s.drawFPS.toFixed(1).padStart(5," ")} draw`),setTimeout(c,30)}async function u(){await e.webcam.start({element:a.video,crop:!0}),a.canvas.width=e.webcam.width,a.canvas.height=e.webcam.height,a.canvas.onclick=async()=>{e.webcam.paused?await e.webcam.play():e.webcam.pause()}}async function w(){o("human version:",e.version,"| tfjs version:",e.tf.version["tfjs-core"]),o("platform:",e.env.platform,"| agent:",e.env.agent),d("loading..."),await e.load(),o("backend:",e.tf.getBackend(),"| available:",e.env.backends),o("models stats:",e.getModelStats()),o("models loaded:",Object.values(e.models).filter(t=>t!==null).length),d("initializing..."),await e.warmup(),await u(),await l(),await c()}window.onload=w;
//# sourceMappingURL=index.js.map //# sourceMappingURL=index.js.map

File diff suppressed because one or more lines are too long

View File

@ -9,20 +9,12 @@
import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human import * as H from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
const width = 1920; // used by webcam config as well as human maximum resolution // can be anything, but resolutions higher than 4k will disable internal optimizations
const humanConfig: Partial<H.Config> = { // user configuration for human, used to fine-tune behavior const humanConfig: Partial<H.Config> = { // user configuration for human, used to fine-tune behavior
debug: true, modelBasePath: '../../models',
backend: 'webgl',
// cacheSensitivity: 0,
// cacheModels: false,
// warmup: 'none',
// modelBasePath: '../../models',
modelBasePath: 'https://vladmandic.github.io/human-models/models/',
filter: { enabled: true, equalization: false, flip: false }, filter: { enabled: true, equalization: false, flip: false },
face: { enabled: true, detector: { rotation: false }, mesh: { enabled: true }, attention: { enabled: false }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true }, antispoof: { enabled: true }, liveness: { enabled: true } }, face: { enabled: true, detector: { rotation: false }, mesh: { enabled: true }, attention: { enabled: false }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true } },
body: { enabled: false }, body: { enabled: true },
hand: { enabled: false }, hand: { enabled: true },
object: { enabled: false }, object: { enabled: false },
segmentation: { enabled: false }, segmentation: { enabled: false },
gesture: { enabled: true }, gesture: { enabled: true },
@ -33,7 +25,6 @@ const human = new H.Human(humanConfig); // create instance of human with overrid
human.env.perfadd = false; // is performance data showing instant or total values human.env.perfadd = false; // is performance data showing instant or total values
human.draw.options.font = 'small-caps 18px "Lato"'; // set font used to draw labels when using draw methods human.draw.options.font = 'small-caps 18px "Lato"'; // set font used to draw labels when using draw methods
human.draw.options.lineHeight = 20; human.draw.options.lineHeight = 20;
human.draw.options.drawPoints = true; // draw points on face mesh
// human.draw.options.fillPolygons = true; // human.draw.options.fillPolygons = true;
const dom = { // grab instances of dom objects so we don't have to look them up later const dom = { // grab instances of dom objects so we don't have to look them up later
@ -51,7 +42,7 @@ const log = (...msg) => { // helper method to output messages
console.log(...msg); // eslint-disable-line no-console console.log(...msg); // eslint-disable-line no-console
}; };
const status = (msg) => dom.fps.innerText = msg; // print status element const status = (msg) => dom.fps.innerText = msg; // print status element
const perf = (msg) => dom.perf.innerText = 'tensors:' + human.tf.memory().numTensors.toString() + ' | performance: ' + JSON.stringify(msg).replace(/"|{|}/g, '').replace(/,/g, ' | '); // print performance element const perf = (msg) => dom.perf.innerText = 'tensors:' + (human.tf.memory().numTensors as number).toString() + ' | performance: ' + JSON.stringify(msg).replace(/"|{|}/g, '').replace(/,/g, ' | '); // print performance element
async function detectionLoop() { // main detection loop async function detectionLoop() { // main detection loop
if (!dom.video.paused) { if (!dom.video.paused) {
@ -73,11 +64,9 @@ async function detectionLoop() { // main detection loop
async function drawLoop() { // main screen refresh loop async function drawLoop() { // main screen refresh loop
if (!dom.video.paused) { if (!dom.video.paused) {
const interpolated = human.next(human.result); // smoothen result using last-known results const interpolated = human.next(human.result); // smoothen result using last-known results
const processed = await human.image(dom.video); // get current video frame, but enhanced with human.filters if (human.config.filter.flip) human.draw.canvas(interpolated.canvas as HTMLCanvasElement, dom.canvas); // draw processed image to screen canvas
human.draw.canvas(processed.canvas as HTMLCanvasElement, dom.canvas); else human.draw.canvas(dom.video, dom.canvas); // draw original video to screen canvas // better than using processed image as this loop happens faster than processing loop
await human.draw.all(dom.canvas, interpolated); // draw labels, boxes, lines, etc.
const opt: Partial<H.DrawOptions> = { bodyLabels: `person confidence [score] and ${human.result?.body?.[0]?.keypoints.length} keypoints` };
await human.draw.all(dom.canvas, interpolated, opt); // draw labels, boxes, lines, etc.
perf(interpolated.performance); // write performance data perf(interpolated.performance); // write performance data
} }
const now = human.now(); const now = human.now();
@ -88,10 +77,7 @@ async function drawLoop() { // main screen refresh loop
} }
async function webCam() { async function webCam() {
const devices = await human.webcam.enumerate(); await human.webcam.start({ element: dom.video, crop: true }); // use human webcam helper methods and associate webcam stream with a dom element
const id = devices[0].deviceId; // use first available video source
const webcamStatus = await human.webcam.start({ element: dom.video, crop: false, width, id }); // use human webcam helper methods and associate webcam stream with a dom element
log(webcamStatus);
dom.canvas.width = human.webcam.width; dom.canvas.width = human.webcam.width;
dom.canvas.height = human.webcam.height; dom.canvas.height = human.webcam.height;
dom.canvas.onclick = async () => { // pause when clicked on screen and resume on next click dom.canvas.onclick = async () => { // pause when clicked on screen and resume on next click
@ -106,9 +92,8 @@ async function main() { // main entry point
status('loading...'); status('loading...');
await human.load(); // preload all models await human.load(); // preload all models
log('backend:', human.tf.getBackend(), '| available:', human.env.backends); log('backend:', human.tf.getBackend(), '| available:', human.env.backends);
log('models stats:', human.models.stats()); log('models stats:', human.getModelStats());
log('models loaded:', human.models.loaded()); log('models loaded:', Object.values(human.models).filter((model) => model !== null).length);
log('environment', human.env);
status('initializing...'); status('initializing...');
await human.warmup(); // warmup function to initialize backend for future faster detection await human.warmup(); // warmup function to initialize backend for future faster detection
await webCam(); // start webcam await webCam(); // start webcam

File diff suppressed because one or more lines are too long

98941 dist/human.esm.js vendored

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

5291 dist/human.js vendored

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

30 dist/human.node.js vendored

File diff suppressed because one or more lines are too long

75509 dist/tfjs.esm.js vendored

File diff suppressed because one or more lines are too long

View File

@ -4,4 +4,4 @@
author: <https://github.com/vladmandic>' author: <https://github.com/vladmandic>'
*/ */
var e="4.22.0";var s="4.22.0";var t="4.22.0";var n="4.22.0";var r="4.22.0";var i="4.22.0";var h={tfjs:e,"tfjs-core":e,"tfjs-converter":s,"tfjs-backend-cpu":t,"tfjs-backend-webgl":n,"tfjs-backend-wasm":r,"tfjs-backend-webgpu":i};export{h as version}; var e="3.21.0";var s="3.21.0";var t="3.21.0";var i="3.21.0";var n="3.21.0";var r="3.21.0";var l="3.21.0";var V={tfjs:e,"tfjs-core":s,"tfjs-data":t,"tfjs-layers":i,"tfjs-converter":n,"tfjs-backend-webgl":r,"tfjs-backend-wasm":l};export{V as version};

Binary file not shown.

File diff suppressed because one or more lines are too long

Binary file not shown.

File diff suppressed because one or more lines are too long

View File

@ -1,38 +1,37 @@
{ {
"antispoof": 853098, "antispoof": 853098,
"blazeface": 538928, "blazeface": 538928,
"centernet": 4030290,
"emotion": 820516, "emotion": 820516,
"facemesh": 1477958, "facemesh": 1477958,
"faceres": 6978814, "faceres": 6978814,
"handlandmark-lite": 2023432, "handlandmark-full": 5431368,
"handtrack": 2964837, "handtrack": 2964837,
"iris": 2599092, "iris": 2599092,
"liveness": 592976, "liveness": 592976,
"mb3-centernet": 4030290,
"models": 0, "models": 0,
"movenet-lightning": 4650216, "movenet-lightning": 4650216,
"affectnet-mobilenet": 6920630,
"age": 161240, "age": 161240,
"blazeface-back": 538928, "blazeface-back": 538928,
"blazeface-front": 402048, "blazeface-front": 402048,
"blazepose-detector": 5928856, "blazepose-detector2d": 7499400,
"blazepose-full": 6339202, "blazepose-detector3d": 5928856,
"blazepose-heavy": 27502466, "blazepose-full": 6338290,
"blazepose-lite": 2726402, "blazepose-heavy": 27501554,
"blazepose-lite": 2725490,
"efficientpose": 5651240, "efficientpose": 5651240,
"faceboxes": 2013002, "faceboxes": 2013002,
"facemesh-attention-pinto": 2387598, "facemesh-attention-alt": 2387598,
"facemesh-attention": 2382414, "facemesh-attention": 2382414,
"facemesh-detection-full": 1026192, "facemesh-detection-full": 1026192,
"facemesh-detection-short": 201268, "facemesh-detection-short": 201268,
"facemesh-orig": 2955780,
"faceres-deep": 13957620, "faceres-deep": 13957620,
"gear-e1": 112438,
"gear-e2": 112438,
"gear": 1498916, "gear": 1498916,
"gender-ssrnet-imdb": 161236, "gender-ssrnet-imdb": 161236,
"gender": 201808, "gender": 201808,
"handdetect": 3515612, "handdetect": 3515612,
"handlandmark-full": 5431368, "handlandmark-lite": 2023432,
"handlandmark-sparse": 5286322, "handlandmark-sparse": 5286322,
"handskeleton": 5502280, "handskeleton": 5502280,
"meet": 372228, "meet": 372228,
@ -44,6 +43,7 @@
"posenet": 5032780, "posenet": 5032780,
"rvm": 3739355, "rvm": 3739355,
"selfie": 212886, "selfie": 212886,
"blazepose-detect": 5928804,
"anti-spoofing": 853098, "anti-spoofing": 853098,
"efficientpose-i-lite": 2269064, "efficientpose-i-lite": 2269064,
"efficientpose-ii-lite": 5651240, "efficientpose-ii-lite": 5651240,

View File

@ -1,6 +1,6 @@
{ {
"name": "@vladmandic/human", "name": "@vladmandic/human",
"version": "3.3.6", "version": "2.11.1",
"description": "Human: AI-powered 3D Face Detection & Rotation Tracking, Face Description & Recognition, Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction, Gesture Recognition", "description": "Human: AI-powered 3D Face Detection & Rotation Tracking, Face Description & Recognition, Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction, Gesture Recognition",
"sideEffects": false, "sideEffects": false,
"main": "dist/human.node.js", "main": "dist/human.node.js",
@ -8,24 +8,16 @@
"browser": "dist/human.esm.js", "browser": "dist/human.esm.js",
"types": "types/human.d.ts", "types": "types/human.d.ts",
"exports": { "exports": {
"node": "./dist/human.node.js", "node": {
"require": "./dist/human.node.js",
"import": "./dist/human.node.js",
"module": "./dist/human.node.js"
},
"require": "./dist/human.node.js",
"import": "./dist/human.esm.js",
"script": "./dist/human.js", "script": "./dist/human.js",
"module": "./dist/human.esm.js", "module": "./dist/human.esm.js",
"types": "./types/human.d.ts", "types": "./types/human.d.ts"
"dist/human": "./dist/human.js",
"dist/human.js": "./dist/human.js",
"dist/human.esm": "./dist/human.esm.js",
"dist/human.esm.js": "./dist/human.esm.js",
"dist/human.esm-nobundle": "./dist/human.esm-nobundle.js",
"dist/human.esm-nobundle.js": "./dist/human.esm-nobundle.js",
"dist/human.node": "./dist/human.node.js",
"dist/human.node.js": "./dist/human.node.js",
"dist/human.node-wasm": "./dist/human.node-wasm.js",
"dist/human.node-wasm.js": "./dist/human.node-wasm.js",
"dist/human.node-gpu": "./dist/human.node-gpu.js",
"dist/human.node-gpu.js": "./dist/human.node-gpu.js",
"require": "./dist/human.node.js",
"import": "./dist/human.esm.js"
}, },
"author": "Vladimir Mandic <mandic00@live.com>", "author": "Vladimir Mandic <mandic00@live.com>",
"bugs": { "bugs": {
@ -46,7 +38,7 @@
"clean": "build --profile clean", "clean": "build --profile clean",
"build": "rimraf test/build.log && node build.js", "build": "rimraf test/build.log && node build.js",
"test": "node --no-warnings --unhandled-rejections=strict --trace-uncaught test/node.js", "test": "node --no-warnings --unhandled-rejections=strict --trace-uncaught test/node.js",
"lint": "eslint *.json *.js src demo test models wiki", "lint": "eslint *.json *.js src demo test models",
"scan": "npx auditjs@latest ossi --dev --quiet" "scan": "npx auditjs@latest ossi --dev --quiet"
}, },
"keywords": [ "keywords": [
@ -74,40 +66,45 @@
"tensorflow" "tensorflow"
], ],
"devDependencies": { "devDependencies": {
"@html-eslint/eslint-plugin": "^0.46.1", "@html-eslint/eslint-plugin": "^0.15.0",
"@html-eslint/parser": "^0.46.0", "@html-eslint/parser": "^0.15.0",
"@microsoft/api-extractor": "^7.52.11", "@microsoft/api-extractor": "^7.32.0",
"@tensorflow/tfjs-backend-cpu": "^4.22.0", "@tensorflow/tfjs": "^3.21.0",
"@tensorflow/tfjs-backend-wasm": "^4.22.0", "@tensorflow/tfjs-backend-cpu": "^3.21.0",
"@tensorflow/tfjs-backend-webgl": "^4.22.0", "@tensorflow/tfjs-backend-wasm": "^3.21.0",
"@tensorflow/tfjs-backend-webgpu": "4.22.0", "@tensorflow/tfjs-backend-webgl": "^3.21.0",
"@tensorflow/tfjs-converter": "^4.22.0", "@tensorflow/tfjs-backend-webgpu": "0.0.1-alpha.14",
"@tensorflow/tfjs-core": "^4.22.0", "@tensorflow/tfjs-converter": "^3.21.0",
"@tensorflow/tfjs-data": "^4.22.0", "@tensorflow/tfjs-core": "^3.21.0",
"@tensorflow/tfjs-layers": "^4.22.0", "@tensorflow/tfjs-data": "^3.21.0",
"@tensorflow/tfjs-node": "^4.22.0", "@tensorflow/tfjs-layers": "^3.21.0",
"@tensorflow/tfjs-node-gpu": "^4.22.0", "@tensorflow/tfjs-node": "^3.21.1",
"@types/emscripten": "^1.40.1", "@tensorflow/tfjs-node-gpu": "^3.21.0",
"@types/node": "^24.3.0", "@tensorflow/tfjs-tflite": "0.0.1-alpha.9",
"@types/offscreencanvas": "^2019.7.3", "@types/emscripten": "^1.39.6",
"@typescript-eslint/eslint-plugin": "^8.41.0", "@types/node": "^18.8.3",
"@typescript-eslint/parser": "^8.41.0", "@types/offscreencanvas": "^2019.7.0",
"@vladmandic/build": "^0.10.3", "@typescript-eslint/eslint-plugin": "^5.39.0",
"@vladmandic/pilogger": "^0.5.2", "@typescript-eslint/parser": "^5.39.0",
"@vladmandic/build": "^0.7.14",
"@vladmandic/pilogger": "^0.4.6",
"@vladmandic/tfjs": "github:vladmandic/tfjs", "@vladmandic/tfjs": "github:vladmandic/tfjs",
"canvas": "^3.2.0", "@webgpu/types": "^0.1.22",
"esbuild": "^0.25.9", "canvas": "^2.10.1",
"eslint": "8.57.0", "esbuild": "^0.15.10",
"eslint": "8.25.0",
"eslint-config-airbnb-base": "^15.0.0", "eslint-config-airbnb-base": "^15.0.0",
"eslint-plugin-html": "^8.1.3", "eslint-plugin-html": "^7.1.0",
"eslint-plugin-import": "^2.32.0", "eslint-plugin-import": "^2.26.0",
"eslint-plugin-json": "^4.0.1", "eslint-plugin-json": "^3.1.0",
"eslint-plugin-markdown": "^5.1.0",
"eslint-plugin-node": "^11.1.0", "eslint-plugin-node": "^11.1.0",
"eslint-plugin-promise": "^7.2.1", "eslint-plugin-promise": "^6.0.1",
"rimraf": "^6.0.1", "long": "^5.2.0",
"tslib": "^2.8.1", "node-fetch": "^3.2.10",
"typedoc": "0.28.11", "rimraf": "^3.0.2",
"typescript": "5.9.2" "seedrandom": "^3.0.5",
"tslib": "^2.4.0",
"typedoc": "0.23.15",
"typescript": "4.8.4"
} }
} }

Binary file not shown (before: 98 KiB)

Binary file not shown (before: 164 KiB, after: 178 KiB)

Binary file not shown (before: 150 KiB, after: 145 KiB)

Binary file not shown (before: 59 KiB, after: 50 KiB)

View File

@ -2,47 +2,64 @@
* BlazePose model implementation * BlazePose model implementation
*/ */
import * as tf from 'dist/tfjs.esm.js'; import * as tf from '../../dist/tfjs.esm.js';
import { loadModel } from '../tfjs/load'; import { loadModel } from '../tfjs/load';
import { constants } from '../tfjs/constants'; import { constants } from '../tfjs/constants';
import { log, now } from '../util/util'; import { log, now } from '../util/util';
import type { BodyKeypoint, BodyResult, BodyLandmark, Box, Point, BodyAnnotation } from '../result'; import type { BodyKeypoint, BodyResult, BodyLandmark, Box, Point, BodyAnnotation } from '../result';
import type { GraphModel, Tensor, Tensor4D } from '../tfjs/types'; import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config'; import type { Config } from '../config';
import * as coords from './blazeposecoords'; import * as coords from './blazeposecoords';
import { loadDetector, detectBoxes, DetectedBox } from './blazeposedetector'; import * as detect from './blazeposedetector';
import * as box from '../util/box'; import * as box from '../util/box';
import { env } from '../util/env';
const env = { initial: true };
// const models: [GraphModel | null, GraphModel | null] = [null, null]; // const models: [GraphModel | null, GraphModel | null] = [null, null];
let model: GraphModel | null; const models: { detector: GraphModel | null, landmarks: GraphModel | null } = { detector: null, landmarks: null };
let inputSize = 256; const inputSize: { detector: [number, number], landmarks: [number, number] } = { detector: [224, 224], landmarks: [256, 256] };
let skipped = Number.MAX_SAFE_INTEGER; let skipped = Number.MAX_SAFE_INTEGER;
const outputNodes: { detector: string[], landmarks: string[] } = { const outputNodes: { detector: string[], landmarks: string[] } = {
landmarks: ['ld_3d', 'activation_segmentation', 'activation_heatmap', 'world_3d', 'output_poseflag'], landmarks: ['ld_3d', 'activation_segmentation', 'activation_heatmap', 'world_3d', 'output_poseflag'],
detector: [], detector: [],
}; };
const cache: BodyResult[] = []; let cache: BodyResult | null = null;
let cropBox: Box | undefined;
let padding: [number, number][] = [[0, 0], [0, 0], [0, 0], [0, 0]]; let padding: [number, number][] = [[0, 0], [0, 0], [0, 0], [0, 0]];
let lastTime = 0; let lastTime = 0;
const sigmoid = (x) => (1 - (1 / (1 + Math.exp(x)))); const sigmoid = (x) => (1 - (1 / (1 + Math.exp(x))));
export const loadDetect = (config: Config): Promise<GraphModel> => loadDetector(config); export async function loadDetect(config: Config): Promise<GraphModel> {
if (env.initial) models.detector = null;
export async function loadPose(config: Config): Promise<GraphModel> { if (!models.detector && config.body['detector'] && config.body['detector'].modelPath || '') {
if (env.initial) model = null; models.detector = await loadModel(config.body['detector'].modelPath);
if (!model) { const inputs = models.detector?.['executor'] ? Object.values(models.detector.modelSignature['inputs']) : undefined;
model = await loadModel(config.body.modelPath); inputSize.detector[0] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
const inputs = model?.['executor'] ? Object.values(model.modelSignature['inputs']) : undefined; inputSize.detector[1] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 0;
// @ts-ignore model signature properties are not typed and inputs are unreliable for this model } else if (config.debug && models.detector) log('cached model:', models.detector['modelUrl']);
inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0; detect.createAnchors();
} else if (config.debug) log('cached model:', model['modelUrl']); return models.detector as GraphModel;
return model;
} }
function prepareImage(input: Tensor4D, size: number, cropBox?: Box): Tensor { export async function loadPose(config: Config): Promise<GraphModel> {
if (env.initial) models.landmarks = null;
if (!models.landmarks) {
models.landmarks = await loadModel(config.body.modelPath);
const inputs = models.landmarks?.['executor'] ? Object.values(models.landmarks.modelSignature['inputs']) : undefined;
inputSize.landmarks[0] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
inputSize.landmarks[1] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 0;
} else if (config.debug) log('cached model:', models.landmarks['modelUrl']);
return models.landmarks;
}
export async function load(config: Config): Promise<[GraphModel | null, GraphModel | null]> {
if (!models.detector) await loadDetect(config);
if (!models.landmarks) await loadPose(config);
return [models.detector, models.landmarks];
}
function prepareImage(input: Tensor, size: number): Tensor {
const t: Record<string, Tensor> = {}; const t: Record<string, Tensor> = {};
if (!input?.shape?.[1] || !input?.shape?.[2]) return input; if (!input?.shape?.[1] || !input?.shape?.[2]) return input;
let final: Tensor; let final: Tensor;
@ -65,10 +82,10 @@ function prepareImage(input: Tensor4D, size: number, cropBox?: Box): Tensor {
[0, 0], // don't touch rgb [0, 0], // don't touch rgb
]; ];
t.pad = tf.pad(t.cropped || input, padding); // use cropped box if it exists t.pad = tf.pad(t.cropped || input, padding); // use cropped box if it exists
t.resize = tf.image.resizeBilinear(t.pad as Tensor4D, [size, size]); t.resize = tf.image.resizeBilinear(t.pad, [size, size]);
final = tf.div(t.resize, constants.tf255); final = tf.div(t.resize, constants.tf255);
} else if (input.shape[1] !== size) { // if input needs resizing } else if (input.shape[1] !== size) { // if input needs resizing
t.resize = tf.image.resizeBilinear(t.cropped as Tensor4D || input, [size, size]); t.resize = tf.image.resizeBilinear(t.cropped || input, [size, size]);
final = tf.div(t.resize, constants.tf255); final = tf.div(t.resize, constants.tf255);
} else { // if input is already in a correct resolution just normalize it } else { // if input is already in a correct resolution just normalize it
final = tf.div(t.cropped || input, constants.tf255); final = tf.div(t.cropped || input, constants.tf255);
@ -77,7 +94,7 @@ function prepareImage(input: Tensor4D, size: number, cropBox?: Box): Tensor {
return final; return final;
} }
function rescaleKeypoints(keypoints: BodyKeypoint[], outputSize: [number, number], cropBox?: Box): BodyKeypoint[] { function rescaleKeypoints(keypoints: BodyKeypoint[], outputSize: [number, number]): BodyKeypoint[] {
for (const kpt of keypoints) { // first rescale due to padding for (const kpt of keypoints) { // first rescale due to padding
kpt.position = [ kpt.position = [
Math.trunc(kpt.position[0] * (outputSize[0] + padding[2][0] + padding[2][1]) / outputSize[0] - padding[2][0]), Math.trunc(kpt.position[0] * (outputSize[0] + padding[2][0] + padding[2][1]) / outputSize[0] - padding[2][0]),
@ -87,12 +104,10 @@ function rescaleKeypoints(keypoints: BodyKeypoint[], outputSize: [number, number
kpt.positionRaw = [kpt.position[0] / outputSize[0], kpt.position[1] / outputSize[1], 2 * (kpt.position[2] as number) / (outputSize[0] + outputSize[1])]; kpt.positionRaw = [kpt.position[0] / outputSize[0], kpt.position[1] / outputSize[1], 2 * (kpt.position[2] as number) / (outputSize[0] + outputSize[1])];
} }
if (cropBox) { // second rescale due to cropping if (cropBox) { // second rescale due to cropping
const width = cropBox[2] - cropBox[0];
const height = cropBox[3] - cropBox[1];
for (const kpt of keypoints) { for (const kpt of keypoints) {
kpt.positionRaw = [ kpt.positionRaw = [
kpt.positionRaw[0] / height + cropBox[1], // correct offset due to crop kpt.positionRaw[0] + cropBox[1], // correct offset due to crop
kpt.positionRaw[1] / width + cropBox[0], // correct offset due to crop kpt.positionRaw[1] + cropBox[0], // correct offset due to crop
kpt.positionRaw[2] as number, kpt.positionRaw[2] as number,
]; ];
kpt.position = [ kpt.position = [
@ -125,9 +140,9 @@ async function detectLandmarks(input: Tensor, config: Config, outputSize: [numbe
* t.world: 39 keypoints [x,y,z] normalized to -1..1 * t.world: 39 keypoints [x,y,z] normalized to -1..1
* t.poseflag: body score * t.poseflag: body score
*/ */
if (!model?.['executor']) return null; if (!models.landmarks?.['executor']) return null;
const t: Record<string, Tensor> = {}; const t: Record<string, Tensor> = {};
[t.ld/* 1,195(39*5) */, t.segmentation/* 1,256,256,1 */, t.heatmap/* 1,64,64,39 */, t.world/* 1,117(39*3) */, t.poseflag/* 1,1 */] = model?.execute(input, outputNodes.landmarks) as Tensor[]; // run model [t.ld/* 1,195(39*5) */, t.segmentation/* 1,256,256,1 */, t.heatmap/* 1,64,64,39 */, t.world/* 1,117(39*3) */, t.poseflag/* 1,1 */] = models.landmarks?.execute(input, outputNodes.landmarks) as Tensor[]; // run model
const poseScore = (await t.poseflag.data())[0]; const poseScore = (await t.poseflag.data())[0];
const points = await t.ld.data(); const points = await t.ld.data();
const distances = await t.world.data(); const distances = await t.world.data();
@ -138,7 +153,7 @@ async function detectLandmarks(input: Tensor, config: Config, outputSize: [numbe
const score = sigmoid(points[depth * i + 3]); const score = sigmoid(points[depth * i + 3]);
const presence = sigmoid(points[depth * i + 4]); const presence = sigmoid(points[depth * i + 4]);
const adjScore = Math.trunc(100 * score * presence * poseScore) / 100; const adjScore = Math.trunc(100 * score * presence * poseScore) / 100;
const positionRaw: Point = [points[depth * i + 0] / inputSize, points[depth * i + 1] / inputSize, points[depth * i + 2] + 0]; const positionRaw: Point = [points[depth * i + 0] / inputSize.landmarks[0], points[depth * i + 1] / inputSize.landmarks[1], points[depth * i + 2] + 0];
const position: Point = [Math.trunc(outputSize[0] * positionRaw[0]), Math.trunc(outputSize[1] * positionRaw[1]), positionRaw[2] as number]; const position: Point = [Math.trunc(outputSize[0] * positionRaw[0]), Math.trunc(outputSize[1] * positionRaw[1]), positionRaw[2] as number];
const distance: Point = [distances[depth * i + 0], distances[depth * i + 1], distances[depth * i + 2] + 0]; const distance: Point = [distances[depth * i + 0], distances[depth * i + 1], distances[depth * i + 2] + 0];
keypointsRelative.push({ part: coords.kpt[i] as BodyLandmark, positionRaw, position, distance, score: adjScore }); keypointsRelative.push({ part: coords.kpt[i] as BodyLandmark, positionRaw, position, distance, score: adjScore });
@ -162,31 +177,52 @@ async function detectLandmarks(input: Tensor, config: Config, outputSize: [numbe
return body; return body;
} }
export async function predict(input: Tensor4D, config: Config): Promise<BodyResult[]> { /*
interface DetectedBox { box: Box, boxRaw: Box, score: number }
function rescaleBoxes(boxes: Array<DetectedBox>, outputSize: [number, number]): Array<DetectedBox> {
for (const b of boxes) {
b.box = [
Math.trunc(b.box[0] * (outputSize[0] + padding[2][0] + padding[2][1]) / outputSize[0]),
Math.trunc(b.box[1] * (outputSize[1] + padding[1][0] + padding[1][1]) / outputSize[1]),
Math.trunc(b.box[2] * (outputSize[0] + padding[2][0] + padding[2][1]) / outputSize[0]),
Math.trunc(b.box[3] * (outputSize[1] + padding[1][0] + padding[1][1]) / outputSize[1]),
];
b.boxRaw = [b.box[0] / outputSize[0], b.box[1] / outputSize[1], b.box[2] / outputSize[0], b.box[3] / outputSize[1]];
}
return boxes;
}
async function detectBoxes(input: Tensor, config: Config, outputSize: [number, number]) {
const t: Record<string, Tensor> = {};
t.res = models.detector?.execute(input, ['Identity']) as Tensor; //
t.logitsRaw = tf.slice(t.res, [0, 0, 0], [1, -1, 1]);
t.boxesRaw = tf.slice(t.res, [0, 0, 1], [1, -1, -1]);
t.logits = tf.squeeze(t.logitsRaw);
t.boxes = tf.squeeze(t.boxesRaw);
const boxes = await detect.decode(t.boxes, t.logits, config, outputSize);
rescaleBoxes(boxes, outputSize);
Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
return boxes;
}
*/
export async function predict(input: Tensor, config: Config): Promise<BodyResult[]> {
const outputSize: [number, number] = [input.shape[2] || 0, input.shape[1] || 0]; const outputSize: [number, number] = [input.shape[2] || 0, input.shape[1] || 0];
const skipTime = (config.body.skipTime || 0) > (now() - lastTime); const skipTime = (config.body.skipTime || 0) > (now() - lastTime);
const skipFrame = skipped < (config.body.skipFrames || 0); const skipFrame = skipped < (config.body.skipFrames || 0);
if (config.skipAllowed && skipTime && skipFrame && cache !== null) { if (config.skipAllowed && skipTime && skipFrame && cache !== null) {
skipped++; skipped++;
} else { } else {
let boxes: DetectedBox[] = []; const t: Record<string, Tensor> = {};
if (config.body?.['detector']?.['enabled']) { /*
const preparedImage = prepareImage(input, 224); if (config.body['detector'] && config.body['detector']['enabled']) {
boxes = await detectBoxes(preparedImage, config, outputSize); t.detector = await prepareImage(input, 224);
tf.dispose(preparedImage); const boxes = await detectBoxes(t.detector, config, outputSize);
} else {
boxes = [{ box: [0, 0, 0, 0] as Box, boxRaw: [0, 0, 1, 1], score: 0 }]; // running without detector
}
for (let i = 0; i < boxes.length; i++) {
const preparedBox = prepareImage(input, 256, boxes[i]?.boxRaw); // padded and resized
cache.length = 0;
const bodyResult = await detectLandmarks(preparedBox, config, outputSize);
tf.dispose(preparedBox);
if (!bodyResult) continue;
bodyResult.id = i;
// bodyResult.score = 0; // TBD
cache.push(bodyResult);
} }
*/
t.landmarks = prepareImage(input, 256); // padded and resized
cache = await detectLandmarks(t.landmarks, config, outputSize);
/* /*
cropBox = [0, 0, 1, 1]; // reset crop coordinates cropBox = [0, 0, 1, 1]; // reset crop coordinates
if (cache?.boxRaw && config.skipAllowed) { if (cache?.boxRaw && config.skipAllowed) {
@ -201,8 +237,9 @@ export async function predict(input: Tensor4D, config: Config): Promise<BodyResu
} }
} }
*/ */
Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
lastTime = now(); lastTime = now();
skipped = 0; skipped = 0;
} }
return cache; return cache ? [cache] : [];
} }

View File

@ -1,15 +1,11 @@
import * as tf from 'dist/tfjs.esm.js'; import * as tf from '../../dist/tfjs.esm.js';
import { log } from '../util/util'; import type { Tensor } from '../tfjs/types';
import { env } from '../util/env';
import { loadModel } from '../tfjs/load';
import type { Box } from '../result'; import type { Box } from '../result';
import type { Config } from '../config'; import type { Config } from '../config';
import type { GraphModel, Tensor, Tensor1D, Tensor2D } from '../tfjs/types';
export interface DetectedBox { box: Box, boxRaw: Box, score: number } interface DetectedBox { box: Box, boxRaw: Box, score: number }
let model: GraphModel | null; const inputSize = 224;
let inputSize = 224;
let anchorTensor: { x, y }; let anchorTensor: { x, y };
const numLayers = 5; const numLayers = 5;
const strides = [8, 16, 32, 32, 32]; const strides = [8, 16, 32, 32, 32];
@ -39,20 +35,8 @@ export function createAnchors() {
anchorTensor = { x: tf.tensor1d(anchors.map((a) => a.x)), y: tf.tensor1d(anchors.map((a) => a.y)) }; anchorTensor = { x: tf.tensor1d(anchors.map((a) => a.x)), y: tf.tensor1d(anchors.map((a) => a.y)) };
} }
export async function loadDetector(config: Config): Promise<GraphModel> {
if (env.initial) model = null;
if (!model && config.body['detector'] && config.body['detector'].modelPath || '') {
model = await loadModel(config.body['detector'].modelPath);
const inputs = model?.['executor'] ? Object.values(model.modelSignature['inputs']) : undefined;
// @ts-ignore model signature properties are not typed and inputs are unreliable for this model
inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
} else if (config.debug && model) log('cached model:', model['modelUrl']);
createAnchors();
return model as GraphModel;
}
const cropFactor = [5.0, 5.0]; const cropFactor = [5.0, 5.0];
export function decodeBoxes(boxesTensor, anchor) { function decodeBoxes(boxesTensor, anchor): Tensor {
return tf.tidy(() => { return tf.tidy(() => {
const split = tf.split(boxesTensor, 12, 1); // first 4 are box data [x,y,w,h] and 4 are keypoints data [x,y] for total of 12 const split = tf.split(boxesTensor, 12, 1); // first 4 are box data [x,y,w,h] and 4 are keypoints data [x,y] for total of 12
let xCenter = tf.squeeze(split[0]); let xCenter = tf.squeeze(split[0]);
@ -65,41 +49,39 @@ export function decodeBoxes(boxesTensor, anchor) {
height = tf.mul(tf.div(height, inputSize), cropFactor[1]); height = tf.mul(tf.div(height, inputSize), cropFactor[1]);
const xMin = tf.sub(xCenter, tf.div(width, 2)); const xMin = tf.sub(xCenter, tf.div(width, 2));
const yMin = tf.sub(yCenter, tf.div(height, 2)); const yMin = tf.sub(yCenter, tf.div(height, 2));
const xMax = tf.add(xMin, width); const boxes = tf.stack([xMin, yMin, width, height], 1);
const yMax = tf.add(yMin, height);
const boxes = tf.stack([xMin, yMin, xMax, yMax], 1);
return boxes; return boxes;
}); });
} }
async function decodeResults(boxesTensor: Tensor, logitsTensor: Tensor, config: Config, outputSize: [number, number]): Promise<DetectedBox[]> { export async function decode(boxesTensor: Tensor, logitsTensor: Tensor, config: Config, outputSize: [number, number]): Promise<DetectedBox[]> {
const detectedBoxes: DetectedBox[] = [];
const t: Record<string, Tensor> = {}; const t: Record<string, Tensor> = {};
t.boxes = decodeBoxes(boxesTensor, anchorTensor); t.boxes = decodeBoxes(boxesTensor, anchorTensor);
t.scores = tf.sigmoid(logitsTensor); t.scores = tf.sigmoid(logitsTensor);
t.nms = await tf.image.nonMaxSuppressionAsync(t.boxes as Tensor2D, t.scores as Tensor1D, 1, config.body['detector']?.minConfidence || 0.1, config.body['detector']?.iouThreshold || 0.1); t.argmax = tf.argMax(t.scores);
const nms = await t.nms.data(); const i = (await t.argmax.data())[0];
const scores = await t.scores.data(); const scores = await t.scores.data();
const boxes = await t.boxes.array(); const detected: { box: Box, boxRaw: Box, score: number }[] = [];
for (const i of Array.from(nms)) { const minScore = config.body?.['detector']?.minConfidence || 0;
const score = scores[i]; if (scores[i] >= minScore) {
const boxes = await t.boxes.array();
const boxRaw: Box = boxes[i]; const boxRaw: Box = boxes[i];
const box: Box = [Math.round(boxRaw[0] * outputSize[0]), Math.round(boxRaw[1] * outputSize[1]), Math.round(boxRaw[2] * outputSize[0]), Math.round(boxRaw[3] * outputSize[1])]; const box: Box = [boxRaw[0] * outputSize[0], boxRaw[1] * outputSize[1], boxRaw[2] * outputSize[0], boxRaw[3] * outputSize[1]];
const detectedBox: DetectedBox = { score, boxRaw, box }; // console.log(box);
detectedBoxes.push(detectedBox); detected.push({ box, boxRaw, score: scores[i] });
} }
/*
t.nms = await tf.image.nonMaxSuppressionAsync(t.boxes, t.scores, 1, config.body.detector?.minConfidence || 0.1, config.body.detector?.iouThreshold || 0.1);
const boxes = t.boxes.arraySync();
const scores = t.scores.dataSync();
const nms = t.nms.dataSync();
const detected: Array<DetectedBox> = [];
for (const i of Array.from(nms)) {
const boxRaw: Box = boxes[i];
const box: Box = [boxRaw[0] * outputSize[0], boxRaw[0] * outputSize[1], boxRaw[3] * outputSize[0], boxRaw[2] * outputSize[1]];
detected.push({ box, boxRaw, score: scores[i] });
}
*/
Object.keys(t).forEach((tensor) => tf.dispose(t[tensor])); Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
return detectedBoxes; return detected;
}
export async function detectBoxes(input: Tensor, config: Config, outputSize: [number, number]) {
const t: Record<string, Tensor> = {};
t.res = model?.execute(input, ['Identity']) as Tensor; //
t.logitsRaw = tf.slice(t.res, [0, 0, 0], [1, -1, 1]);
t.boxesRaw = tf.slice(t.res, [0, 0, 1], [1, -1, -1]);
t.logits = tf.squeeze(t.logitsRaw);
t.boxes = tf.squeeze(t.boxesRaw);
const boxes = await decodeResults(t.boxes, t.logits, config, outputSize);
Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
return boxes;
} }

View File

@@ -4,13 +4,13 @@
  * Based on: [**EfficientPose**](https://github.com/daniegr/EfficientPose)
  */
-import * as tf from 'dist/tfjs.esm.js';
 import { log, now } from '../util/util';
+import * as tf from '../../dist/tfjs.esm.js';
 import { loadModel } from '../tfjs/load';
 import * as coords from './efficientposecoords';
 import { constants } from '../tfjs/constants';
 import type { BodyResult, Point, BodyLandmark, BodyAnnotation } from '../result';
-import type { GraphModel, Tensor4D } from '../tfjs/types';
+import type { GraphModel, Tensor } from '../tfjs/types';
 import type { Config } from '../config';
 import { env } from '../util/env';
@@ -50,8 +50,8 @@ async function max2d(inputs, minScore): Promise<[number, number, number]> {
   return [0, 0, newScore];
 }
-export async function predict(image: Tensor4D, config: Config): Promise<BodyResult[]> {
-  if (!model?.['executor'] || !model?.inputs[0].shape) return [];
+export async function predict(image: Tensor, config: Config): Promise<BodyResult[]> {
+  if (!model?.['executor']) return [];
   const skipTime = (config.body.skipTime || 0) > (now() - lastTime);
   const skipFrame = skipped < (config.body.skipFrames || 0);
   if (config.skipAllowed && skipTime && skipFrame && Object.keys(cache.keypoints).length > 0) {
@@ -61,7 +61,8 @@ export async function predict(image: Tensor4D, config: Config): Promise<BodyResu
   skipped = 0;
   return new Promise(async (resolve) => {
     const tensor = tf.tidy(() => {
-      const resize = tf.image.resizeBilinear(image, [model?.inputs[0].shape?.[2] || 0, model?.inputs[0].shape?.[1] || 0], false);
+      if (!model?.inputs[0].shape) return null;
+      const resize = tf.image.resizeBilinear(image, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
       const enhance = tf.mul(resize, constants.tf2);
       const norm = tf.sub(enhance, constants.tf1);
       return norm;

View File

@@ -4,9 +4,9 @@
  * Based on: [**MoveNet**](https://blog.tensorflow.org/2021/05/next-generation-pose-detection-with-movenet-and-tensorflowjs.html)
  */
-import * as tf from 'dist/tfjs.esm.js';
 import { log, now } from '../util/util';
 import * as box from '../util/box';
+import * as tf from '../../dist/tfjs.esm.js';
 import * as coords from './movenetcoords';
 import * as fix from './movenetfix';
 import { loadModel } from '../tfjs/load';
@@ -39,8 +39,6 @@ export async function load(config: Config): Promise<GraphModel> {
   } else if (config.debug) log('cached model:', model['modelUrl']);
   inputSize = (model?.['executor'] && model?.inputs?.[0].shape) ? model.inputs[0].shape[2] : 0;
   if (inputSize < 64) inputSize = 256;
-  // @ts-ignore private property
-  if (tf.env().flagRegistry.WEBGL_USE_SHAPES_UNIFORMS) tf.env().set('WEBGL_USE_SHAPES_UNIFORMS', false); // default=false <https://github.com/tensorflow/tfjs/issues/5205>
   return model;
 }
@@ -86,8 +84,8 @@ function parseMultiPose(res, config, image) {
   const bodies: BodyResult[] = [];
   for (let id = 0; id < res[0].length; id++) {
     const kpt = res[0][id];
-    const boxScore = Math.round(100 * kpt[51 + 4]) / 100;
-    if (boxScore > config.body.minConfidence) {
+    const totalScore = Math.round(100 * kpt[51 + 4]) / 100;
+    if (totalScore > config.body.minConfidence) {
       const keypoints: BodyKeypoint[] = [];
       for (let i = 0; i < 17; i++) {
         const score = kpt[3 * i + 2];
@@ -101,10 +99,10 @@ function parseMultiPose(res, config, image) {
           });
         }
       }
-      // const newBox = box.calc(keypoints.map((pt) => pt.position), [image.shape[2], image.shape[1]]);
+      const newBox = box.calc(keypoints.map((pt) => pt.position), [image.shape[2], image.shape[1]]);
       // movenet-multipose has built-in box details
-      const boxRaw: Box = [kpt[51 + 1], kpt[51 + 0], kpt[51 + 3] - kpt[51 + 1], kpt[51 + 2] - kpt[51 + 0]];
-      const boxNorm: Box = [Math.trunc(boxRaw[0] * (image.shape[2] || 0)), Math.trunc(boxRaw[1] * (image.shape[1] || 0)), Math.trunc(boxRaw[2] * (image.shape[2] || 0)), Math.trunc(boxRaw[3] * (image.shape[1] || 0))];
+      // const boxRaw: Box = [kpt[51 + 1], kpt[51 + 0], kpt[51 + 3] - kpt[51 + 1], kpt[51 + 2] - kpt[51 + 0]];
+      // const box: Box = [Math.trunc(boxRaw[0] * (image.shape[2] || 0)), Math.trunc(boxRaw[1] * (image.shape[1] || 0)), Math.trunc(boxRaw[2] * (image.shape[2] || 0)), Math.trunc(boxRaw[3] * (image.shape[1] || 0))];
       const annotations: Record<BodyAnnotation, Point[][]> = {} as Record<BodyAnnotation, Point[][]>;
       for (const [name, indexes] of Object.entries(coords.connected)) {
         const pt: Point[][] = [];
@@ -115,8 +113,7 @@ function parseMultiPose(res, config, image) {
         }
         annotations[name] = pt;
       }
-      // const body: BodyResult = { id, score: totalScore, box: newBox.box, boxRaw: newBox.boxRaw, keypoints: [...keypoints], annotations };
-      const body: BodyResult = { id, score: boxScore, box: boxNorm, boxRaw, keypoints: [...keypoints], annotations };
+      const body: BodyResult = { id, score: totalScore, box: newBox.box, boxRaw: newBox.boxRaw, keypoints: [...keypoints], annotations };
       fix.bodyParts(body);
       bodies.push(body);
     }
@@ -138,6 +135,39 @@ export async function predict(input: Tensor, config: Config): Promise<BodyResult
   return new Promise(async (resolve) => {
     const t: Record<string, Tensor> = {};
     skipped = 0;
+    // run detection on squared input and cached boxes
+    /*
+    cache.bodies = []; // reset bodies result
+    if (cache.boxes.length >= (config.body.maxDetected || 0)) { // if we have enough cached boxes run detection using cache
+      for (let i = 0; i < cache.boxes.length; i++) { // run detection based on cached boxes
+        t.crop = tf.image.cropAndResize(input, [cache.boxes[i]], [0], [inputSize, inputSize], 'bilinear');
+        t.cast = tf.cast(t.crop, 'int32');
+        // t.input = prepareImage(input);
+        t.res = model?.execute(t.cast) as Tensor;
+        const res = await t.res.array();
+        const newBodies = (t.res.shape[2] === 17) ? await parseSinglePose(res, config, input, cache.boxes[i]) : await parseMultiPose(res, config, input, cache.boxes[i]);
+        cache.bodies = cache.bodies.concat(newBodies);
+        Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
+      }
+    }
+    if (cache.bodies.length !== config.body.maxDetected) { // did not find enough bodies based on cached boxes so run detection on full frame
+      t.input = prepareImage(input);
+      t.res = model?.execute(t.input) as Tensor;
+      const res = await t.res.array();
+      cache.bodies = (t.res.shape[2] === 17) ? await parseSinglePose(res, config, input, [0, 0, 1, 1]) : await parseMultiPose(res, config, input, [0, 0, 1, 1]);
+      for (const body of cache.bodies) rescaleBody(body, [input.shape[2] || 1, input.shape[1] || 1]);
+      Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
+    }
+    cache.boxes.length = 0; // reset cache
+    for (let i = 0; i < cache.bodies.length; i++) {
+      if (cache.bodies[i].keypoints.length > (coords.kpt.length / 2)) { // only update cache if we detected at least half keypoints
+        const scaledBox = box.scale(cache.bodies[i].boxRaw, boxExpandFact);
+        const cropBox = box.crop(scaledBox);
+        cache.boxes.push(cropBox);
+      }
+    }
+    */
     // run detection on squared input and no cached boxes
     t.input = fix.padInput(input, inputSize);
     t.res = model?.execute(t.input) as Tensor;
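
The `kpt[3 * i + 2]` and `kpt[51 + n]` indexing in parseMultiPose above assumes the usual MoveNet multipose output layout: 17 keypoints encoded as [y, x, score] triplets (51 values) followed by a 5-value box. A hedged sketch of decoding one raw detection under that assumption (the helper and its names are illustrative, not part of the library):

```ts
// Sketch only: decoding one multipose detection, assuming 51 keypoint values + [ymin, xmin, ymax, xmax, score].
function decodeDetection(kpt: number[]) {
  const keypoints = Array.from({ length: 17 }, (_, i) => ({
    y: kpt[3 * i + 0],     // normalized y
    x: kpt[3 * i + 1],     // normalized x
    score: kpt[3 * i + 2], // per-keypoint confidence
  }));
  const box = { ymin: kpt[51], xmin: kpt[52], ymax: kpt[53], xmax: kpt[54], score: kpt[55] };
  return { keypoints, box };
}
```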

View File

@@ -1,8 +1,8 @@
-import * as tf from 'dist/tfjs.esm.js';
 import type { BodyKeypoint, BodyResult } from '../result';
 import * as box from '../util/box';
 import * as coords from './movenetcoords';
-import type { Tensor, Tensor3D } from '../tfjs/types';
+import * as tf from '../../dist/tfjs.esm.js';
+import type { Tensor } from '../tfjs/types';
 const maxJitter = 0.005; // default allowed jitter is within 0.5%
@@ -83,7 +83,7 @@ export function padInput(input: Tensor, inputSize: number): Tensor {
     [0, 0], // dont touch rbg
   ];
   t.pad = tf.pad(input, cache.padding);
-  t.resize = tf.image.resizeBilinear(t.pad as Tensor3D, [inputSize, inputSize]);
+  t.resize = tf.image.resizeBilinear(t.pad, [inputSize, inputSize]);
   const final = tf.cast(t.resize, 'int32');
   Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
   return final;

View File

@@ -4,11 +4,11 @@
  * Based on: [**PoseNet**](https://medium.com/tensorflow/real-time-human-pose-estimation-in-the-browser-with-tensorflow-js-7dd0bc881cd5)
  */
-import * as tf from 'dist/tfjs.esm.js';
 import { log } from '../util/util';
+import * as tf from '../../dist/tfjs.esm.js';
 import { loadModel } from '../tfjs/load';
 import type { BodyResult, BodyLandmark, Box } from '../result';
-import type { Tensor, GraphModel, Tensor4D } from '../tfjs/types';
+import type { Tensor, GraphModel } from '../tfjs/types';
 import type { Config } from '../config';
 import { env } from '../util/env';
 import * as utils from './posenetutils';
@@ -155,7 +155,7 @@ export function decode(offsets, scores, displacementsFwd, displacementsBwd, maxD
   return poses;
 }
-export async function predict(input: Tensor4D, config: Config): Promise<BodyResult[]> {
+export async function predict(input: Tensor, config: Config): Promise<BodyResult[]> {
   /** posenet is mostly obsolete
    * caching is not implemented
    */

View File

@@ -1,7 +1,7 @@
 /* eslint-disable no-multi-spaces */
 /** Possible TensorFlow backends */
-export type BackendEnum = '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu' | 'none';
+export type BackendEnum = '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu';
 /** Possible values for `human.warmup` */
 export type WarmupEnum = '' | 'none' | 'face' | 'full' | 'body';
@@ -33,14 +33,8 @@ export interface FaceDetectorConfig extends GenericConfig {
   maxDetected: number,
   /** minimum confidence for a detected face before results are discarded */
   minConfidence: number,
-  /** minimum size in pixels of a detected face box before resutls are discared */
-  minSize: number,
   /** minimum overlap between two detected faces before one is discarded */
   iouThreshold: number,
-  /** how much should face box be enlarged over the min/max facial coordinates */
-  scale: number,
-  /** automatically pad image to square */
-  square: boolean,
   /** should child models perform on masked image of a face */
   mask: boolean,
   /** should face detection return processed and cropped face tensor that can with an external model for addtional processing?
@@ -55,10 +49,7 @@ export interface FaceMeshConfig extends GenericConfig {
 }
 /** Iris part of face configuration */
-export interface FaceIrisConfig extends GenericConfig {
-  /** how much should iris box be enlarged over the min/max iris coordinates */
-  scale: number,
-}
+export interface FaceIrisConfig extends GenericConfig {}
 /** Attention part of face configuration */
 export interface FaceAttentionConfig extends GenericConfig {}
@@ -195,8 +186,6 @@ export interface FilterConfig {
   return: boolean,
   /** flip input as mirror image */
   flip: boolean,
-  /** apply auto-brighness */
-  autoBrightness: boolean,
   /** range: -1 (darken) to 1 (lighten) */
   brightness: number,
   /** range: -1 (reduce contrast) to 1 (increase contrast) */
@@ -361,7 +350,6 @@ const config: Config = {
     height: 0,
     flip: false,
     return: true,
-    autoBrightness: true,
    brightness: 0,
     contrast: 0,
     sharpness: 0,
@@ -383,14 +371,12 @@ const config: Config = {
     enabled: true,
     detector: {
       modelPath: 'blazeface.json',
-      rotation: false,
+      rotation: true,
       maxDetected: 1,
       skipFrames: 99,
       skipTime: 2500,
       minConfidence: 0.2,
-      minSize: 0,
       iouThreshold: 0.1,
-      scale: 1.4,
       mask: false,
       return: false,
     },
@@ -405,7 +391,6 @@ const config: Config = {
     },
     iris: {
       enabled: true,
-      scale: 2.3,
       modelPath: 'iris.json',
     },
     emotion: {
@@ -456,12 +441,12 @@ const config: Config = {
       modelPath: 'handtrack.json',
     },
     skeleton: {
-      modelPath: 'handlandmark-lite.json',
+      modelPath: 'handlandmark-full.json',
     },
   },
   object: {
     enabled: false,
-    modelPath: 'centernet.json',
+    modelPath: 'mb3-centernet.json',
     minConfidence: 0.2,
     iouThreshold: 0.4,
     maxDetected: 10,
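
Several defaults above differ between the two sides of this compare (face.detector.rotation, the minSize/scale/square and iris.scale options that exist only on the main side, filter.autoBrightness, and the hand skeleton and object model paths). A sketch of pinning these explicitly via a user config override, assuming the usual constructor that accepts a partial configuration:

```ts
// Sketch only: a partial configuration pinning the options whose defaults differ in this diff.
// Options flagged below exist only on the main side and are simply ignored by 2.11.1.
const userConfig = {
  filter: { autoBrightness: true },                       // main side only
  face: {
    detector: { rotation: true, minSize: 0, scale: 1.4 }, // minSize/scale: main side only
    iris: { enabled: true, scale: 2.3 },                  // scale: main side only
  },
  hand: { skeleton: { modelPath: 'handlandmark-full.json' } },
  object: { enabled: false, modelPath: 'mb3-centernet.json' },
};
// typically passed as: const human = new Human(userConfig);
```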

View File

@@ -1,5 +1,5 @@
 import { mergeDeep } from '../util/util';
-import { getCanvasContext, rect, point, curves, colorDepth, replace, labels } from './primitives';
+import { getCanvasContext, rect, point, curves, colorDepth } from './primitives';
 import { options } from './options';
 import type { BodyResult } from '../result';
 import type { AnyCanvas, DrawOptions } from '../exports';
@@ -8,7 +8,7 @@ import type { AnyCanvas, DrawOptions } from '../exports';
 export function body(inCanvas: AnyCanvas, result: BodyResult[], drawOptions?: Partial<DrawOptions>) {
   const localOptions: DrawOptions = mergeDeep(options, drawOptions);
   if (!result || !inCanvas) return;
-  const ctx = getCanvasContext(inCanvas) as CanvasRenderingContext2D;
+  const ctx = getCanvasContext(inCanvas);
   if (!ctx) return;
   ctx.lineJoin = 'round';
   for (let i = 0; i < result.length; i++) {
@@ -18,11 +18,13 @@ export function body(inCanvas: AnyCanvas, result: BodyResult[], drawOptions?: Pa
     ctx.font = localOptions.font;
     if (localOptions.drawBoxes && result[i].box && result[i].box.length === 4) {
       rect(ctx, result[i].box[0], result[i].box[1], result[i].box[2], result[i].box[3], localOptions);
-      if (localOptions.drawLabels && (localOptions.bodyLabels?.length > 0)) {
-        let l = localOptions.bodyLabels.slice();
-        l = replace(l, '[id]', result[i].id.toFixed(0));
-        l = replace(l, '[score]', 100 * result[i].score);
-        labels(ctx, l, result[i].box[0], result[i].box[1], localOptions);
+      if (localOptions.drawLabels) {
+        if (localOptions.shadowColor && localOptions.shadowColor !== '') {
+          ctx.fillStyle = localOptions.shadowColor;
+          ctx.fillText(`body ${100 * result[i].score}%`, result[i].box[0] + 3, 1 + result[i].box[1] + localOptions.lineHeight, result[i].box[2]);
+        }
+        ctx.fillStyle = localOptions.labelColor;
+        ctx.fillText(`body ${100 * result[i].score}%`, result[i].box[0] + 2, 0 + result[i].box[1] + localOptions.lineHeight, result[i].box[2]);
       }
     }
     if (localOptions.drawPoints && result[i].keypoints) {
@@ -32,14 +34,12 @@ export function body(inCanvas: AnyCanvas, result: BodyResult[], drawOptions?: Pa
         point(ctx, result[i].keypoints[pt].position[0], result[i].keypoints[pt].position[1], 0, localOptions);
       }
     }
-    if (localOptions.drawLabels && (localOptions.bodyPartLabels?.length > 0) && result[i].keypoints) {
+    if (localOptions.drawLabels && result[i].keypoints) {
       ctx.font = localOptions.font;
       for (const pt of result[i].keypoints) {
         if (!pt.score || (pt.score === 0)) continue;
-        let l = localOptions.bodyPartLabels.slice();
-        l = replace(l, '[label]', pt.part);
-        l = replace(l, '[score]', 100 * pt.score);
-        labels(ctx, l, pt.position[0], pt.position[1], localOptions);
+        ctx.fillStyle = colorDepth(pt.position[2], localOptions);
+        ctx.fillText(`${pt.part} ${Math.trunc(100 * pt.score)}%`, pt.position[0] + 4, pt.position[1] + 4);
       }
     }
     if (localOptions.drawPolygons && result[i].keypoints && result[i].annotations) {

View File

@@ -2,7 +2,6 @@
  * Module that implements helper draw functions, exposed as human.draw
  */
-import * as tf from 'dist/tfjs.esm.js';
 import { mergeDeep, now } from '../util/util';
 import { env } from '../util/env';
 import { getCanvasContext, rect } from './primitives';
@@ -12,10 +11,8 @@ import { body } from './body';
 import { hand } from './hand';
 import { object } from './object';
 import { gesture } from './gesture';
-import { defaultLabels } from './labels';
 import type { Result, PersonResult } from '../result';
 import type { AnyCanvas, DrawOptions } from '../exports';
-import type { Tensor2D } from '../tfjs/types';
 let drawTime = 0;
@@ -30,7 +27,7 @@ export { gesture } from './gesture';
 export function person(inCanvas: AnyCanvas, result: PersonResult[], drawOptions?: Partial<DrawOptions>) {
   const localOptions: DrawOptions = mergeDeep(options, drawOptions);
   if (!result || !inCanvas) return;
-  const ctx = getCanvasContext(inCanvas) as CanvasRenderingContext2D;
+  const ctx = getCanvasContext(inCanvas);
   if (!ctx) return;
   ctx.lineJoin = 'round';
   ctx.font = localOptions.font;
@@ -57,27 +54,11 @@ export function person(inCanvas: AnyCanvas, result: PersonResult[], drawOptions?
 /** draw processed canvas */
 export function canvas(input: AnyCanvas | HTMLImageElement | HTMLVideoElement, output: AnyCanvas) {
   if (!input || !output) return;
-  const ctx = getCanvasContext(output) as CanvasRenderingContext2D;
+  const ctx = getCanvasContext(output);
   if (!ctx) return;
   ctx.drawImage(input, 0, 0);
 }
-/** draw processed canvas */
-export async function tensor(input: Tensor2D, output: HTMLCanvasElement) {
-  if (!input || !output) return;
-  if (!env.browser) return;
-  // const backend = tf.getBackend();
-  // if (backend === 'webgpu') tf.browser.draw(input, output);
-  // else await tf.browser.toPixels(input, output);
-  await tf.browser.toPixels(input, output);
-  // const ctx = getCanvasContext(output) as CanvasRenderingContext2D;
-  // if (!ctx) return;
-  // const image = await process(input);
-  // result.canvas = image.canvas;
-  // human.tf.dispose(image.tensor);
-  // ctx.drawImage(image.canvas, 0, 0);
-}
 /** meta-function that performs draw for: canvas, face, body, hand */
 export async function all(inCanvas: AnyCanvas, result: Result, drawOptions?: Partial<DrawOptions>) {
   if (!result?.performance || !inCanvas) return null;
@@ -95,14 +76,3 @@ export async function all(inCanvas: AnyCanvas, result: Result, drawOptions?: Par
   result.performance.draw = drawTime;
   return promise;
 }
-/** sets default label templates for face/body/hand/object/gestures */
-export function init() {
-  options.faceLabels = defaultLabels.face;
-  options.bodyLabels = defaultLabels.body;
-  options.bodyPartLabels = defaultLabels.bodyPart;
-  options.handLabels = defaultLabels.hand;
-  options.fingerLabels = defaultLabels.finger;
-  options.objectLabels = defaultLabels.object;
-  options.gestureLabels = defaultLabels.gesture;
-}
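
The main side of this hunk carries a `tensor()` helper (removed here relative to 2.11.1) that renders a tensor straight to a canvas via `tf.browser.toPixels`, plus an `init()` that seeds the label templates. A hedged usage sketch, assuming the draw module is exposed as `human.draw` and with `someTensor` as a placeholder for any drawable tensor:

```ts
// Sketch only: using the main-side draw helpers shown above; `someTensor` is a placeholder.
const output = document.getElementById('output') as HTMLCanvasElement;
human.draw.init();                           // copies defaultLabels into draw options (main side only)
await human.draw.tensor(someTensor, output); // wraps tf.browser.toPixels per the removed code above
```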

View File

@@ -1,66 +1,77 @@
 import { TRI468 as triangulation } from '../face/facemeshcoords';
 import { mergeDeep } from '../util/util';
-import { getCanvasContext, rad2deg, rect, point, lines, arrow, labels, replace } from './primitives';
+import { getCanvasContext, rad2deg, rect, point, lines, arrow } from './primitives';
 import { options } from './options';
 import * as facemeshConstants from '../face/constants';
 import type { FaceResult } from '../result';
 import type { AnyCanvas, DrawOptions } from '../exports';
-let localOptions: DrawOptions;
+let opt: DrawOptions;
 function drawLabels(f: FaceResult, ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D) {
-  if (!localOptions.drawLabels || (localOptions.faceLabels?.length === 0)) return;
-  let l = localOptions.faceLabels.slice();
-  l = replace(l, '[id]', f.id.toFixed(0));
-  if (f.score) l = replace(l, '[score]', 100 * f.score);
-  if (f.gender) l = replace(l, '[gender]', f.gender);
-  if (f.genderScore) l = replace(l, '[genderScore]', 100 * f.genderScore);
-  if (f.age) l = replace(l, '[age]', f.age);
-  if (f.distance) l = replace(l, '[distance]', 100 * f.distance);
-  if (f.real) l = replace(l, '[real]', 100 * f.real);
-  if (f.live) l = replace(l, '[live]', 100 * f.live);
-  if (f.emotion && f.emotion.length > 0) {
-    const emotion = f.emotion.map((a) => `${Math.trunc(100 * a.score)}% ${a.emotion}`);
-    if (emotion.length > 3) emotion.length = 3;
-    l = replace(l, '[emotions]', emotion.join(' '));
+  if (opt.drawLabels) {
+    // silly hack since fillText does not suport new line
+    const labels:string[] = [];
+    labels.push(`face: ${Math.trunc(100 * f.score)}%`);
+    if (f.genderScore) labels.push(`${f.gender || ''} ${Math.trunc(100 * f.genderScore)}%`);
+    if (f.age) labels.push(`age: ${f.age || ''}`);
+    if (f.iris) labels.push(`distance: ${f.iris}`);
+    if (f.real) labels.push(`real: ${Math.trunc(100 * f.real)}%`);
+    if (f.live) labels.push(`live: ${Math.trunc(100 * f.live)}%`);
+    if (f.emotion && f.emotion.length > 0) {
+      const emotion = f.emotion.map((a) => `${Math.trunc(100 * a.score)}% ${a.emotion}`);
+      if (emotion.length > 3) emotion.length = 3;
+      labels.push(emotion.join(' '));
+    }
+    if (f.rotation?.angle && f.rotation?.gaze) {
+      if (f.rotation.angle.roll) labels.push(`roll: ${rad2deg(f.rotation.angle.roll)}° yaw:${rad2deg(f.rotation.angle.yaw)}° pitch:${rad2deg(f.rotation.angle.pitch)}°`);
+      if (f.rotation.gaze.bearing) labels.push(`gaze: ${rad2deg(f.rotation.gaze.bearing)}°`);
+    }
+    if (labels.length === 0) labels.push('face');
+    ctx.fillStyle = opt.color;
+    for (let i = labels.length - 1; i >= 0; i--) {
+      const x = Math.max(f.box[0], 0);
+      const y = i * opt.lineHeight + f.box[1];
+      if (opt.shadowColor && opt.shadowColor !== '') {
+        ctx.fillStyle = opt.shadowColor;
+        ctx.fillText(labels[i], x + 5, y + 16);
+      }
+      ctx.fillStyle = opt.labelColor;
+      ctx.fillText(labels[i], x + 4, y + 15);
+    }
   }
-  if (f.rotation?.angle?.roll) l = replace(l, '[roll]', rad2deg(f.rotation.angle.roll));
-  if (f.rotation?.angle?.yaw) l = replace(l, '[yaw]', rad2deg(f.rotation.angle.yaw));
-  if (f.rotation?.angle?.pitch) l = replace(l, '[pitch]', rad2deg(f.rotation.angle.pitch));
-  if (f.rotation?.gaze?.bearing) l = replace(l, '[gaze]', rad2deg(f.rotation.gaze.bearing));
-  labels(ctx, l, f.box[0], f.box[1], localOptions);
 }
 function drawIrisElipse(f: FaceResult, ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D) {
   // iris: array[center, left, top, right, bottom]
   if (f.annotations?.leftEyeIris && f.annotations?.leftEyeIris[0]) {
-    ctx.strokeStyle = localOptions.useDepth ? 'rgba(255, 200, 255, 0.3)' : localOptions.color;
+    ctx.strokeStyle = opt.useDepth ? 'rgba(255, 200, 255, 0.3)' : opt.color;
     ctx.beginPath();
     const sizeX = Math.abs(f.annotations.leftEyeIris[3][0] - f.annotations.leftEyeIris[1][0]) / 2;
     const sizeY = Math.abs(f.annotations.leftEyeIris[4][1] - f.annotations.leftEyeIris[2][1]) / 2;
     ctx.ellipse(f.annotations.leftEyeIris[0][0], f.annotations.leftEyeIris[0][1], sizeX, sizeY, 0, 0, 2 * Math.PI);
     ctx.stroke();
-    if (localOptions.fillPolygons) {
-      ctx.fillStyle = localOptions.useDepth ? 'rgba(255, 255, 200, 0.3)' : localOptions.color;
+    if (opt.fillPolygons) {
+      ctx.fillStyle = opt.useDepth ? 'rgba(255, 255, 200, 0.3)' : opt.color;
       ctx.fill();
     }
   }
   if (f.annotations?.rightEyeIris && f.annotations?.rightEyeIris[0]) {
-    ctx.strokeStyle = localOptions.useDepth ? 'rgba(255, 200, 255, 0.3)' : localOptions.color;
+    ctx.strokeStyle = opt.useDepth ? 'rgba(255, 200, 255, 0.3)' : opt.color;
     ctx.beginPath();
     const sizeX = Math.abs(f.annotations.rightEyeIris[3][0] - f.annotations.rightEyeIris[1][0]) / 2;
     const sizeY = Math.abs(f.annotations.rightEyeIris[4][1] - f.annotations.rightEyeIris[2][1]) / 2;
     ctx.ellipse(f.annotations.rightEyeIris[0][0], f.annotations.rightEyeIris[0][1], sizeX, sizeY, 0, 0, 2 * Math.PI);
     ctx.stroke();
-    if (localOptions.fillPolygons) {
-      ctx.fillStyle = localOptions.useDepth ? 'rgba(255, 255, 200, 0.3)' : localOptions.color;
+    if (opt.fillPolygons) {
+      ctx.fillStyle = opt.useDepth ? 'rgba(255, 255, 200, 0.3)' : opt.color;
       ctx.fill();
     }
   }
 }
 function drawGazeSpheres(f: FaceResult, ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D) {
-  if (localOptions.drawGaze && f.rotation?.angle && typeof Path2D !== 'undefined') {
+  if (opt.drawGaze && f.rotation?.angle && typeof Path2D !== 'undefined') {
     ctx.strokeStyle = 'pink';
     const valX = (f.box[0] + f.box[2] / 2) - (f.box[3] * rad2deg(f.rotation.angle.yaw) / 90);
     const valY = (f.box[1] + f.box[3] / 2) + (f.box[2] * rad2deg(f.rotation.angle.pitch) / 90);
@@ -84,7 +95,7 @@ function drawGazeSpheres(f: FaceResult, ctx: CanvasRenderingContext2D | Offscree
 }
 function drawGazeArrows(f: FaceResult, ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D) {
-  if (localOptions.drawGaze && f.rotation?.gaze.strength && f.rotation.gaze.bearing && f.annotations.leftEyeIris && f.annotations.rightEyeIris && f.annotations.leftEyeIris[0] && f.annotations.rightEyeIris[0]) {
+  if (opt.drawGaze && f.rotation?.gaze.strength && f.rotation.gaze.bearing && f.annotations.leftEyeIris && f.annotations.rightEyeIris && f.annotations.leftEyeIris[0] && f.annotations.rightEyeIris[0]) {
     ctx.strokeStyle = 'pink';
     ctx.fillStyle = 'pink';
     const leftGaze = [
@@ -101,16 +112,16 @@ function drawGazeArrows(f: FaceResult, ctx: CanvasRenderingContext2D | Offscreen
 }
 function drawFacePolygons(f: FaceResult, ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D) {
-  if (localOptions.drawPolygons && f.mesh.length >= 468) {
+  if (opt.drawPolygons && f.mesh.length >= 468) {
     ctx.lineWidth = 1;
     for (let i = 0; i < triangulation.length / 3; i++) {
       const points = [triangulation[i * 3 + 0], triangulation[i * 3 + 1], triangulation[i * 3 + 2]].map((index) => f.mesh[index]);
-      lines(ctx, points, localOptions);
+      lines(ctx, points, opt);
     }
     drawIrisElipse(f, ctx);
   }
   /*
-  if (localOptions.drawPolygons && f.contours.length > 1) {
+  if (opt.drawPolygons && f.contours.length > 1) {
     ctx.lineWidth = 5;
     lines(ctx, f.contours, opt);
   }
@@ -119,42 +130,33 @@ function drawFacePolygons(f: FaceResult, ctx: CanvasRenderingContext2D | Offscre
 }
 function drawFacePoints(f: FaceResult, ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D) {
-  if (localOptions.drawPoints) {
-    if (f?.mesh.length >= 468) {
-      for (let i = 0; i < f.mesh.length; i++) {
-        point(ctx, f.mesh[i][0], f.mesh[i][1], f.mesh[i][2], localOptions);
-        if (localOptions.drawAttention) {
-          if (facemeshConstants.LANDMARKS_REFINEMENT_LIPS_CONFIG.includes(i)) point(ctx, f.mesh[i][0], f.mesh[i][1], (f.mesh[i][2] as number) + 127, localOptions);
-          if (facemeshConstants.LANDMARKS_REFINEMENT_LEFT_EYE_CONFIG.includes(i)) point(ctx, f.mesh[i][0], f.mesh[i][1], (f.mesh[i][2] as number) - 127, localOptions);
-          if (facemeshConstants.LANDMARKS_REFINEMENT_RIGHT_EYE_CONFIG.includes(i)) point(ctx, f.mesh[i][0], f.mesh[i][1], (f.mesh[i][2] as number) - 127, localOptions);
-        }
-      }
-    } else {
-      for (const [k, v] of Object.entries(f?.annotations || {})) {
-        if (!v?.[0]) continue;
-        const pt = v[0];
-        point(ctx, pt[0], pt[1], 0, localOptions);
-        if (localOptions.drawLabels) labels(ctx, k, pt[0], pt[1], localOptions);
+  if (opt.drawPoints && f.mesh.length >= 468) {
+    for (let i = 0; i < f.mesh.length; i++) {
+      point(ctx, f.mesh[i][0], f.mesh[i][1], f.mesh[i][2], opt);
+      if (opt.drawAttention) {
+        if (facemeshConstants.LANDMARKS_REFINEMENT_LIPS_CONFIG.includes(i)) point(ctx, f.mesh[i][0], f.mesh[i][1], (f.mesh[i][2] as number) + 127, opt);
+        if (facemeshConstants.LANDMARKS_REFINEMENT_LEFT_EYE_CONFIG.includes(i)) point(ctx, f.mesh[i][0], f.mesh[i][1], (f.mesh[i][2] as number) - 127, opt);
+        if (facemeshConstants.LANDMARKS_REFINEMENT_RIGHT_EYE_CONFIG.includes(i)) point(ctx, f.mesh[i][0], f.mesh[i][1], (f.mesh[i][2] as number) - 127, opt);
       }
     }
   }
 }
 function drawFaceBoxes(f: FaceResult, ctx) {
-  if (localOptions.drawBoxes) {
-    rect(ctx, f.box[0], f.box[1], f.box[2], f.box[3], localOptions);
+  if (opt.drawBoxes) {
+    rect(ctx, f.box[0], f.box[1], f.box[2], f.box[3], opt);
   }
 }
 /** draw detected faces */
 export function face(inCanvas: AnyCanvas, result: FaceResult[], drawOptions?: Partial<DrawOptions>) {
-  localOptions = mergeDeep(options, drawOptions);
+  opt = mergeDeep(options, drawOptions);
   if (!result || !inCanvas) return;
-  const ctx = getCanvasContext(inCanvas) as CanvasRenderingContext2D;
+  const ctx = getCanvasContext(inCanvas);
   if (!ctx) return;
-  ctx.font = localOptions.font;
-  ctx.strokeStyle = localOptions.color;
-  ctx.fillStyle = localOptions.color;
+  ctx.font = opt.font;
+  ctx.strokeStyle = opt.color;
+  ctx.fillStyle = opt.color;
   for (const f of result) {
     drawFaceBoxes(f, ctx);
     drawLabels(f, ctx);

View File

@@ -1,5 +1,5 @@
 import { mergeDeep } from '../util/util';
-import { getCanvasContext, replace, labels } from './primitives';
+import { getCanvasContext } from './primitives';
 import { options } from './options';
 import type { GestureResult } from '../result';
 import type { AnyCanvas, DrawOptions } from '../exports';
@@ -8,21 +8,25 @@ import type { AnyCanvas, DrawOptions } from '../exports';
 export function gesture(inCanvas: AnyCanvas, result: GestureResult[], drawOptions?: Partial<DrawOptions>) {
   const localOptions: DrawOptions = mergeDeep(options, drawOptions);
   if (!result || !inCanvas) return;
-  if (localOptions.drawGestures && (localOptions.gestureLabels?.length > 0)) {
-    const ctx = getCanvasContext(inCanvas) as CanvasRenderingContext2D;
+  if (localOptions.drawGestures) {
+    const ctx = getCanvasContext(inCanvas);
     if (!ctx) return;
     ctx.font = localOptions.font;
     ctx.fillStyle = localOptions.color;
     let i = 1;
     for (let j = 0; j < result.length; j++) {
-      const [where, what] = Object.entries(result[j]);
+      let where: unknown[] = []; // what&where is a record
+      let what: unknown[] = []; // what&where is a record
+      [where, what] = Object.entries(result[j]);
       if ((what.length > 1) && ((what[1] as string).length > 0)) {
         const who = where[1] as number > 0 ? `#${where[1]}` : '';
-        let l = localOptions.gestureLabels.slice();
-        l = replace(l, '[where]', where[0]);
-        l = replace(l, '[who]', who);
-        l = replace(l, '[what]', what[1]);
-        labels(ctx, l, 8, 2 + (i * localOptions.lineHeight), localOptions);
+        const label = `${where[0]} ${who}: ${what[1]}`;
+        if (localOptions.shadowColor && localOptions.shadowColor !== '') {
+          ctx.fillStyle = localOptions.shadowColor;
+          ctx.fillText(label, 8, 2 + (i * localOptions.lineHeight));
+        }
+        ctx.fillStyle = localOptions.labelColor;
+        ctx.fillText(label, 6, 0 + (i * localOptions.lineHeight));
         i += 1;
       }
     }

View File

@@ -1,5 +1,5 @@
 import { mergeDeep } from '../util/util';
-import { getCanvasContext, rect, point, colorDepth, replace, labels } from './primitives';
+import { getCanvasContext, rect, point, colorDepth } from './primitives';
 import { options } from './options';
 import type { HandResult } from '../result';
 import type { AnyCanvas, DrawOptions, Point } from '../exports';
@@ -8,7 +8,7 @@ import type { AnyCanvas, DrawOptions, Point } from '../exports';
 export function hand(inCanvas: AnyCanvas, result: HandResult[], drawOptions?: Partial<DrawOptions>) {
   const localOptions: DrawOptions = mergeDeep(options, drawOptions);
   if (!result || !inCanvas) return;
-  const ctx = getCanvasContext(inCanvas) as CanvasRenderingContext2D;
+  const ctx = getCanvasContext(inCanvas);
   if (!ctx) return;
   ctx.lineJoin = 'round';
   ctx.font = localOptions.font;
@@ -17,12 +17,13 @@ export function hand(inCanvas: AnyCanvas, result: HandResult[], drawOptions?: Pa
       ctx.strokeStyle = localOptions.color;
       ctx.fillStyle = localOptions.color;
       rect(ctx, h.box[0], h.box[1], h.box[2], h.box[3], localOptions);
-      if (localOptions.drawLabels && (localOptions.handLabels?.length > 0)) {
-        let l = localOptions.handLabels.slice();
-        l = replace(l, '[id]', h.id.toFixed(0));
-        l = replace(l, '[label]', h.label);
-        l = replace(l, '[score]', 100 * h.score);
-        labels(ctx, l, h.box[0], h.box[1], localOptions);
+      if (localOptions.drawLabels) {
+        if (localOptions.shadowColor && localOptions.shadowColor !== '') {
+          ctx.fillStyle = localOptions.shadowColor;
+          ctx.fillText(`hand:${Math.trunc(100 * h.score)}%`, h.box[0] + 3, 1 + h.box[1] + localOptions.lineHeight, h.box[2]); // can use h.label
+        }
+        ctx.fillStyle = localOptions.labelColor;
+        ctx.fillText(`hand:${Math.trunc(100 * h.score)}%`, h.box[0] + 2, 0 + h.box[1] + localOptions.lineHeight, h.box[2]); // can use h.label
       }
       ctx.stroke();
     }
@@ -34,12 +35,20 @@ export function hand(inCanvas: AnyCanvas, result: HandResult[], drawOptions?: Pa
         }
       }
     }
-    if (localOptions.drawLabels && h.annotations && (localOptions.fingerLabels?.length > 0)) {
-      for (const [part, pt] of Object.entries(h.annotations)) {
-        let l = localOptions.fingerLabels.slice();
-        l = replace(l, '[label]', part);
-        labels(ctx, l, pt[pt.length - 1][0], pt[pt.length - 1][1], localOptions);
-      }
+    if (localOptions.drawLabels && h.annotations) {
+      const addHandLabel = (part: Point[], title: string) => {
+        if (!part || part.length === 0 || !part[0]) return;
+        const z = part[part.length - 1][2] || -256;
+        ctx.fillStyle = colorDepth(z, localOptions);
+        ctx.fillText(title, part[part.length - 1][0] + 4, part[part.length - 1][1] + 4);
+      };
+      ctx.font = localOptions.font;
+      addHandLabel(h.annotations.index, 'index');
+      addHandLabel(h.annotations.middle, 'middle');
+      addHandLabel(h.annotations.ring, 'ring');
+      addHandLabel(h.annotations.pinky, 'pinky');
+      addHandLabel(h.annotations.thumb, 'thumb');
+      addHandLabel(h.annotations.palm, 'palm');
     }
     if (localOptions.drawPolygons && h.annotations) {
       const addHandLine = (part: Point[]) => {

View File

@@ -1,18 +0,0 @@
-export const defaultLabels = {
-  face: `face
-    confidence: [score]%
-    [gender] [genderScore]%
-    age: [age] years
-    distance: [distance]cm
-    real: [real]%
-    live: [live]%
-    [emotions]
-    roll: [roll]° yaw:[yaw]° pitch:[pitch]°
-    gaze: [gaze]°`,
-  body: 'body [score]%',
-  bodyPart: '[label] [score]%',
-  object: '[label] [score]%',
-  hand: '[label] [score]%',
-  finger: '[label]',
-  gesture: '[where] [who]: [what]',
-};
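
On the main side, drawn labels are built from the template strings above, with placeholders such as [label], [score] or [id] substituted at draw time (see the replace/labels helpers in the body, hand, object and gesture hunks). A sketch of overriding two of these templates, assuming they are reachable through `human.draw.options` as the options file documents:

```ts
// Sketch only: customizing main-side label templates; placeholders are replaced per result.
human.draw.options.bodyLabels = 'person #[id] ([score]%)';
human.draw.options.objectLabels = '[label] [score]%';
```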

View File

@@ -1,5 +1,5 @@
 import { mergeDeep } from '../util/util';
-import { getCanvasContext, rect, replace, labels } from './primitives';
+import { getCanvasContext, rect } from './primitives';
 import { options } from './options';
 import type { ObjectResult } from '../result';
 import type { AnyCanvas, DrawOptions } from '../exports';
@@ -8,7 +8,7 @@ import type { AnyCanvas, DrawOptions } from '../exports';
 export function object(inCanvas: AnyCanvas, result: ObjectResult[], drawOptions?: Partial<DrawOptions>) {
   const localOptions: DrawOptions = mergeDeep(options, drawOptions);
   if (!result || !inCanvas) return;
-  const ctx = getCanvasContext(inCanvas) as CanvasRenderingContext2D;
+  const ctx = getCanvasContext(inCanvas);
   if (!ctx) return;
   ctx.lineJoin = 'round';
   ctx.font = localOptions.font;
@@ -17,12 +17,14 @@ export function object(inCanvas: AnyCanvas, result: ObjectResult[], drawOptions?
       ctx.strokeStyle = localOptions.color;
       ctx.fillStyle = localOptions.color;
       rect(ctx, h.box[0], h.box[1], h.box[2], h.box[3], localOptions);
-      if (localOptions.drawLabels && (localOptions.objectLabels?.length > 0)) {
-        let l = localOptions.objectLabels.slice();
-        l = replace(l, '[id]', h.id.toFixed(0));
-        l = replace(l, '[label]', h.label);
-        l = replace(l, '[score]', 100 * h.score);
-        labels(ctx, l, h.box[0], h.box[1], localOptions);
+      if (localOptions.drawLabels) {
+        const label = `${h.label} ${Math.round(100 * h.score)}%`;
+        if (localOptions.shadowColor && localOptions.shadowColor !== '') {
+          ctx.fillStyle = localOptions.shadowColor;
+          ctx.fillText(label, h.box[0] + 3, 1 + h.box[1] + localOptions.lineHeight, h.box[2]);
+        }
+        ctx.fillStyle = localOptions.labelColor;
+        ctx.fillText(label, h.box[0] + 2, 0 + h.box[1] + localOptions.lineHeight, h.box[2]);
       }
       ctx.stroke();
     }

View File

@@ -1,7 +1,6 @@
 /** Draw Options
  * - Accessed via `human.draw.options` or provided per each draw method as the drawOptions optional parameter
  */
 export interface DrawOptions {
   /** draw line color */
   color: string,
@@ -41,20 +40,6 @@ export interface DrawOptions {
   useDepth: boolean,
   /** should lines be curved? */
   useCurves: boolean,
-  /** string template for face labels */
-  faceLabels: string,
-  /** string template for body labels */
-  bodyLabels: string,
-  /** string template for body part labels */
-  bodyPartLabels: string,
-  /** string template for hand labels */
-  handLabels: string,
-  /** string template for hand labels */
-  fingerLabels: string,
-  /** string template for object labels */
-  objectLabels: string,
-  /** string template for gesture labels */
-  gestureLabels: string,
 }
 /** currently set draw options {@link DrawOptions} */
@@ -78,11 +63,4 @@ export const options: DrawOptions = {
   fillPolygons: false as boolean,
   useDepth: true as boolean,
   useCurves: false as boolean,
-  faceLabels: '' as string,
-  bodyLabels: '' as string,
-  bodyPartLabels: '' as string,
-  objectLabels: '' as string,
-  handLabels: '' as string,
-  fingerLabels: '' as string,
-  gestureLabels: '' as string,
 };

View File

@@ -7,7 +7,7 @@ export const getCanvasContext = (input: AnyCanvas) => {
   if (!input) log('draw error: invalid canvas');
   else if (!input.getContext) log('draw error: canvas context not defined');
   else {
-    const ctx = input.getContext('2d', { willReadFrequently: true });
+    const ctx = input.getContext('2d');
     if (!ctx) log('draw error: cannot get canvas context');
     else return ctx;
   }
@@ -16,28 +16,12 @@ export const getCanvasContext = (input: AnyCanvas) => {
 export const rad2deg = (theta: number) => Math.round((theta * 180) / Math.PI);
-export const replace = (str: string, source: string, target: string | number) => str.replace(source, typeof target === 'number' ? target.toFixed(1) : target);
 export const colorDepth = (z: number | undefined, opt: DrawOptions): string => { // performance optimization needed
   if (!opt.useDepth || typeof z === 'undefined') return opt.color;
   const rgb = Uint8ClampedArray.from([127 + (2 * z), 127 - (2 * z), 255]);
   return `rgba(${rgb[0]}, ${rgb[1]}, ${rgb[2]}, ${opt.alpha})`;
 };
-export function labels(ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D, str: string, startX: number, startY: number, localOptions: DrawOptions) {
-  const line: string[] = str.replace(/\[.*\]/g, '').split('\n').map((l) => l.trim()); // remove unmatched templates and split into array
-  const x = Math.max(0, startX);
-  for (let i = line.length - 1; i >= 0; i--) {
-    const y = i * localOptions.lineHeight + startY;
-    if (localOptions.shadowColor && localOptions.shadowColor !== '') {
-      ctx.fillStyle = localOptions.shadowColor;
-      ctx.fillText(line[i], x + 5, y + 16);
-    }
-    ctx.fillStyle = localOptions.labelColor;
-    ctx.fillText(line[i], x + 4, y + 15);
-  }
-}
 export function point(ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D, x: number, y: number, z: number | undefined, localOptions: DrawOptions) {
   ctx.fillStyle = colorDepth(z, localOptions);
   ctx.beginPath();

View File

@@ -6,10 +6,8 @@ export * from './config';
 /* Export results details */
 export * from './result';
-/**
- * Explict reexport of main @tensorflow/tfjs types
- */
-export type { Tensor, Tensor1D, Tensor2D, Tensor3D, Tensor4D, TensorLike, GraphModel, Rank } from './tfjs/types';
+/* Explict reexport of main @tensorflow/tfjs types */
+export type { Tensor, TensorLike, GraphModel, Rank } from './tfjs/types';
 // re-export types
 export type { DrawOptions } from './draw/options';
@@ -21,6 +19,8 @@ export type { WebCam, WebCamConfig } from './util/webcam';
 // export type { Models, ModelStats, KernelOps } from './models';
 export type { ModelInfo } from './tfjs/load';
+// define enum types
 /** Events dispatched by `human.events`
  * - `create`: triggered when Human object is instantiated
  * - `load`: triggered when models are loaded (explicitly or on-demand)

View File

@@ -60,9 +60,7 @@ export const calculateFaceAngle = (face: FaceResult, imageSize: [number, number]
     let thetaZ: number;
     if (r10 < 1) { // YZX calculation
       if (r10 > -1) {
-        // thetaZ = Math.asin(r10);
-        const cosThetaZ = Math.sqrt(r00 * r00 + r20 * r20); // <https://github.com/vladmandic/human/issues/464>
-        thetaZ = Math.atan2(r10, cosThetaZ);
+        thetaZ = Math.asin(r10);
         thetaY = Math.atan2(-r20, r00);
         thetaX = Math.atan2(-r12, r11);
       } else {
@@ -78,9 +76,9 @@ export const calculateFaceAngle = (face: FaceResult, imageSize: [number, number]
     if (Number.isNaN(thetaX)) thetaX = 0;
     if (Number.isNaN(thetaY)) thetaY = 0;
     if (Number.isNaN(thetaZ)) thetaZ = 0;
-    // return { pitch: 2 * -thetaX, yaw: 2 * -thetaY, roll: 2 * -thetaZ };
-    return { pitch: -thetaX, yaw: -thetaY, roll: -thetaZ };
+    return { pitch: 2 * -thetaX, yaw: 2 * -thetaY, roll: 2 * -thetaZ };
   };
   /*
   const meshToEulerAngle = (mesh) => { // simple Euler angle calculation based existing 3D mesh
     const radians = (a1, a2, b1, b2) => Math.atan2(b2 - a2, b1 - a1);
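
The first hunk above swaps a plain `asin(r10)` roll estimate for an `atan2` form (referencing issue #464). The two agree when the first matrix column is exactly unit length, since then sqrt(r00² + r20²) = sqrt(1 - r10²); the atan2 form simply stays defined if r10 drifts slightly outside [-1, 1]. A small sketch with placeholder matrix entries:

```ts
// Sketch only: comparing the two roll estimates from this hunk; r00/r10/r20 values are placeholders.
const [r00, r10, r20] = [0.98, 0.05, -0.18];
const rollAsin = Math.asin(r10);                                     // 2.11.1 side
const rollAtan2 = Math.atan2(r10, Math.sqrt(r00 * r00 + r20 * r20)); // main side
```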

View File

@@ -1,28 +0,0 @@
-import type { FaceResult } from '../result';
-export function calculateCameraDistance(face: FaceResult, width: number): number {
-  // iris points are [center, left, top, right, bottom]
-  // average size of human iris is 11.7mm - fairly constant for all ages/genders/races
-  const f = face?.annotations;
-  if (!f?.leftEyeIris || !f?.rightEyeIris) return 0;
-  // get size of left and right iris in pixels, pick larger one as its likely to be more accurate and normalize to 0..1 range instead of pixels
-  const irisSize = Math.max(Math.abs(f.leftEyeIris[3][0] - f.leftEyeIris[1][0]), Math.abs(f.rightEyeIris[3][0] - f.rightEyeIris[1][0])) / width;
-  // distance of eye from camera in meters
-  const cameraDistance = Math.round(1.17 / irisSize) / 100;
-  return cameraDistance;
-}
-export function calculateEyesDistance(face: FaceResult, width: number): number {
-  // average distance between eyes is 65mm - fairly constant for typical adult male, but varies otherwise
-  const f = face?.annotations;
-  if (!f?.leftEyeIris || !f?.rightEyeIris) return 0;
-  // get size of left and right iris in pixels, pick larger one as its likely to be more accurate and normalize to 0..1 range instead of pixels
-  const irisSize = Math.max(Math.abs(f.leftEyeIris[3][0] - f.leftEyeIris[1][0]), Math.abs(f.rightEyeIris[3][0] - f.rightEyeIris[1][0])) / width;
-  // pixel x and y distance of centers of left and right iris, you can use edges instead
-  const irisDistanceXY = [f.leftEyeIris[0][0] - f.rightEyeIris[0][0], f.leftEyeIris[0][1] - f.rightEyeIris[0][1]];
-  // absolute distance bewtween eyes in 0..1 range to account for head pitch (we can ignore yaw)
-  const irisDistance = Math.sqrt((irisDistanceXY[0] * irisDistanceXY[0]) + (irisDistanceXY[1] * irisDistanceXY[1])) / width;
-  // distance between eyes in meters
-  const eyesDistance = Math.round(1.17 * irisDistance / irisSize) / 100;
-  return eyesDistance;
-}
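
The calculateCameraDistance helper above rests on a single assumption: a physical iris width of about 11.7 mm. Dividing that by the iris width measured as a fraction of the frame yields the distance estimate it returns. A worked example of the arithmetic:

```ts
// Worked example of the formula above: an iris spanning 23.4 px in a 1000 px wide frame.
const irisSize = 23.4 / 1000;                             // normalized iris width = 0.0234
const cameraDistance = Math.round(1.17 / irisSize) / 100; // 1.17 / 0.0234 = 50 -> 0.5 meters
```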

View File

@@ -2,10 +2,10 @@
  * Anti-spoofing model implementation
  */
-import * as tf from 'dist/tfjs.esm.js';
 import { log, now } from '../util/util';
 import type { Config } from '../config';
-import type { GraphModel, Tensor, Tensor4D } from '../tfjs/types';
+import type { GraphModel, Tensor } from '../tfjs/types';
+import * as tf from '../../dist/tfjs.esm.js';
 import { loadModel } from '../tfjs/load';
 import { env } from '../util/env';
@@ -22,8 +22,8 @@ export async function load(config: Config): Promise<GraphModel> {
   return model;
 }
-export async function predict(image: Tensor4D, config: Config, idx: number, count: number): Promise<number> {
-  if (!model?.['executor']) return 0;
+export async function predict(image: Tensor, config: Config, idx: number, count: number): Promise<number> {
+  if (!model || !model?.['executor']) return 0;
   const skipTime = (config.face.antispoof?.skipTime || 0) > (now() - lastTime);
   const skipFrame = skipped < (config.face.antispoof?.skipFrames || 0);
   if (config.skipAllowed && skipTime && skipFrame && (lastCount === count) && cached[idx]) {

View File

@@ -3,23 +3,24 @@
   * See `facemesh.ts` for entry point
   */
- import * as tf from 'dist/tfjs.esm.js';
  import { log } from '../util/util';
+ import * as tf from '../../dist/tfjs.esm.js';
  import * as util from './facemeshutil';
  import { loadModel } from '../tfjs/load';
  import { constants } from '../tfjs/constants';
  import type { Config } from '../config';
- import type { Tensor, GraphModel, Tensor1D, Tensor2D, Tensor4D } from '../tfjs/types';
+ import type { Tensor, GraphModel } from '../tfjs/types';
  import { env } from '../util/env';
  import type { Point } from '../result';
  const keypointsCount = 6;
+ const faceBoxScaleFactor = 1.4;
  let model: GraphModel | null;
  let anchors: Tensor | null = null;
  let inputSize = 0;
  let inputSizeT: Tensor | null = null;
- export interface DetectBox { startPoint: Point, endPoint: Point, landmarks: Point[], confidence: number, size: [number, number] }
+ interface DetectBox { startPoint: Point, endPoint: Point, landmarks: Point[], confidence: number }
  export const size = () => inputSize;
@@ -34,7 +35,6 @@ export async function load(config: Config): Promise<GraphModel> {
  }
  function decodeBoxes(boxOutputs: Tensor) {
- if (!anchors || !inputSizeT) return tf.zeros([0, 0]);
  const t: Record<string, Tensor> = {};
  t.boxStarts = tf.slice(boxOutputs, [0, 1], [-1, 2]);
  t.centers = tf.add(t.boxStarts, anchors);
@@ -46,36 +46,25 @@ function decodeBoxes(boxOutputs: Tensor) {
  t.ends = tf.add(t.centersNormalized, t.halfBoxSize);
  t.startNormalized = tf.mul(t.starts, inputSizeT);
  t.endNormalized = tf.mul(t.ends, inputSizeT);
- const boxes = tf.concat2d([t.startNormalized as Tensor2D, t.endNormalized as Tensor2D], 1);
+ const boxes = tf.concat2d([t.startNormalized, t.endNormalized], 1);
  Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
  return boxes;
  }
- export async function getBoxes(inputImage: Tensor4D, config: Config): Promise<DetectBox[]> {
+ export async function getBoxes(inputImage: Tensor, config: Config) {
  // sanity check on input
  if ((!inputImage) || (inputImage['isDisposedInternal']) || (inputImage.shape.length !== 4) || (inputImage.shape[1] < 1) || (inputImage.shape[2] < 1)) return [];
  const t: Record<string, Tensor> = {};
- let pad = [0, 0];
- let scale = [1, 1];
- if (config?.face?.detector?.square) {
- const xy = Math.max(inputImage.shape[2], inputImage.shape[1]);
- pad = [Math.floor((xy - inputImage.shape[2]) / 2), Math.floor((xy - inputImage.shape[1]) / 2)];
- t.padded = tf.pad(inputImage, [[0, 0], [pad[1], pad[1]], [pad[0], pad[0]], [0, 0]]);
- scale = [inputImage.shape[2] / xy, inputImage.shape[1] / xy];
- pad = [pad[0] / inputSize, pad[1] / inputSize];
- } else {
- t.padded = inputImage.clone();
- }
- t.resized = tf.image.resizeBilinear(t.padded as Tensor4D, [inputSize, inputSize]);
+ t.resized = tf.image.resizeBilinear(inputImage, [inputSize, inputSize]);
  t.div = tf.div(t.resized, constants.tf127);
- t.normalized = tf.sub(t.div, constants.tf1);
+ t.normalized = tf.sub(t.div, constants.tf05);
  const res = model?.execute(t.normalized) as Tensor[];
  if (Array.isArray(res) && res.length > 2) { // pinto converted model?
  const sorted = res.sort((a, b) => a.size - b.size);
  t.concat384 = tf.concat([sorted[0], sorted[2]], 2); // dim: 384, 1 + 16
  t.concat512 = tf.concat([sorted[1], sorted[3]], 2); // dim: 512, 1 + 16
  t.concat = tf.concat([t.concat512, t.concat384], 1);
- t.batch = tf.squeeze(t.concat, [0]);
+ t.batch = tf.squeeze(t.concat, 0);
  } else if (Array.isArray(res)) { // new facemesh-detection tfhub model
  t.batch = tf.squeeze(res[0]);
  } else { // original blazeface tfhub model
@@ -86,7 +75,7 @@ export async function getBoxes(inputImage: Tensor4D, config: Config): Promise<De
  t.logits = tf.slice(t.batch, [0, 0], [-1, 1]);
  t.sigmoid = tf.sigmoid(t.logits);
  t.scores = tf.squeeze(t.sigmoid);
- t.nms = await tf.image.nonMaxSuppressionAsync(t.boxes as Tensor2D, t.scores as Tensor1D, (config.face.detector?.maxDetected || 0), (config.face.detector?.iouThreshold || 0), (config.face.detector?.minConfidence || 0));
+ t.nms = await tf.image.nonMaxSuppressionAsync(t.boxes, t.scores, (config.face.detector?.maxDetected || 0), (config.face.detector?.iouThreshold || 0), (config.face.detector?.minConfidence || 0));
  const nms = await t.nms.array() as number[];
  const boxes: DetectBox[] = [];
  const scores = await t.scores.data();
@@ -99,24 +88,16 @@ export async function getBoxes(inputImage: Tensor4D, config: Config): Promise<De
  b.squeeze = tf.squeeze(b.slice);
  b.landmarks = tf.reshape(b.squeeze, [keypointsCount, -1]);
  const points = await b.bbox.data();
- const unpadded = [ // TODO fix this math
- points[0] * scale[0] - pad[0],
- points[1] * scale[1] - pad[1],
- points[2] * scale[0] - pad[0],
- points[3] * scale[1] - pad[1],
- ];
  const rawBox = {
- startPoint: [unpadded[0], unpadded[1]] as Point,
+ startPoint: [points[0], points[1]] as Point,
- endPoint: [unpadded[2], unpadded[3]] as Point,
+ endPoint: [points[2], points[3]] as Point,
  landmarks: (await b.landmarks.array()) as Point[],
  confidence,
  };
- b.anchor = tf.slice(anchors as Tensor, [nms[i], 0], [1, 2]);
- const anchor = await b.anchor.data();
- const scaledBox = util.scaleBoxCoordinates(rawBox, [(inputImage.shape[2] || 0) / inputSize, (inputImage.shape[1] || 0) / inputSize], anchor);
- const enlargedBox = util.enlargeBox(scaledBox, config.face.detector?.scale || 1.4);
+ const scaledBox = util.scaleBoxCoordinates(rawBox, [(inputImage.shape[2] || 0) / inputSize, (inputImage.shape[1] || 0) / inputSize]);
+ const enlargedBox = util.enlargeBox(scaledBox, config.face['scale'] || faceBoxScaleFactor);
  const squaredBox = util.squarifyBox(enlargedBox);
- if (squaredBox.size[0] > (config.face.detector?.['minSize'] || 0) && squaredBox.size[1] > (config.face.detector?.['minSize'] || 0)) boxes.push(squaredBox);
+ boxes.push(squaredBox);
  Object.keys(b).forEach((tensor) => tf.dispose(b[tensor]));
  }
  }
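For context on the removed ("-") side of the getBoxes hunk: when config.face.detector.square is set, the frame is letterboxed to a square before resizing so the detector does not stretch non-square video, and the pad/scale values are kept so box coordinates can later be mapped back to the original frame. A hedged sketch of just that padding step, assuming an NHWC input tensor; the name padToSquare is illustrative, not a library export.

import * as tf from '@tensorflow/tfjs';

function padToSquare(input: tf.Tensor4D): { padded: tf.Tensor4D, pad: [number, number], scale: [number, number] } {
  const [, height, width] = input.shape;
  const xy = Math.max(width, height);
  const pad: [number, number] = [Math.floor((xy - width) / 2), Math.floor((xy - height) / 2)];
  // paddings are per-dimension [before, after] pairs for [batch, height, width, channels]
  const paddings: [number, number][] = [[0, 0], [pad[1], pad[1]], [pad[0], pad[0]], [0, 0]];
  const padded = tf.pad(input, paddings);
  const scale: [number, number] = [width / xy, height / xy];
  return { padded, pad, scale };
}

The removed unpadded block later reverses this by multiplying the detected coordinates by scale and subtracting pad.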


@@ -3,9 +3,9 @@
   * Uses FaceMesh, Emotion and FaceRes models to create a unified pipeline
   */
- import * as tf from 'dist/tfjs.esm.js';
  import { log, now } from '../util/util';
  import { env } from '../util/env';
+ import * as tf from '../../dist/tfjs.esm.js';
  import * as facemesh from './facemesh';
  import * as emotion from '../gear/emotion';
  import * as faceres from './faceres';
@@ -18,14 +18,13 @@ import * as ssrnetGender from '../gear/ssrnet-gender';
  import * as mobilefacenet from './mobilefacenet';
  import * as insightface from './insightface';
  import type { FaceResult, Emotion, Gender, Race } from '../result';
- import type { Tensor4D } from '../tfjs/types';
+ import type { Tensor } from '../tfjs/types';
  import type { Human } from '../human';
  import { calculateFaceAngle } from './angles';
- import { calculateCameraDistance } from './anthropometry';
  interface DescRes { age: number, gender: Gender, genderScore: number, descriptor: number[], race?: { score: number, race: Race }[] }
- export const detectFace = async (instance: Human /* instance of human */, input: Tensor4D): Promise<FaceResult[]> => {
+ export const detectFace = async (instance: Human /* instance of human */, input: Tensor): Promise<FaceResult[]> => {
  // run facemesh, includes blazeface and iris
  let timeStamp: number = now();
  let ageRes: { age: number } | Promise<{ age: number }> | null;
@@ -40,7 +39,8 @@ export const detectFace = async (instance: Human /* instance of human */, input:
  const faceRes: FaceResult[] = [];
  instance.state = 'run:face';
- const faces: FaceResult[] = await facemesh.predict(input, instance.config);
+ const faces = await facemesh.predict(input, instance.config);
  instance.performance.face = env.perfadd ? (instance.performance.face || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
  if (!input.shape || input.shape.length !== 4) return [];
  if (!faces) return [];
@@ -68,11 +68,11 @@ export const detectFace = async (instance: Human /* instance of human */, input:
  // run emotion, inherits face from blazeface
  instance.analyze('Start Emotion:');
  if (instance.config.async) {
- emotionRes = instance.config.face.emotion?.enabled ? emotion.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : [];
+ emotionRes = instance.config.face.emotion?.enabled ? emotion.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : [];
  } else {
  instance.state = 'run:emotion';
  timeStamp = now();
- emotionRes = instance.config.face.emotion?.enabled ? await emotion.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : [];
+ emotionRes = instance.config.face.emotion?.enabled ? await emotion.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : [];
  instance.performance.emotion = env.perfadd ? (instance.performance.emotion || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
  }
  instance.analyze('End Emotion:');
@@ -80,11 +80,11 @@ export const detectFace = async (instance: Human /* instance of human */, input:
  // run antispoof, inherits face from blazeface
  instance.analyze('Start AntiSpoof:');
  if (instance.config.async) {
- antispoofRes = instance.config.face.antispoof?.enabled ? antispoof.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : 0;
+ antispoofRes = instance.config.face.antispoof?.enabled ? antispoof.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : 0;
  } else {
  instance.state = 'run:antispoof';
  timeStamp = now();
- antispoofRes = instance.config.face.antispoof?.enabled ? await antispoof.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : 0;
+ antispoofRes = instance.config.face.antispoof?.enabled ? await antispoof.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : 0;
  instance.performance.antispoof = env.perfadd ? (instance.performance.antispoof || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
  }
  instance.analyze('End AntiSpoof:');
@@ -92,11 +92,11 @@ export const detectFace = async (instance: Human /* instance of human */, input:
  // run liveness, inherits face from blazeface
  instance.analyze('Start Liveness:');
  if (instance.config.async) {
- livenessRes = instance.config.face.liveness?.enabled ? liveness.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : 0;
+ livenessRes = instance.config.face.liveness?.enabled ? liveness.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : 0;
  } else {
  instance.state = 'run:liveness';
  timeStamp = now();
- livenessRes = instance.config.face.liveness?.enabled ? await liveness.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : 0;
+ livenessRes = instance.config.face.liveness?.enabled ? await liveness.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : 0;
  instance.performance.liveness = env.perfadd ? (instance.performance.antispoof || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
  }
  instance.analyze('End Liveness:');
@@ -104,25 +104,25 @@ export const detectFace = async (instance: Human /* instance of human */, input:
  // run gear, inherits face from blazeface
  instance.analyze('Start GEAR:');
  if (instance.config.async) {
- gearRes = instance.config.face.gear?.enabled ? gear.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
+ gearRes = instance.config.face.gear?.enabled ? gear.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
  } else {
  instance.state = 'run:gear';
  timeStamp = now();
- gearRes = instance.config.face.gear?.enabled ? await gear.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
+ gearRes = instance.config.face.gear?.enabled ? await gear.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
  instance.performance.gear = Math.trunc(now() - timeStamp);
  }
  instance.analyze('End GEAR:');
- // run ssrnet, inherits face from blazeface
+ // run gear, inherits face from blazeface
  instance.analyze('Start SSRNet:');
  if (instance.config.async) {
- ageRes = instance.config.face['ssrnet']?.enabled ? ssrnetAge.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
+ ageRes = instance.config.face['ssrnet']?.enabled ? ssrnetAge.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
- genderRes = instance.config.face['ssrnet']?.enabled ? ssrnetGender.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
+ genderRes = instance.config.face['ssrnet']?.enabled ? ssrnetGender.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
  } else {
  instance.state = 'run:ssrnet';
  timeStamp = now();
- ageRes = instance.config.face['ssrnet']?.enabled ? await ssrnetAge.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
+ ageRes = instance.config.face['ssrnet']?.enabled ? await ssrnetAge.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
- genderRes = instance.config.face['ssrnet']?.enabled ? await ssrnetGender.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
+ genderRes = instance.config.face['ssrnet']?.enabled ? await ssrnetGender.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
  instance.performance.ssrnet = Math.trunc(now() - timeStamp);
  }
  instance.analyze('End SSRNet:');
@@ -130,11 +130,11 @@ export const detectFace = async (instance: Human /* instance of human */, input:
  // run mobilefacenet alternative, inherits face from blazeface
  instance.analyze('Start MobileFaceNet:');
  if (instance.config.async) {
- mobilefacenetRes = instance.config.face['mobilefacenet']?.enabled ? mobilefacenet.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
+ mobilefacenetRes = instance.config.face['mobilefacenet']?.enabled ? mobilefacenet.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
  } else {
  instance.state = 'run:mobilefacenet';
  timeStamp = now();
- mobilefacenetRes = instance.config.face['mobilefacenet']?.enabled ? await mobilefacenet.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
+ mobilefacenetRes = instance.config.face['mobilefacenet']?.enabled ? await mobilefacenet.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
  instance.performance.mobilefacenet = Math.trunc(now() - timeStamp);
  }
  instance.analyze('End MobileFaceNet:');
@@ -142,11 +142,11 @@ export const detectFace = async (instance: Human /* instance of human */, input:
  // run insightface alternative, inherits face from blazeface
  instance.analyze('Start InsightFace:');
  if (instance.config.async) {
- insightfaceRes = instance.config.face['insightface']?.enabled ? insightface.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
+ insightfaceRes = instance.config.face['insightface']?.enabled ? insightface.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
  } else {
  instance.state = 'run:mobilefacenet';
  timeStamp = now();
- insightfaceRes = instance.config.face['insightface']?.enabled ? await insightface.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length) : null;
+ insightfaceRes = instance.config.face['insightface']?.enabled ? await insightface.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
  instance.performance.mobilefacenet = Math.trunc(now() - timeStamp);
  }
  instance.analyze('End InsightFace:');
@@ -154,11 +154,11 @@ export const detectFace = async (instance: Human /* instance of human */, input:
  // run faceres, inherits face from blazeface
  instance.analyze('Start Description:');
  if (instance.config.async) {
- descRes = faceres.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length);
+ descRes = faceres.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length);
  } else {
  instance.state = 'run:description';
  timeStamp = now();
- descRes = await faceres.predict(faces[i].tensor as Tensor4D || tf.tensor([]), instance.config, i, faces.length);
+ descRes = await faceres.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length);
  instance.performance.description = env.perfadd ? (instance.performance.description || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
  }
  instance.analyze('End Description:');
@@ -194,10 +194,20 @@ export const detectFace = async (instance: Human /* instance of human */, input:
  (descRes as DescRes).descriptor = insightfaceRes as number[];
  }
- const irisSize = instance.config.face.iris?.enabled ? calculateCameraDistance(faces[i], input.shape[2]) : 0; // calculate iris distance
+ // iris: array[ center, left, top, right, bottom]
+ if (!instance.config.face.iris?.enabled) {
+ // if (faces[i]?.annotations?.leftEyeIris) delete faces[i].annotations.leftEyeIris;
+ // if (faces[i]?.annotations?.rightEyeIris) delete faces[i].annotations.rightEyeIris;
+ }
+ const irisSize = (faces[i]?.annotations?.leftEyeIris?.[0] && faces[i]?.annotations?.rightEyeIris?.[0]
+ && (faces[i].annotations.leftEyeIris.length > 0) && (faces[i].annotations.rightEyeIris.length > 0)
+ && (faces[i].annotations.leftEyeIris[0] !== null) && (faces[i].annotations.rightEyeIris[0] !== null))
+ ? Math.max(Math.abs(faces[i].annotations.leftEyeIris[3][0] - faces[i].annotations.leftEyeIris[1][0]), Math.abs(faces[i].annotations.rightEyeIris[4][1] - faces[i].annotations.rightEyeIris[2][1])) / input.shape[2]
+ : 0; // note: average human iris size is 11.7mm
  // optionally return tensor
- const tensor = instance.config.face.detector?.return ? tf.squeeze(faces[i].tensor as Tensor4D) : null;
+ const tensor = instance.config.face.detector?.return ? tf.squeeze(faces[i].tensor) : null;
  // dispose original face tensor
  tf.dispose(faces[i].tensor);
  // delete temp face image
@@ -215,7 +225,7 @@ export const detectFace = async (instance: Human /* instance of human */, input:
  if (emotionRes) res.emotion = emotionRes as { score: number, emotion: Emotion }[];
  if (antispoofRes) res.real = antispoofRes as number;
  if (livenessRes) res.live = livenessRes as number;
- if (irisSize > 0) res.distance = irisSize;
+ if (irisSize && irisSize !== 0) res.iris = Math.trunc(500 / irisSize / 11.7) / 100;
  if (rotation) res.rotation = rotation;
  if (tensor) res.tensor = tensor;
  faceRes.push(res);
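The 2.11.1 ("+") side derives res.iris from how large the detected iris appears relative to the frame width; the only documented constant is the 11.7 mm average iris diameter from the code comment, while 500 acts as a fixed scaling factor. A worked example of that arithmetic with made-up pixel values:

// illustrative values only, not taken from any real detection
const leftIrisWidthPx = 28;    // hypothetical measured left iris width in pixels
const rightIrisHeightPx = 26;  // hypothetical measured right iris height in pixels
const imageWidthPx = 1280;
const irisSize = Math.max(leftIrisWidthPx, rightIrisHeightPx) / imageWidthPx; // ≈ 0.0219
// 11.7 mm is the average human iris diameter (per the code comment); 500 is a fixed scaling constant
const iris = Math.trunc(500 / irisSize / 11.7) / 100; // ≈ 19.5, the relative value stored in res.iris

On the main ("-") side the same idea is moved into calculateCameraDistance() and the result is stored as res.distance instead.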


@@ -1,7 +1,7 @@
  // https://github.com/TropComplique/FaceBoxes-tensorflow
- import * as tf from 'dist/tfjs.esm.js';
  import { log } from '../util/util';
+ import * as tf from '../../dist/tfjs.esm.js';
  import { loadModel } from '../tfjs/load';
  import type { GraphModel, Tensor } from '../tfjs/types';
  import type { Config } from '../config';
@@ -9,11 +9,13 @@ import type { Config } from '../config';
  type Box = [number, number, number, number];
  export class FaceBoxes {
+ enlarge: number;
  model: GraphModel;
  config: Config;
  inputSize: 0;
  constructor(model, config: Config) {
+ this.enlarge = 1.1;
  this.model = model;
  this.config = config;
  this.inputSize = model.inputs[0].shape ? model.inputs[0].shape[2] : 0;
@@ -21,23 +23,22 @@ export class FaceBoxes {
  async estimateFaces(input, config) {
  if (config) this.config = config;
- const enlarge = this.config.face.detector?.minConfidence || 0.1;
  const results: { confidence: number, box: Box, boxRaw: Box, image: Tensor }[] = [];
  const resizeT = tf.image.resizeBilinear(input, [this.inputSize, this.inputSize]);
  const castT = resizeT.toInt();
  const [scoresT, boxesT, numT] = await this.model.executeAsync(castT) as Tensor[];
  const scores = await scoresT.data();
  const squeezeT = tf.squeeze(boxesT);
- const boxes = squeezeT.arraySync() as number[][];
+ const boxes = squeezeT.arraySync();
  scoresT.dispose();
  boxesT.dispose();
  squeezeT.dispose();
  numT.dispose();
  castT.dispose();
  resizeT.dispose();
- for (let i = 0; i < boxes.length; i++) {
+ for (const i in boxes) {
  if (scores[i] && scores[i] > (this.config.face.detector?.minConfidence || 0.1)) {
- const crop = [boxes[i][0] / enlarge, boxes[i][1] / enlarge, boxes[i][2] * enlarge, boxes[i][3] * enlarge];
+ const crop = [boxes[i][0] / this.enlarge, boxes[i][1] / this.enlarge, boxes[i][2] * this.enlarge, boxes[i][3] * this.enlarge];
  const boxRaw: Box = [crop[1], crop[0], (crop[3]) - (crop[1]), (crop[2]) - (crop[0])];
  const box: Box = [
  parseInt((boxRaw[0] * input.shape[2]).toString()),
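The estimateFaces loop above reorders the model's normalized box output and scales it by the input tensor shape to get pixel coordinates. A hedged sketch of that conversion, assuming the detector emits [y1, x1, y2, x2] boxes in the 0..1 range; the names here are illustrative and the enlarge-factor handling is omitted:

type Box = [number, number, number, number];

function toPixelBox(norm: [number, number, number, number], width: number, height: number): Box {
  const [y1, x1, y2, x2] = norm;
  const boxRaw: Box = [x1, y1, x2 - x1, y2 - y1]; // normalized [x, y, w, h]
  return [
    Math.round(boxRaw[0] * width),
    Math.round(boxRaw[1] * height),
    Math.round(boxRaw[2] * width),
    Math.round(boxRaw[3] * height),
  ];
}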


@@ -7,9 +7,9 @@
   * - Eye Iris Details: [**MediaPipe Iris**](https://drive.google.com/file/d/1bsWbokp9AklH2ANjCfmjqEzzxO1CNbMu/view)
   */
- import * as tf from 'dist/tfjs.esm.js';
  import { log, now } from '../util/util';
  import { loadModel } from '../tfjs/load';
+ import * as tf from '../../dist/tfjs.esm.js';
  import * as blazeface from './blazeface';
  import * as util from './facemeshutil';
  import * as coords from './facemeshcoords';
@@ -17,10 +17,11 @@ import * as iris from './iris';
  import * as attention from './attention';
  import { histogramEqualization } from '../image/enhance';
  import { env } from '../util/env';
- import type { GraphModel, Tensor, Tensor4D } from '../tfjs/types';
+ import type { GraphModel, Tensor } from '../tfjs/types';
  import type { FaceResult, FaceLandmark, Point } from '../result';
  import type { Config } from '../config';
- import type { DetectBox } from './blazeface';
+ interface DetectBox { startPoint: Point, endPoint: Point, landmarks: Point[], confidence: number }
  const cache = {
  boxes: [] as DetectBox[],
@@ -31,7 +32,8 @@ const cache = {
  let model: GraphModel | null = null;
  let inputSize = 0;
- export async function predict(input: Tensor4D, config: Config): Promise<FaceResult[]> {
+ export async function predict(input: Tensor, config: Config): Promise<FaceResult[]> {
+ if (!model?.['executor']) return [];
  // reset cached boxes
  const skipTime = (config.face.detector?.skipTime || 0) > (now() - cache.timestamp);
  const skipFrame = cache.skipped < (config.face.detector?.skipFrames || 0);
@@ -59,7 +61,6 @@ export async function predict(input: Tensor4D, config: Config): Promise<FaceResu
  score: 0,
  boxScore: 0,
  faceScore: 0,
- size: [0, 0],
  // contoursRaw: [],
  // contours: [],
  annotations: {} as Record<FaceLandmark, Point[]>,
@@ -73,14 +74,18 @@ export async function predict(input: Tensor4D, config: Config): Promise<FaceResu
  if (equilized) face.tensor = equilized;
  }
  face.boxScore = Math.round(100 * box.confidence) / 100;
- if (!config.face.mesh?.enabled || !model?.['executor']) { // mesh not enabled or not loaded, return resuts from detector only
+ if (!config.face.mesh?.enabled) { // mesh not enabled, return resuts from detector only
  face.box = util.clampBox(box, input);
  face.boxRaw = util.getRawBox(box, input);
  face.score = face.boxScore;
- face.size = box.size;
- face.mesh = box.landmarks;
+ face.mesh = box.landmarks.map((pt) => [
+ ((box.startPoint[0] + box.endPoint[0])) / 2 + ((box.endPoint[0] + box.startPoint[0]) * pt[0] / blazeface.size()),
+ ((box.startPoint[1] + box.endPoint[1])) / 2 + ((box.endPoint[1] + box.startPoint[1]) * pt[1] / blazeface.size()),
+ ]);
  face.meshRaw = face.mesh.map((pt) => [pt[0] / (input.shape[2] || 0), pt[1] / (input.shape[1] || 0), (pt[2] || 0) / size]);
- for (const key of Object.keys(coords.blazeFaceLandmarks)) face.annotations[key] = [face.mesh[coords.blazeFaceLandmarks[key] as number]]; // add annotations
+ for (const key of Object.keys(coords.blazeFaceLandmarks)) {
+ face.annotations[key] = [face.mesh[coords.blazeFaceLandmarks[key] as number]]; // add annotations
+ }
  } else if (!model) { // mesh enabled, but not loaded
  if (config.debug) log('face mesh detection requested, but model is not loaded');
  } else { // mesh enabled
@@ -95,12 +100,14 @@ export async function predict(input: Tensor4D, config: Config): Promise<FaceResu
  face.faceScore = Math.round(100 * faceConfidence[0]) / 100;
  if (face.faceScore < (config.face.detector?.minConfidence || 1)) { // low confidence in detected mesh
  box.confidence = face.faceScore; // reset confidence of cached box
- if (config.face.mesh['keepInvalid']) {
+ if (config.face.mesh.keepInvalid) {
  face.box = util.clampBox(box, input);
  face.boxRaw = util.getRawBox(box, input);
- face.size = box.size;
  face.score = face.boxScore;
- face.mesh = box.landmarks;
+ face.mesh = box.landmarks.map((pt) => [
+ ((box.startPoint[0] + box.endPoint[0])) / 2 + ((box.endPoint[0] + box.startPoint[0]) * pt[0] / blazeface.size()),
+ ((box.startPoint[1] + box.endPoint[1])) / 2 + ((box.endPoint[1] + box.startPoint[1]) * pt[1] / blazeface.size()),
+ ]);
  face.meshRaw = face.mesh.map((pt) => [pt[0] / (input.shape[2] || 1), pt[1] / (input.shape[1] || 1), (pt[2] || 0) / size]);
  for (const key of Object.keys(coords.blazeFaceLandmarks)) {
  face.annotations[key] = [face.mesh[coords.blazeFaceLandmarks[key] as number]]; // add annotations
@@ -114,21 +121,15 @@ export async function predict(input: Tensor4D, config: Config): Promise<FaceResu
  if (config.face.attention?.enabled) {
  rawCoords = await attention.augment(rawCoords, results); // augment iris results using attention model results
  } else if (config.face.iris?.enabled) {
- rawCoords = await iris.augmentIris(rawCoords, face.tensor, inputSize, config); // run iris model and augment results
+ rawCoords = await iris.augmentIris(rawCoords, face.tensor, inputSize); // run iris model and augment results
  }
  face.mesh = util.transformRawCoords(rawCoords, box, angle, rotationMatrix, inputSize); // get processed mesh
  face.meshRaw = face.mesh.map((pt) => [pt[0] / (input.shape[2] || 0), pt[1] / (input.shape[1] || 0), (pt[2] || 0) / size]);
  for (const key of Object.keys(coords.meshAnnotations)) face.annotations[key] = coords.meshAnnotations[key].map((index) => face.mesh[index]); // add annotations
  face.score = face.faceScore;
- const calculatedBox = {
- ...util.calculateFaceBox(face.mesh, box),
- confidence: box.confidence,
- landmarks: box.landmarks,
- size: box.size,
- };
+ const calculatedBox = { ...util.calculateFaceBox(face.mesh, box), confidence: box.confidence, landmarks: box.landmarks };
  face.box = util.clampBox(calculatedBox, input);
  face.boxRaw = util.getRawBox(calculatedBox, input);
- face.size = calculatedBox.size;
  /*
  const contoursT = results.find((t) => t.shape[t.shape.length - 1] === 266) as Tensor;
  const contoursData = contoursT && await contoursT.data(); // 133 x 2d points
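Both branches of the facemesh hunk compute meshRaw the same way: pixel-space mesh points are normalized by the input width and height, and the depth value by the mesh input size, giving resolution-independent 0..1 coordinates. A minimal sketch of that mapping, assuming width and height are taken from input.shape[2] and input.shape[1]; normalizeMesh is an illustrative name, not a library export.

type Pt = [number, number, number?];

function normalizeMesh(mesh: Pt[], width: number, height: number, inputSize: number): Pt[] {
  return mesh.map((pt): Pt => [pt[0] / width, pt[1] / height, (pt[2] || 0) / inputSize]);
}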


@@ -3,7 +3,7 @@
   * See `facemesh.ts` for entry point
   */
- import * as tf from 'dist/tfjs.esm.js';
+ import * as tf from '../../dist/tfjs.esm.js';
  import * as coords from './facemeshcoords';
  import { constants } from '../tfjs/constants';
  import type { Box, Point } from '../result';
@@ -31,19 +31,10 @@ export const getRawBox = (box, input): Box => (box ? [
  (box.endPoint[1] - box.startPoint[1]) / (input.shape[1] || 0),
  ] : [0, 0, 0, 0]);
- export const scaleBoxCoordinates = (box, factor, anchor) => {
+ export const scaleBoxCoordinates = (box, factor) => {
  const startPoint: Point = [box.startPoint[0] * factor[0], box.startPoint[1] * factor[1]];
  const endPoint: Point = [box.endPoint[0] * factor[0], box.endPoint[1] * factor[1]];
- // const centerPoint = [(startPoint[0] + endPoint[0]) / 2, (startPoint[1] + endPoint[1]) / 2];
- const landmarks = box.landmarks.map((pt) => [(pt[0] + anchor[0]) * factor[0], (pt[1] + anchor[1]) * factor[1]]);
- /**
- face.mesh = box.landmarks.map((pt) => [
- ((box.startPoint[0] + box.endPoint[0]) / 2) + (pt[0] * input.shape[2] / blazeface.size()),
- ((box.startPoint[1] + box.endPoint[1]) / 2) + (pt[1] * input.shape[1] / blazeface.size()),
- ]);
- */
- return { startPoint, endPoint, landmarks, confidence: box.confidence };
+ return { startPoint, endPoint, landmarks: box.landmarks, confidence: box.confidence };
  };
  export const cutAndResize = (box, image, cropSize) => {
@@ -60,36 +51,20 @@ export const enlargeBox = (box, factor) => {
  const center = getBoxCenter(box);
  const size = getBoxSize(box);
  const halfSize: [number, number] = [factor * size[0] / 2, factor * size[1] / 2];
- return {
- startPoint: [center[0] - halfSize[0], center[1] - halfSize[1]] as Point,
- endPoint: [center[0] + halfSize[0], center[1] + halfSize[1]] as Point,
- landmarks: box.landmarks,
- confidence: box.confidence,
- size,
- };
+ return { startPoint: [center[0] - halfSize[0], center[1] - halfSize[1]] as Point, endPoint: [center[0] + halfSize[0], center[1] + halfSize[1]] as Point, landmarks: box.landmarks, confidence: box.confidence };
  };
  export const squarifyBox = (box) => {
  const centers = getBoxCenter(box);
  const size = getBoxSize(box);
  const halfSize = Math.max(...size) / 2;
- return {
- startPoint: [Math.round(centers[0] - halfSize), Math.round(centers[1] - halfSize)] as Point,
- endPoint: [Math.round(centers[0] + halfSize), Math.round(centers[1] + halfSize)] as Point,
- landmarks: box.landmarks,
- confidence: box.confidence,
- size: [Math.round(size[0]), Math.round(size[1])] as [number, number],
- };
+ return { startPoint: [Math.round(centers[0] - halfSize), Math.round(centers[1] - halfSize)] as Point, endPoint: [Math.round(centers[0] + halfSize), Math.round(centers[1] + halfSize)] as Point, landmarks: box.landmarks, confidence: box.confidence };
  };
  export const calculateLandmarksBoundingBox = (landmarks) => {
  const x = landmarks.map((d) => d[0]);
  const y = landmarks.map((d) => d[1]);
- return {
- startPoint: [Math.min(...x), Math.min(...y)] as Point,
- endPoint: [Math.max(...x), Math.max(...y)] as Point,
- landmarks,
- };
+ return { startPoint: [Math.min(...x), Math.min(...y)] as Point, endPoint: [Math.max(...x), Math.max(...y)] as Point, landmarks };
  };
  export const fixedRotationMatrix = [[1, 0, 0], [0, 1, 0], [0, 0, 1]];
@@ -200,7 +175,7 @@ export function correctFaceRotation(rotate, box, input, inputSize) {
  if (largeAngle) { // perform rotation only if angle is sufficiently high
  const center: Point = getBoxCenter(box);
  const centerRaw: Point = [center[0] / input.shape[2], center[1] / input.shape[1]];
- const rotated = tf.image.rotateWithOffset(input, angle, 0, [centerRaw[0], centerRaw[1]]);
+ const rotated = tf.image.rotateWithOffset(input, angle, 0, centerRaw);
  rotationMatrix = buildRotationMatrix(-angle, center);
  face = cutAndResize(box, rotated, [inputSize, inputSize]);
  tf.dispose(rotated);
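The facemeshutil changes above mostly reflow enlargeBox and squarifyBox; the underlying geometry stays the same: keep the box center, scale the extent by a factor, or grow the shorter side so the crop region is square. A simplified sketch of that geometry, which drops the landmarks, confidence and size fields the real helpers carry along:

type Point = [number, number];
interface SimpleBox { startPoint: Point, endPoint: Point }

function enlarge(box: SimpleBox, factor: number): SimpleBox {
  const center: Point = [(box.startPoint[0] + box.endPoint[0]) / 2, (box.startPoint[1] + box.endPoint[1]) / 2];
  const half: Point = [factor * (box.endPoint[0] - box.startPoint[0]) / 2, factor * (box.endPoint[1] - box.startPoint[1]) / 2];
  const startPoint: Point = [center[0] - half[0], center[1] - half[1]];
  const endPoint: Point = [center[0] + half[0], center[1] + half[1]];
  return { startPoint, endPoint };
}

function squarify(box: SimpleBox): SimpleBox {
  const center: Point = [(box.startPoint[0] + box.endPoint[0]) / 2, (box.startPoint[1] + box.endPoint[1]) / 2];
  const half = Math.max(box.endPoint[0] - box.startPoint[0], box.endPoint[1] - box.startPoint[1]) / 2;
  const startPoint: Point = [Math.round(center[0] - half), Math.round(center[1] - half)];
  const endPoint: Point = [Math.round(center[0] + half), Math.round(center[1] + half)];
  return { startPoint, endPoint };
}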


@@ -2,17 +2,17 @@
   * FaceRes model implementation
   *
   * Returns Age, Gender, Descriptor
-  * Implements Face similarity function
+  * Implements Face simmilarity function
   *
   * Based on: [**HSE-FaceRes**](https://github.com/HSE-asavchenko/HSE_FaceRec_tf)
   */
- import * as tf from 'dist/tfjs.esm.js';
  import { log, now } from '../util/util';
  import { env } from '../util/env';
+ import * as tf from '../../dist/tfjs.esm.js';
  import { loadModel } from '../tfjs/load';
  import { constants } from '../tfjs/constants';
- import type { Tensor, GraphModel, Tensor4D, Tensor1D } from '../tfjs/types';
+ import type { Tensor, GraphModel } from '../tfjs/types';
  import type { Config } from '../config';
  import type { Gender, Race } from '../result';
@@ -32,17 +32,10 @@ export async function load(config: Config): Promise<GraphModel> {
  return model;
  }
- export function enhance(input, config: Config): Tensor {
+ export function enhance(input): Tensor {
- const tensor = (input.image || input.tensor || input) as Tensor4D; // input received from detector is already normalized to 0..1, input is also assumed to be straightened
+ const tensor = (input.image || input.tensor || input) as Tensor; // input received from detector is already normalized to 0..1, input is also assumed to be straightened
  if (!model?.inputs[0].shape) return tensor; // model has no shape so no point continuing
- let crop: Tensor;
- if (config.face.description?.['crop'] > 0) { // optional crop
- const cropval = config.face.description?.['crop'];
- const box = [[cropval, cropval, 1 - cropval, 1 - cropval]];
- crop = tf.image.cropAndResize(tensor, box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
- } else {
- crop = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
- }
+ const crop: Tensor = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
  const norm: Tensor = tf.mul(crop, constants.tf255);
  tf.dispose(crop);
  return norm;
@@ -65,7 +58,7 @@ export function enhance(input, config: Config): Tensor {
  */
  }
- export async function predict(image: Tensor4D, config: Config, idx: number, count: number): Promise<FaceRes> {
+ export async function predict(image: Tensor, config: Config, idx: number, count: number): Promise<FaceRes> {
  const obj: FaceRes = {
  age: 0 as number,
  gender: 'unknown' as Gender,
@@ -82,7 +75,7 @@ export async function predict(image: Tensor4D, config: Config, idx: number, coun
  skipped = 0;
  return new Promise(async (resolve) => {
  if (config.face.description?.enabled) {
- const enhanced = enhance(image, config);
+ const enhanced = enhance(image);
  const resT = model?.execute(enhanced) as Tensor[];
  lastTime = now();
  tf.dispose(enhanced);
@@ -93,7 +86,7 @@ export async function predict(image: Tensor4D, config: Config, idx: number, coun
  obj.gender = gender[0] <= 0.5 ? 'female' : 'male';
  obj.genderScore = Math.min(0.99, confidence);
  }
- const argmax = tf.argMax(resT.find((t) => t.shape[1] === 100) as Tensor1D, 1);
+ const argmax = tf.argMax(resT.find((t) => t.shape[1] === 100), 1);
  const ageIdx: number = (await argmax.data())[0];
  tf.dispose(argmax);
  const ageT = resT.find((t) => t.shape[1] === 100) as Tensor;
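The removed ("-") branch of enhance() adds an optional centered crop before resizing to the model input. A hedged sketch of that preprocessing with a hypothetical prepareFace helper, assuming a 4D face tensor already normalized to 0..1:

import * as tf from '@tensorflow/tfjs';

function prepareFace(face: tf.Tensor4D, modelHeight: number, modelWidth: number, cropMargin = 0): tf.Tensor {
  const cropSize: [number, number] = [modelHeight, modelWidth];
  const crop = cropMargin > 0
    // cut a symmetric margin from every edge using a normalized [y1, x1, y2, x2] crop box
    ? tf.image.cropAndResize(face, [[cropMargin, cropMargin, 1 - cropMargin, 1 - cropMargin]], [0], cropSize)
    : tf.image.resizeBilinear(face, cropSize, false);
  const norm = tf.mul(crop, 255); // detector output is 0..1, this model expects 0..255
  tf.dispose(crop);
  return norm;
}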

Some files were not shown because too many files have changed in this diff.