From 8bc4f095f474ee19cdff78c2ff2c1be450d15aa6 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Sun, 14 Mar 2021 08:47:38 -0400 Subject: [PATCH] create api specs --- CHANGELOG.md | 2 +- README.md | 169 +- TUTORIAL.md | 108 +- {example => demo}/facemesh.png | Bin {example => demo}/index.html | 0 {example => demo}/index.js | 2 +- {example => demo}/node-multiprocess-worker.js | 0 {example => demo}/node-multiprocess.js | 4 +- {example => demo}/node-singleprocess.js | 2 +- example/sample (1).jpg => demo/sample1.jpg | Bin example/sample (2).jpg => demo/sample2.jpg | Bin example/sample (3).jpg => demo/sample3.jpg | Bin example/sample (4).jpg => demo/sample4.jpg | Bin example/sample (5).jpg => demo/sample5.jpg | Bin example/sample (6).jpg => demo/sample6.jpg | Bin .../screenshot-images.png | Bin {example => demo}/webcam.html | 0 {example => demo}/webcam.js | 0 package-lock.json | 426 +++ package.json | 11 +- server/build.js | 26 +- server/changelog.js | 14 +- server/serve.js | 8 +- tsconfig.json | 15 + typedoc/assets/css/main.css | 2660 +++++++++++++ typedoc/assets/images/icons.png | Bin 0 -> 9615 bytes typedoc/assets/images/icons@2x.png | Bin 0 -> 28144 bytes typedoc/assets/images/widgets.png | Bin 0 -> 480 bytes typedoc/assets/images/widgets@2x.png | Bin 0 -> 855 bytes typedoc/assets/js/main.js | 248 ++ typedoc/assets/js/search.js | 1 + typedoc/classes/agegendernet.html | 825 ++++ typedoc/classes/boundingbox.html | 824 ++++ typedoc/classes/box.html | 828 ++++ typedoc/classes/composabletask.html | 257 ++ .../computeallfacedescriptorstask.html | 308 ++ .../computefacedescriptorstaskbase.html | 280 ++ .../computesinglefacedescriptortask.html | 308 ++ .../classes/detectallfacelandmarkstask.html | 332 ++ typedoc/classes/detectallfacestask.html | 321 ++ .../classes/detectfacelandmarkstaskbase.html | 283 ++ typedoc/classes/detectfacestaskbase.html | 274 ++ .../detectsinglefacelandmarkstask.html | 332 ++ typedoc/classes/detectsinglefacetask.html | 321 ++ 
typedoc/classes/dimensions.html | 257 ++ typedoc/classes/draw.drawbox.html | 298 ++ typedoc/classes/draw.drawboxoptions.html | 288 ++ typedoc/classes/draw.drawfacelandmarks.html | 298 ++ .../draw.drawfacelandmarksoptions.html | 316 ++ typedoc/classes/draw.drawtextfield.html | 393 ++ .../classes/draw.drawtextfieldoptions.html | 328 ++ typedoc/classes/facedetection.html | 402 ++ typedoc/classes/facedetectionnet.html | 715 ++++ typedoc/classes/faceexpressionnet.html | 828 ++++ typedoc/classes/faceexpressions.html | 302 ++ typedoc/classes/facelandmark68net.html | 870 +++++ typedoc/classes/facelandmark68tinynet.html | 865 +++++ typedoc/classes/facelandmarknet.html | 865 +++++ typedoc/classes/facelandmarks.html | 467 +++ typedoc/classes/facelandmarks5.html | 461 +++ typedoc/classes/facelandmarks68.html | 608 +++ typedoc/classes/facematch.html | 263 ++ typedoc/classes/facematcher.html | 360 ++ typedoc/classes/facerecognitionnet.html | 709 ++++ typedoc/classes/labeledbox.html | 861 +++++ typedoc/classes/labeledfacedescriptors.html | 276 ++ typedoc/classes/netinput.html | 508 +++ typedoc/classes/neuralnetwork.html | 638 ++++ typedoc/classes/objectdetection.html | 398 ++ typedoc/classes/point.html | 407 ++ typedoc/classes/predictedbox.html | 935 +++++ typedoc/classes/rect.html | 824 ++++ typedoc/classes/ssdmobilenetv1.html | 717 ++++ typedoc/classes/ssdmobilenetv1options.html | 217 ++ typedoc/classes/tinyfacedetector.html | 912 +++++ typedoc/classes/tinyfacedetectoroptions.html | 223 ++ typedoc/classes/tinyyolov2.html | 939 +++++ typedoc/classes/tinyyolov2options.html | 222 ++ typedoc/enums/draw.anchorposition.html | 245 ++ typedoc/enums/gender.html | 160 + typedoc/index.html | 3401 +++++++++++++++++ typedoc/interfaces/draw.idrawboxoptions.html | 253 ++ .../draw.idrawfacelandmarksoptions.html | 281 ++ .../draw.idrawtextfieldoptions.html | 287 ++ typedoc/interfaces/iboundingbox.html | 203 + typedoc/interfaces/idimensions.html | 174 + typedoc/interfaces/ifacedetecion.html | 
174 + typedoc/interfaces/ifacelandmarks.html | 174 + typedoc/interfaces/ifacematch.html | 174 + typedoc/interfaces/ipoint.html | 174 + typedoc/interfaces/irect.html | 203 + .../interfaces/issdmobilenetv1options.html | 168 + .../interfaces/itinyfacedetectoroptions.html | 175 + typedoc/interfaces/itinyyolov2options.html | 173 + typedoc/modules/draw.html | 363 ++ typedoc/modules/utils.html | 522 +++ 96 files changed, 34305 insertions(+), 158 deletions(-) rename {example => demo}/facemesh.png (100%) rename {example => demo}/index.html (100%) rename {example => demo}/index.js (97%) rename {example => demo}/node-multiprocess-worker.js (100%) rename {example => demo}/node-multiprocess.js (95%) rename {example => demo}/node-singleprocess.js (97%) rename example/sample (1).jpg => demo/sample1.jpg (100%) rename example/sample (2).jpg => demo/sample2.jpg (100%) rename example/sample (3).jpg => demo/sample3.jpg (100%) rename example/sample (4).jpg => demo/sample4.jpg (100%) rename example/sample (5).jpg => demo/sample5.jpg (100%) rename example/sample (6).jpg => demo/sample6.jpg (100%) rename example/screenshot.png => demo/screenshot-images.png (100%) rename {example => demo}/webcam.html (100%) rename {example => demo}/webcam.js (100%) create mode 100644 typedoc/assets/css/main.css create mode 100644 typedoc/assets/images/icons.png create mode 100644 typedoc/assets/images/icons@2x.png create mode 100644 typedoc/assets/images/widgets.png create mode 100644 typedoc/assets/images/widgets@2x.png create mode 100644 typedoc/assets/js/main.js create mode 100644 typedoc/assets/js/search.js create mode 100644 typedoc/classes/agegendernet.html create mode 100644 typedoc/classes/boundingbox.html create mode 100644 typedoc/classes/box.html create mode 100644 typedoc/classes/composabletask.html create mode 100644 typedoc/classes/computeallfacedescriptorstask.html create mode 100644 typedoc/classes/computefacedescriptorstaskbase.html create mode 100644 
typedoc/classes/computesinglefacedescriptortask.html create mode 100644 typedoc/classes/detectallfacelandmarkstask.html create mode 100644 typedoc/classes/detectallfacestask.html create mode 100644 typedoc/classes/detectfacelandmarkstaskbase.html create mode 100644 typedoc/classes/detectfacestaskbase.html create mode 100644 typedoc/classes/detectsinglefacelandmarkstask.html create mode 100644 typedoc/classes/detectsinglefacetask.html create mode 100644 typedoc/classes/dimensions.html create mode 100644 typedoc/classes/draw.drawbox.html create mode 100644 typedoc/classes/draw.drawboxoptions.html create mode 100644 typedoc/classes/draw.drawfacelandmarks.html create mode 100644 typedoc/classes/draw.drawfacelandmarksoptions.html create mode 100644 typedoc/classes/draw.drawtextfield.html create mode 100644 typedoc/classes/draw.drawtextfieldoptions.html create mode 100644 typedoc/classes/facedetection.html create mode 100644 typedoc/classes/facedetectionnet.html create mode 100644 typedoc/classes/faceexpressionnet.html create mode 100644 typedoc/classes/faceexpressions.html create mode 100644 typedoc/classes/facelandmark68net.html create mode 100644 typedoc/classes/facelandmark68tinynet.html create mode 100644 typedoc/classes/facelandmarknet.html create mode 100644 typedoc/classes/facelandmarks.html create mode 100644 typedoc/classes/facelandmarks5.html create mode 100644 typedoc/classes/facelandmarks68.html create mode 100644 typedoc/classes/facematch.html create mode 100644 typedoc/classes/facematcher.html create mode 100644 typedoc/classes/facerecognitionnet.html create mode 100644 typedoc/classes/labeledbox.html create mode 100644 typedoc/classes/labeledfacedescriptors.html create mode 100644 typedoc/classes/netinput.html create mode 100644 typedoc/classes/neuralnetwork.html create mode 100644 typedoc/classes/objectdetection.html create mode 100644 typedoc/classes/point.html create mode 100644 typedoc/classes/predictedbox.html create mode 100644 
typedoc/classes/rect.html create mode 100644 typedoc/classes/ssdmobilenetv1.html create mode 100644 typedoc/classes/ssdmobilenetv1options.html create mode 100644 typedoc/classes/tinyfacedetector.html create mode 100644 typedoc/classes/tinyfacedetectoroptions.html create mode 100644 typedoc/classes/tinyyolov2.html create mode 100644 typedoc/classes/tinyyolov2options.html create mode 100644 typedoc/enums/draw.anchorposition.html create mode 100644 typedoc/enums/gender.html create mode 100644 typedoc/index.html create mode 100644 typedoc/interfaces/draw.idrawboxoptions.html create mode 100644 typedoc/interfaces/draw.idrawfacelandmarksoptions.html create mode 100644 typedoc/interfaces/draw.idrawtextfieldoptions.html create mode 100644 typedoc/interfaces/iboundingbox.html create mode 100644 typedoc/interfaces/idimensions.html create mode 100644 typedoc/interfaces/ifacedetecion.html create mode 100644 typedoc/interfaces/ifacelandmarks.html create mode 100644 typedoc/interfaces/ifacematch.html create mode 100644 typedoc/interfaces/ipoint.html create mode 100644 typedoc/interfaces/irect.html create mode 100644 typedoc/interfaces/issdmobilenetv1options.html create mode 100644 typedoc/interfaces/itinyfacedetectoroptions.html create mode 100644 typedoc/interfaces/itinyyolov2options.html create mode 100644 typedoc/modules/draw.html create mode 100644 typedoc/modules/utils.html diff --git a/CHANGELOG.md b/CHANGELOG.md index 97f7276..2c20be1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,7 @@ Repository: **** ## Changelog -### **HEAD -> master** 2021/03/09 mandic00@live.com +### **HEAD -> master** 2021/03/13 mandic00@live.com ### **1.0.2** 2021/03/09 mandic00@live.com diff --git a/README.md b/README.md index f37c9ba..7a088f0 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,14 @@
-**Live Demo**: +**Live Demo**: + +
+ +## Documentation + +- [**Tutorial**](TUTORIAL.md) +- [**API Specification**](https://justadudewhohacks.github.io/face-api.js/docs/globals.html)
@@ -26,6 +33,7 @@ And since original Face-API was open-source, I've released this version as well Changes ended up being too large for a simple pull request and it ended up being a full-fledged version on its own + Plus many features were added since original inception


@@ -37,20 +45,25 @@ Plus many features were added since original inception ### Browser Browser example that uses static images and showcases both models -as well as all of the extensions is included in `/example/index.html` +as well as all of the extensions is included in `/demo/index.html` Example can be accessed directly using Git pages using URL: - + -Browser example that uses live webcam is included in `/example/webcam.html` +Browser example that uses live webcam is included in `/demo/webcam.html` Example can be accessed directly using Git pages using URL: - +
+**Demo using FaceAPI to process images** *Note: Photos shown below are taken by me* -![screenshot](example/screenshot.png) +![screenshot](demo/screenshot-images.png) + +**Demo using FaceAPI to process live webcam** + +![screenshot](demo/screenshot-webcam.png)
@@ -58,43 +71,49 @@ Example can be accessed directly using Git pages using URL: Two NodeJS examples are: -- `/example/node-singleprocess.js`: Regular usage of `FaceAPI` from `NodeJS` -- `/example/node-multiprocess.js`: Multiprocessing showcase that uses pool of worker processes (`node-multiprocess-worker.js` - Main starts fixed pool of worker processes with each worker having it's instance of `FaceAPI` - Workers communicate with main when they are ready and main dispaches job to each ready worker until job queue is empty +- `/demo/node-singleprocess.js`: + Regular usage of `FaceAPI` from `NodeJS` +- `/demo/node-multiprocess.js`: + Multiprocessing showcase that uses pool of worker processes + (`node-multiprocess-worker.js`) + Main starts fixed pool of worker processes with each worker having + it's instance of `FaceAPI` + Workers communicate with main when they are ready and main dispaches + job to each ready worker until job queue is empty ```json -2020-12-08 08:30:01 INFO: @vladmandic/face-api version 0.9.1 -2020-12-08 08:30:01 INFO: User: vlado Platform: linux Arch: x64 Node: v15.0.1 -2020-12-08 08:30:01 INFO: FaceAPI multi-process test -2020-12-08 08:30:01 STATE: Main: started worker: 265238 -2020-12-08 08:30:01 STATE: Main: started worker: 265244 -2020-12-08 08:30:02 STATE: Worker: PID: 265238 TensorFlow/JS 2.7.0 FaceAPI 0.9.1 Backend: tensorflow -2020-12-08 08:30:02 STATE: Worker: PID: 265244 TensorFlow/JS 2.7.0 FaceAPI 0.9.1 Backend: tensorflow -2020-12-08 08:30:02 STATE: Main: dispatching to worker: 265238 -2020-12-08 08:30:02 STATE: Main: dispatching to worker: 265244 -2020-12-08 08:30:02 DATA: Worker received message: 265238 { image: 'example/sample (1).jpg' } -2020-12-08 08:30:02 DATA: Worker received message: 265244 { image: 'example/sample (2).jpg' } -2020-12-08 08:30:04 DATA: Main: worker finished: 265238 detected faces: 3 -2020-12-08 08:30:04 STATE: Main: dispatching to worker: 265238 -2020-12-08 08:30:04 DATA: Main: worker finished: 265244 detected 
faces: 3 -2020-12-08 08:30:04 STATE: Main: dispatching to worker: 265244 -2020-12-08 08:30:04 DATA: Worker received message: 265238 { image: 'example/sample (3).jpg' } -2020-12-08 08:30:04 DATA: Worker received message: 265244 { image: 'example/sample (4).jpg' } -2020-12-08 08:30:06 DATA: Main: worker finished: 265238 detected faces: 3 -2020-12-08 08:30:06 STATE: Main: dispatching to worker: 265238 -2020-12-08 08:30:06 DATA: Worker received message: 265238 { image: 'example/sample (5).jpg' } -2020-12-08 08:30:06 DATA: Main: worker finished: 265244 detected faces: 4 -2020-12-08 08:30:06 STATE: Main: dispatching to worker: 265244 -2020-12-08 08:30:06 DATA: Worker received message: 265244 { image: 'example/sample (6).jpg' } -2020-12-08 08:30:07 DATA: Main: worker finished: 265238 detected faces: 5 -2020-12-08 08:30:07 STATE: Main: worker exit: 265238 0 -2020-12-08 08:30:08 DATA: Main: worker finished: 265244 detected faces: 4 -2020-12-08 08:30:08 INFO: Processed 12 images in 6826 ms -2020-12-08 08:30:08 STATE: Main: worker exit: 265244 0 +2021-03-14 08:42:03 INFO: @vladmandic/face-api version 1.0.2 +2021-03-14 08:42:03 INFO: User: vlado Platform: linux Arch: x64 Node: v15.7.0 +2021-03-14 08:42:03 INFO: FaceAPI multi-process test +2021-03-14 08:42:03 STATE: Main: started worker: 1888019 +2021-03-14 08:42:03 STATE: Main: started worker: 1888025 +2021-03-14 08:42:04 STATE: Worker: PID: 1888025 TensorFlow/JS 3.3.0 FaceAPI 1.0.2 Backend: tensorflow +2021-03-14 08:42:04 STATE: Worker: PID: 1888019 TensorFlow/JS 3.3.0 FaceAPI 1.0.2 Backend: tensorflow +2021-03-14 08:42:04 STATE: Main: dispatching to worker: 1888019 +2021-03-14 08:42:04 STATE: Main: dispatching to worker: 1888025 +2021-03-14 08:42:04 DATA: Worker received message: 1888019 { image: 'demo/sample1.jpg' } +2021-03-14 08:42:04 DATA: Worker received message: 1888025 { image: 'demo/sample2.jpg' } +2021-03-14 08:42:06 DATA: Main: worker finished: 1888025 detected faces: 3 +2021-03-14 08:42:06 STATE: Main: dispatching 
to worker: 1888025 +2021-03-14 08:42:06 DATA: Worker received message: 1888025 { image: 'demo/sample3.jpg' } +2021-03-14 08:42:06 DATA: Main: worker finished: 1888019 detected faces: 3 +2021-03-14 08:42:06 STATE: Main: dispatching to worker: 1888019 +2021-03-14 08:42:06 DATA: Worker received message: 1888019 { image: 'demo/sample4.jpg' } +2021-03-14 08:42:07 DATA: Main: worker finished: 1888025 detected faces: 3 +2021-03-14 08:42:07 STATE: Main: dispatching to worker: 1888025 +2021-03-14 08:42:07 DATA: Worker received message: 1888025 { image: 'demo/sample5.jpg' } +2021-03-14 08:42:08 DATA: Main: worker finished: 1888019 detected faces: 4 +2021-03-14 08:42:08 STATE: Main: dispatching to worker: 1888019 +2021-03-14 08:42:08 DATA: Worker received message: 1888019 { image: 'demo/sample6.jpg' } +2021-03-14 08:42:09 DATA: Main: worker finished: 1888025 detected faces: 5 +2021-03-14 08:42:09 STATE: Main: worker exit: 1888025 0 +2021-03-14 08:42:09 DATA: Main: worker finished: 1888019 detected faces: 4 +2021-03-14 08:42:09 INFO: Processed 15 images in 5944 ms +2021-03-14 08:42:09 STATE: Main: worker exit: 1888019 0 ``` -Note that `@tensorflow/tfjs-node` or `@tensorflow/tfjs-node-gpu` must be installed before using NodeJS example +Note that `@tensorflow/tfjs-node` or `@tensorflow/tfjs-node-gpu` +must be installed before using NodeJS example


@@ -127,7 +146,8 @@ Defaults are: Bundled `TFJS` can be used directly via export: `faceapi.tf` -Reason for additional `nobundle` version is if you want to include a specific version of TFJS and not rely on pre-packaged one +Reason for additional `nobundle` version is if you want to +include a specific version of TFJS and not rely on pre-packaged one `FaceAPI` is compatible with TFJS 2.0+ @@ -302,21 +322,19 @@ npm run dev By default it starts HTTP server on port 8000 and HTTPS server on port 8001 and can be accessed as: -- -- +- +- ```json -2021-01-10 08:39:00 INFO: @vladmandic/face-api version 0.10.2 -2021-01-10 08:39:00 INFO: User: vlado Platform: linux Arch: x64 Node: v15.4.0 -2021-01-10 08:39:00 INFO: Build: file startup all target: es2018 -2021-01-10 08:39:00 STATE: HTTP server listening: 8000 -2021-01-10 08:39:00 STATE: HTTP2 server listening: 8001 -2021-01-10 08:39:00 STATE: Monitoring: [ 'package.json', 'config.js', 'example', 'src', [length]: 4 ] -2021-01-10 08:39:00 STATE: Monitoring: [ 'package.json', 'config.js', 'example', 'src', [length]: 4 ] -2021-01-10 08:39:01 STATE: Build for: browserBundle type: tfjs: { modules: 1253, moduleBytes: 3997175, imports: 7, importBytes: 276, outputBytes: 1565414, outputFiles: 'dist/tfjs.esm.js' } -2021-01-10 08:39:01 STATE: Build for: browserBundle type: iife: { imports: 160, importBytes: 1797487, outputBytes: 1699552, outputFiles: 'dist/face-api.js' } -2021-01-10 08:39:01 STATE: Build for: browserBundle type: esm: { imports: 160, importBytes: 1797487, outputBytes: 1697086, outputFiles: 'dist/face-api.esm.js' } -2021-01-10 08:39:01 INFO: Compile: [ 'src/index.ts', [length]: 1 ] +2021-03-14 08:41:09 INFO: @vladmandic/face-api version 1.0.2 +2021-03-14 08:41:09 INFO: User: vlado Platform: linux Arch: x64 Node: v15.7.0 +2021-03-14 08:41:09 INFO: Build: file startup all target: es2018 +2021-03-14 08:41:09 STATE: HTTP server listening: 8000 +2021-03-14 08:41:09 STATE: HTTP2 server listening: 8001 +2021-03-14 08:41:09 STATE: 
Monitoring: [ 'package.json', 'demo', 'src', [length]: 3 ] +2021-03-14 08:41:10 STATE: Build for: browserBundle type: tfjs: { modules: 1258, moduleBytes: 4040087, imports: 7, importBytes: 276, outputBytes: 1072314, outputFiles: 'dist/tfjs.esm.js' } +2021-03-14 08:41:10 STATE: Build for: browserBundle type: iife: { imports: 160, importBytes: 1305679, outputBytes: 1151683, outputFiles: 'dist/face-api.js' } +2021-03-14 08:41:10 STATE: Build for: browserBundle type: esm: { imports: 160, importBytes: 1305679, outputBytes: 1151520, outputFiles: 'dist/face-api.esm.js' } ```
@@ -349,26 +367,28 @@ npm run build Build process uses script `build.js` that creates optimized build for each target: ```text -npm run build - -> @vladmandic/face-api@0.8.9 build /home/vlado/dev/face-api -> rimraf dist/* && node ./build.js +> @vladmandic/face-api@1.0.2 build +> rimraf dist/* types/* typedoc/* && node server/build.js ``` ```json -2021-01-10 08:42:01 INFO: @vladmandic/face-api version 0.10.2 -2021-01-10 08:42:01 INFO: User: vlado Platform: linux Arch: x64 Node: v15.4.0 -2021-01-10 08:42:01 INFO: Build: file startup all target: es2018 -2021-01-10 08:42:01 STATE: Build for: node type: tfjs: { imports: 1, importBytes: 143, outputBytes: 1042, outputFiles: 'dist/tfjs.esm.js' } -2021-01-10 08:42:01 STATE: Build for: node type: node: { imports: 160, importBytes: 233115, outputBytes: 132266, outputFiles: 'dist/face-api.node.js' } -2021-01-10 08:42:01 STATE: Build for: nodeGPU type: tfjs: { imports: 1, importBytes: 147, outputBytes: 1046, outputFiles: 'dist/tfjs.esm.js' } -2021-01-10 08:42:01 STATE: Build for: nodeGPU type: node: { imports: 160, importBytes: 233119, outputBytes: 132274, outputFiles: 'dist/face-api.node-gpu.js' } -2021-01-10 08:42:01 STATE: Build for: browserNoBundle type: tfjs: { imports: 1, importBytes: 276, outputBytes: 244, outputFiles: 'dist/tfjs.esm.js' } -2021-01-10 08:42:01 STATE: Build for: browserNoBundle type: esm: { imports: 160, importBytes: 232317, outputBytes: 129069, outputFiles: 'dist/face-api.esm-nobundle.js' } -2021-01-10 08:42:01 STATE: Build for: browserBundle type: tfjs: { modules: 1253, moduleBytes: 3997175, imports: 7, importBytes: 276, outputBytes: 1565414, outputFiles: 'dist/tfjs.esm.js' } -2021-01-10 08:42:02 STATE: Build for: browserBundle type: iife: { imports: 160, importBytes: 1797487, outputBytes: 1699552, outputFiles: 'dist/face-api.js' } -2021-01-10 08:42:02 STATE: Build for: browserBundle type: esm: { imports: 160, importBytes: 1797487, outputBytes: 1697086, outputFiles: 'dist/face-api.esm.js' } -2021-01-10 
08:42:02 INFO: Compile: [ 'src/index.ts', [length]: 1 ]``` +2021-03-14 08:39:21 INFO: @vladmandic/face-api version 1.0.2 +2021-03-14 08:39:21 INFO: User: vlado Platform: linux Arch: x64 Node: v15.7.0 +2021-03-14 08:39:21 INFO: Build: file startup all target: es2018 +2021-03-14 08:39:21 STATE: Build for: node type: tfjs: { imports: 1, importBytes: 143, outputBytes: 731, outputFiles: 'dist/tfjs.esm.js' } +2021-03-14 08:39:21 STATE: Build for: node type: node: { imports: 160, importBytes: 234096, outputBytes: 85371, outputFiles: 'dist/face-api.node.js' } +2021-03-14 08:39:21 STATE: Build for: nodeGPU type: tfjs: { imports: 1, importBytes: 147, outputBytes: 735, outputFiles: 'dist/tfjs.esm.js' } +2021-03-14 08:39:21 STATE: Build for: nodeGPU type: node: { imports: 160, importBytes: 234100, outputBytes: 85379, outputFiles: 'dist/face-api.node-gpu.js' } +2021-03-14 08:39:21 STATE: Build for: nodeCPU type: tfjs: { imports: 1, importBytes: 138, outputBytes: 726, outputFiles: 'dist/tfjs.esm.js' } +2021-03-14 08:39:21 STATE: Build for: nodeCPU type: node: { imports: 160, importBytes: 234091, outputBytes: 85370, outputFiles: 'dist/face-api.node-cpu.js' } +2021-03-14 08:39:21 STATE: Build for: browserNoBundle type: tfjs: { imports: 1, importBytes: 276, outputBytes: 244, outputFiles: 'dist/tfjs.esm.js' } +2021-03-14 08:39:21 STATE: Build for: browserNoBundle type: esm: { imports: 160, importBytes: 233609, outputBytes: 82634, outputFiles: 'dist/face-api.esm-nobundle.js' } +2021-03-14 08:39:22 STATE: Build for: browserBundle type: tfjs: { modules: 1258, moduleBytes: 4040087, imports: 7, importBytes: 276, outputBytes: 1072314, outputFiles: 'dist/tfjs.esm.js' } +2021-03-14 08:39:22 STATE: Build for: browserBundle type: iife: { imports: 160, importBytes: 1305679, outputBytes: 1151683, outputFiles: 'dist/face-api.js' } +2021-03-14 08:39:22 STATE: Build for: browserBundle type: esm: { imports: 160, importBytes: 1305679, outputBytes: 1151520, outputFiles: 'dist/face-api.esm.js' } 
+2021-03-14 08:39:22 INFO: Compile typings: [ 'src/index.ts', [length]: 1 ] +2021-03-14 08:39:27 INFO: Update Change log: [ '/home/vlado/dev/face-api/CHANGELOG.md', [length]: 1 ] +2021-03-14 08:39:27 INFO: Generate TypeDocs: [ 'src/index.ts', [length]: 1 ] ```
@@ -379,14 +399,7 @@ npm run build `FaceAPI` landmark model returns 68-point face mesh as detailed in the image below: -![facemesh](example/facemesh.png) - -
- -## Documentation - -- [**Tutorial**](TUTORIAL.md) -- [**API Documentation**](https://justadudewhohacks.github.io/face-api.js/docs/globals.html) +![facemesh](demo/facemesh.png)
diff --git a/TUTORIAL.md b/TUTORIAL.md index ed125b1..ae53954 100644 --- a/TUTORIAL.md +++ b/TUTORIAL.md @@ -1,4 +1,4 @@ -# FaceAPI Usage +# FaceAPI Tutorial ## Features @@ -38,7 +38,7 @@ All global neural network instances are exported via faceapi.nets: -``` javascript +```js console.log(faceapi.nets) // ageGenderNet // faceExpressionNet @@ -54,7 +54,7 @@ To load a model, you have to provide the corresponding manifest.json file as wel Assuming the models reside in **public/models**: -``` javascript +```js await faceapi.nets.ssdMobilenetv1.loadFromUri('/models') // accordingly for the other models: // await faceapi.nets.faceLandmark68Net.loadFromUri('/models') @@ -64,26 +64,26 @@ await faceapi.nets.ssdMobilenetv1.loadFromUri('/models') In a nodejs environment you can furthermore load the models directly from disk: -``` javascript +```js await faceapi.nets.ssdMobilenetv1.loadFromDisk('./models') ``` You can also load the model from a tf.NamedTensorMap: -``` javascript +```js await faceapi.nets.ssdMobilenetv1.loadFromWeightMap(weightMap) ``` Alternatively, you can also create own instances of the neural nets: -``` javascript +```js const net = new faceapi.SsdMobilenetv1() await net.loadFromUri('/models') ``` You can also load the weights as a Float32Array (in case you want to use the uncompressed models): -``` javascript +```js // using fetch net.load(await faceapi.fetchNetWeights('/models/face_detection_model.weights')) @@ -105,7 +105,7 @@ In the following **input** can be an HTML img, video or canvas element or the id ``` -``` javascript +```js const input = document.getElementById('myImg') // const input = document.getElementById('myVideo') // const input = document.getElementById('myCanvas') @@ -117,19 +117,19 @@ const input = document.getElementById('myImg') Detect all faces in an image. 
Returns **Array<[FaceDetection](#interface-face-detection)>**: -``` javascript +```js const detections = await faceapi.detectAllFaces(input) ``` Detect the face with the highest confidence score in an image. Returns **[FaceDetection](#interface-face-detection) | undefined**: -``` javascript +```js const detection = await faceapi.detectSingleFace(input) ``` By default **detectAllFaces** and **detectSingleFace** utilize the SSD Mobilenet V1 Face Detector. You can specify the face detector by passing the corresponding options object: -``` javascript +```js const detections1 = await faceapi.detectAllFaces(input, new faceapi.SsdMobilenetv1Options()) const detections2 = await faceapi.detectAllFaces(input, new faceapi.TinyFaceDetectorOptions()) ``` @@ -142,19 +142,19 @@ You can tune the options of each face detector as shown [here](#getting-started- Detect all faces in an image + computes 68 Point Face Landmarks for each detected face. Returns **Array<[WithFaceLandmarks>](#getting-started-utility-classes)>**: -``` javascript +```js const detectionsWithLandmarks = await faceapi.detectAllFaces(input).withFaceLandmarks() ``` Detect the face with the highest confidence score in an image + computes 68 Point Face Landmarks for that face. Returns **[WithFaceLandmarks>](#getting-started-utility-classes) | undefined**: -``` javascript +```js const detectionWithLandmarks = await faceapi.detectSingleFace(input).withFaceLandmarks() ``` You can also specify to use the tiny model instead of the default model: -``` javascript +```js const useTinyModel = true const detectionsWithLandmarks = await faceapi.detectAllFaces(input).withFaceLandmarks(useTinyModel) ``` @@ -165,13 +165,13 @@ const detectionsWithLandmarks = await faceapi.detectAllFaces(input).withFaceLand Detect all faces in an image + compute 68 Point Face Landmarks for each detected face. 
Returns **Array<[WithFaceDescriptor>>](#getting-started-utility-classes)>**: -``` javascript +```js const results = await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceDescriptors() ``` Detect the face with the highest confidence score in an image + compute 68 Point Face Landmarks and face descriptor for that face. Returns **[WithFaceDescriptor>>](#getting-started-utility-classes) | undefined**: -``` javascript +```js const result = await faceapi.detectSingleFace(input).withFaceLandmarks().withFaceDescriptor() ``` @@ -181,13 +181,13 @@ const result = await faceapi.detectSingleFace(input).withFaceLandmarks().withFac Detect all faces in an image + recognize face expressions of each face. Returns **Array<[WithFaceExpressions>>](#getting-started-utility-classes)>**: -``` javascript +```js const detectionsWithExpressions = await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceExpressions() ``` Detect the face with the highest confidence score in an image + recognize the face expressions for that face. Returns **[WithFaceExpressions>>](#getting-started-utility-classes) | undefined**: -``` javascript +```js const detectionWithExpressions = await faceapi.detectSingleFace(input).withFaceLandmarks().withFaceExpressions() ``` @@ -195,13 +195,13 @@ const detectionWithExpressions = await faceapi.detectSingleFace(input).withFaceL Detect all faces without face alignment + recognize face expressions of each face. Returns **Array<[WithFaceExpressions>](#getting-started-utility-classes)>**: -``` javascript +```js const detectionsWithExpressions = await faceapi.detectAllFaces(input).withFaceExpressions() ``` Detect the face with the highest confidence score without face alignment + recognize the face expression for that face. 
Returns **[WithFaceExpressions>](#getting-started-utility-classes) | undefined**: -``` javascript +```js const detectionWithExpressions = await faceapi.detectSingleFace(input).withFaceExpressions() ``` @@ -211,13 +211,13 @@ const detectionWithExpressions = await faceapi.detectSingleFace(input).withFaceE Detect all faces in an image + estimate age and recognize gender of each face. Returns **Array<[WithAge>>>](#getting-started-utility-classes)>**: -``` javascript +```js const detectionsWithAgeAndGender = await faceapi.detectAllFaces(input).withFaceLandmarks().withAgeAndGender() ``` Detect the face with the highest confidence score in an image + estimate age and recognize gender for that face. Returns **[WithAge>>>](#getting-started-utility-classes) | undefined**: -``` javascript +```js const detectionWithAgeAndGender = await faceapi.detectSingleFace(input).withFaceLandmarks().withAgeAndGender() ``` @@ -225,13 +225,13 @@ const detectionWithAgeAndGender = await faceapi.detectSingleFace(input).withFace Detect all faces without face alignment + estimate age and recognize gender of each face. Returns **Array<[WithAge>>](#getting-started-utility-classes)>**: -``` javascript +```js const detectionsWithAgeAndGender = await faceapi.detectAllFaces(input).withAgeAndGender() ``` Detect the face with the highest confidence score without face alignment + estimate age and recognize gender for that face. 
Returns **[WithAge>>](#getting-started-utility-classes) | undefined**: -``` javascript +```js const detectionWithAgeAndGender = await faceapi.detectSingleFace(input).withAgeAndGender() ``` @@ -239,7 +239,7 @@ const detectionWithAgeAndGender = await faceapi.detectSingleFace(input).withAgeA **Tasks can be composed as follows:** -``` javascript +```js // all faces await faceapi.detectAllFaces(input) await faceapi.detectAllFaces(input).withFaceExpressions() @@ -265,7 +265,7 @@ To perform face recognition, one can use faceapi.FaceMatcher to compare referenc First, we initialize the FaceMatcher with the reference data, for example we can simply detect faces in a **referenceImage** and match the descriptors of the detected faces to faces of subsequent images: -``` javascript +```js const results = await faceapi .detectAllFaces(referenceImage) .withFaceLandmarks() @@ -282,7 +282,7 @@ const faceMatcher = new faceapi.FaceMatcher(results) Now we can recognize a persons face shown in **queryImage1**: -``` javascript +```js const singleResult = await faceapi .detectSingleFace(queryImage1) .withFaceLandmarks() @@ -296,7 +296,7 @@ if (singleResult) { Or we can recognize all faces shown in **queryImage2**: -``` javascript +```js const results = await faceapi .detectAllFaces(queryImage2) .withFaceLandmarks() @@ -310,7 +310,7 @@ results.forEach(fd => { You can also create labeled reference descriptors as follows: -``` javascript +```js const labeledDescriptors = [ new faceapi.LabeledFaceDescriptors( 'obama', @@ -331,7 +331,7 @@ const faceMatcher = new faceapi.FaceMatcher(labeledDescriptors) Preparing the overlay canvas: -``` javascript +```js const displaySize = { width: input.width, height: input.height } // resize the overlay canvas to the input dimensions const canvas = document.getElementById('overlay') @@ -340,7 +340,7 @@ faceapi.matchDimensions(canvas, displaySize) face-api.js predefines some highlevel drawing functions, which you can utilize: -``` javascript +```js /* Display 
detected face bounding boxes */ const detections = await faceapi.detectAllFaces(input) // resize the detected boxes in case your displayed image has a different size than the original @@ -376,7 +376,7 @@ faceapi.draw.drawFaceExpressions(canvas, resizedResults, minProbability) You can also draw boxes with custom text ([DrawBox](https://github.com/justadudewhohacks/tfjs-image-recognition-base/blob/master/src/draw/DrawBox.ts)): -``` javascript +```js const box = { x: 50, y: 50, width: 100, height: 100 } // see DrawBoxOptions below const drawOptions = { @@ -389,7 +389,7 @@ drawBox.draw(document.getElementById('myCanvas')) DrawBox drawing options: -``` javascript +```js export interface IDrawBoxOptions { boxColor?: string lineWidth?: number @@ -400,7 +400,7 @@ export interface IDrawBoxOptions { Finally you can draw custom text fields ([DrawTextField](https://github.com/justadudewhohacks/tfjs-image-recognition-base/blob/master/src/draw/DrawTextField.ts)): -``` javascript +```js const text = [ 'This is a textline!', 'This is another textline!' 
@@ -417,7 +417,7 @@ drawBox.draw(document.getElementById('myCanvas')) DrawTextField drawing options: -``` javascript +```js export interface IDrawTextFieldOptions { anchorPosition?: AnchorPosition backgroundColor?: string @@ -441,7 +441,7 @@ export enum AnchorPosition { #### SsdMobilenetv1Options -``` javascript +```js export interface ISsdMobilenetv1Options { // minimum confidence threshold // default: 0.5 @@ -458,7 +458,7 @@ const options = new faceapi.SsdMobilenetv1Options({ minConfidence: 0.8 }) #### TinyFaceDetectorOptions -``` javascript +```js export interface ITinyFaceDetectorOptions { // size at which image is processed, the smaller the faster, // but less precise in detecting smaller faces, must be divisible @@ -483,7 +483,7 @@ const options = new faceapi.TinyFaceDetectorOptions({ inputSize: 320 }) #### IBox -``` javascript +```js export interface IBox { x: number y: number @@ -494,7 +494,7 @@ export interface IBox { #### IFaceDetection -``` javascript +```js export interface IFaceDetection { score: number box: Box @@ -503,7 +503,7 @@ export interface IFaceDetection { #### IFaceLandmarks -``` javascript +```js export interface IFaceLandmarks { positions: Point[] shift: Point @@ -512,7 +512,7 @@ export interface IFaceLandmarks { #### WithFaceDetection -``` javascript +```js export type WithFaceDetection = TSource & { detection: FaceDetection } @@ -520,7 +520,7 @@ export type WithFaceDetection = TSource & { #### WithFaceLandmarks -``` javascript +```js export type WithFaceLandmarks = TSource & { unshiftedLandmarks: FaceLandmarks landmarks: FaceLandmarks @@ -533,7 +533,7 @@ export type WithFaceLandmarks = TSource & { #### WithFaceDescriptor -``` javascript +```js export type WithFaceDescriptor = TSource & { descriptor: Float32Array } @@ -541,7 +541,7 @@ export type WithFaceDescriptor = TSource & { #### WithFaceExpressions -``` javascript +```js export type WithFaceExpressions = TSource & { expressions: FaceExpressions } @@ -549,7 +549,7 @@ export type 
WithFaceExpressions = TSource & { #### WithAge -``` javascript +```js export type WithAge = TSource & { age: number } @@ -557,7 +557,7 @@ export type WithAge = TSource & { #### WithGender -``` javascript +```js export type WithGender = TSource & { gender: Gender genderProbability: number @@ -577,7 +577,7 @@ export enum Gender { Instead of using the high-level API, you can directly use the forward methods of each neural network: -``` javascript +```js const detections1 = await faceapi.ssdMobilenetv1(input, options) const detections2 = await faceapi.tinyFaceDetector(input, options) const landmarks1 = await faceapi.detectFaceLandmarks(faceImage) @@ -587,7 +587,7 @@ const descriptor = await faceapi.computeFaceDescriptor(alignedFaceImage) #### Extracting a Canvas for an Image Region -``` javascript +```js const regionsToExtract = [ new faceapi.Rect(0, 0, 100, 100) ] @@ -598,7 +598,7 @@ const canvases = await faceapi.extractFaces(input, regionsToExtract) #### Euclidean Distance -``` javascript +```js // meant to be used for computing the euclidean distance between two face descriptors const dist = faceapi.euclideanDistance([0, 0], [0, 10]) console.log(dist) // 10 @@ -606,7 +606,7 @@ console.log(dist) // 10 #### Retrieve the Face Landmark Points and Contours -``` javascript +```js const landmarkPositions = landmarks.positions // or get the positions of individual contours, @@ -626,7 +626,7 @@ const rightEyeBrow = landmarks.getRightEyeBrow() ``` -``` javascript +```js const image = await faceapi.fetchImage('/images/example.png') console.log(image instanceof HTMLImageElement) // true @@ -638,7 +638,7 @@ myImg.src = image.src #### Fetching JSON -``` javascript +```js const json = await faceapi.fetchJson('/files/example.json') ``` @@ -649,7 +649,7 @@ const json = await faceapi.fetchJson('/files/example.json') ``` -``` javascript +```js async function uploadImage() { const imgFile = document.getElementById('myFileUpload').files[0] // create an HTMLImageElement from a Blob @@ 
-665,7 +665,7 @@ async function uploadImage() {