added example

README.md
# FaceAPI

Forked from **face-api.js** version **0.22.2**, released on March 22nd, 2020

- <https://github.com/justadudewhohacks/face-api.js>
- <https://www.npmjs.com/package/face-api.js>

## Note

I don't plan to maintain a separate distribution of **face-api.js**; this is only a temporary repository to use the latest available face-api with the latest available tensorflow/js, as the original face-api.js is not compatible with **tfjs 2.0+**.
If the original repository is updated, this one will become obsolete.

## Differences

- Removed tests, docs, examples
- Removed unnecessary package dependencies (karma, jasmine, etc.)
- Updated TypeScript build process to target ES2018 instead of dual ES5/ES6
- Changed browser bundle process to use ESBuild instead of Rollup
- Updated TFJS dependencies since backends were removed from @tensorflow/tfjs-core
- Updated mobileNetv1 model due to batchNorm() dependency
- Removed the following models as they are either obsolete or non-functional with tfjs 2.0+
  - mtcnn: Mostly obsolete
  - tinyYolov2: Non-functional since weights are missing

Which means the valid models are **tinyFaceDetector** and **mobileNetv1**.
Due to reduced code and changed build process, the resulting bundle is about **2x smaller** than the original!

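As a minimal sketch (assuming the models are already loaded and `input` is an image, video, or canvas element; option values here are only illustrative), the two remaining detectors can be invoked like this:

```js
// sketch only: run both remaining detectors on the same input
async function detectWithBothModels(input) {
  // TinyFaceDetector model
  const tiny = await faceapi.detectAllFaces(input, new faceapi.TinyFaceDetectorOptions({ inputSize: 512, scoreThreshold: 0.1 }));
  // SSD MobileNetV1 model
  const ssd = await faceapi.detectAllFaces(input, new faceapi.SsdMobilenetv1Options({ minConfidence: 0.1, maxResults: 5 }));
  return { tiny, ssd };
}
```
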
## Installation

**Important!**: This version of **face-api** does not embed the full version of **tfjs**, to enable dynamic loading of different versions of tfjs as well as reusability of tfjs for different purposes. *Load tfjs explicitly before loading face-api.*

For example, as an ESM script:

```html
<script src="https://cdnjs.cloudflare.com/ajax/libs/tensorflow/2.3.0/tf.es2017.js"></script>
<script src="/dist/face-api.js"></script> <!-- full version / 960KB -->
<script src="/dist/face-api.min.js"></script> <!-- minified version / 320KB -->
```

or as an NPM module:

```js
// npm install @tensorflow/tfjs @vladmandic/face-api

const tf = require('@tensorflow/tfjs');
const faceapi = require('@vladmandic/face-api');
```

## Weights

Pretrained models are included in `./weights`.

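As a minimal sketch (assuming the repository root is served over http so the weights are reachable at `/weights`, as in the example below), the pretrained models can be loaded like this, inside an async function:

```js
// sketch: load the bundled pretrained models over http from the weights folder
const modelPath = '/weights'; // assumes the repo root is served by a local web server
await faceapi.nets.tinyFaceDetector.load(modelPath);
await faceapi.nets.ssdMobilenetv1.load(modelPath);
await faceapi.nets.faceLandmark68Net.load(modelPath);
await faceapi.nets.ageGenderNet.load(modelPath);
```
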
## Build

Both **`./build`** (used by `import` or `require`) and **`./dist`** (used by `<script src...>`) folders are included by default, so there is no need for a build during install.

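For completeness, a sketch of ESM-style usage that resolves to the prebuilt `./build` output (assuming the package entry points are wired to that folder; the `require` form shown under Installation behaves the same way):

```js
// sketch: ESM-style imports resolved from the prebuilt ./build output
import * as tf from '@tensorflow/tfjs';
import * as faceapi from '@vladmandic/face-api';

// same faceapi namespace as the browser global exposed by ./dist/face-api.js
console.log(Object.keys(faceapi.nets)); // e.g. tinyFaceDetector, ssdMobilenetv1, ...
```
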
However, if you want to rebuild, use:

```shell
npm run build
```

Which will compile everything in `./src` into `./build` and create both standard and minified bundles.

## Documentation

For documentation refer to the original project at <https://github.com/justadudewhohacks/face-api.js>

## Example

A single new example that uses both models as well as all of the extensions is included in `/example/index.html`



example/index.html:

<!DOCTYPE html>
<html lang="en">
<head>
  <title>OpenImages Test</title>
  <meta http-equiv="content-type">
  <meta content="text/html">
  <meta charset="UTF-8">
  <script src="https://cdnjs.cloudflare.com/ajax/libs/tensorflow/2.3.0/tf.es2017.js" integrity="sha512-86mDh7XD/1SzFWCS4UwRHiIN8jak+8wHL4ird70XnjlLTrWsFbYCAez10OeivKcbvhAG1HC3qm2RlJrM3lnIqA==" crossorigin="anonymous"></script>
  <script src="/dist/face-api.js"></script>
  <style>
    body { font-family: monospace; background: black; color: white; font-size: 16px; line-height: 22px; margin: 0; }
  </style>
</head>
<body>
<div id="log"></div>
<script>
  /* global faceapi */ // face-api is loaded via <script src> in the <head> section
  /* tfjs should be loaded explicitly and is not embedded inside face-api.js to keep size small and allow reusability */

  // configuration options
  const modelPath = '/weights'; // relative path to models that will be loaded using http
  const imgSize = 512; // maximum image size in pixels
  const minScore = 0.1; // minimum score
  const maxResults = 5; // maximum number of results to return
  const samples = ['/example/sample (1).jpg', '/example/sample (2).jpg', '/example/sample (3).jpg', '/example/sample (4).jpg', '/example/sample (5).jpg', '/example/sample (6).jpg']; // sample images to be loaded using http

  // helper function to pretty-print json object to string
  function str(json) {
    let text = '<font color="lightblue">';
    text += JSON.stringify(json).replace(/{|}|"|\[|\]/g, '').replace(/,/g, ', ');
    text += '</font>';
    return text;
  }

  // helper function to print strings to html document as a log
  function log(...txt) {
    // eslint-disable-next-line no-console
    console.log(...txt);
    document.getElementById('log').innerHTML += `<br>${txt}`;
  }

  // helper function to draw detected faces
  function faces(name, title, id, data) {
    // create canvas to draw on
    const img = document.getElementById(id);
    const canvas = document.createElement('canvas');
    canvas.style.position = 'absolute';
    canvas.style.left = `${img.offsetLeft}px`;
    canvas.style.top = `${img.offsetTop}px`;
    canvas.width = img.width;
    canvas.height = img.height;
    const ctx = canvas.getContext('2d');
    // draw title
    ctx.font = '1rem sans-serif';
    ctx.fillStyle = 'black';
    ctx.fillText(name, 2, 15);
    ctx.fillText(title, 2, 35);
    for (const person of data) {
      // draw box around each face
      ctx.lineWidth = 3;
      ctx.strokeStyle = 'deepskyblue';
      ctx.fillStyle = 'deepskyblue';
      ctx.globalAlpha = 0.4;
      ctx.beginPath();
      ctx.rect(person.detection.box.x, person.detection.box.y, person.detection.box.width, person.detection.box.height);
      ctx.stroke();
      ctx.globalAlpha = 1;
      ctx.fillText(`${Math.round(100 * person.genderProbability)}% ${person.gender}`, person.detection.box.x, person.detection.box.y - 18);
      ctx.fillText(`${Math.round(person.age)} years`, person.detection.box.x, person.detection.box.y - 2);
      // draw face points for each face
      ctx.fillStyle = 'lightblue';
      ctx.globalAlpha = 0.5;
      const pointSize = 2;
      for (const pt of person.landmarks.positions) {
        ctx.beginPath();
        ctx.arc(pt.x, pt.y, pointSize, 0, 2 * Math.PI);
        ctx.fill();
      }
    }
    // add canvas to document
    document.body.appendChild(canvas);
  }

  // helper function to draw processed image and its results
  function print(title, img, data) {
    // eslint-disable-next-line no-console
    console.log('Results:', title, img, data);
    const el = new Image();
    el.id = Math.floor(Math.random() * 100000);
    el.src = img;
    el.width = imgSize;
    el.onload = () => faces(img, title, el.id, data);
    document.body.appendChild(el);
    // document.getElementById('log').innerHTML += `<img id="${id}" src="${img}" height=${imgSize}></img>`;
    // document.getElementById(el.id).addEventListener('load', () => faces(img, title, image.id, data));
    // setTimeout(() => faces(img, title, id, data), 1000);
  }

  // loads image and draws it on resized canvas so we always have correct image size regardless of source
  async function image(url) {
    return new Promise((resolve) => {
      const img = new Image();
      // wait until image is actually loaded
      img.addEventListener('load', () => {
        // resize image so larger axis is not bigger than limit
        const ratio = 1.0 * img.height / img.width;
        img.width = ratio <= 1 ? imgSize : 1.0 * imgSize / ratio;
        img.height = ratio >= 1 ? imgSize : 1.0 * imgSize * ratio;
        // create canvas and draw loaded image
        const canvas = document.createElement('canvas');
        canvas.height = img.height;
        canvas.width = img.width;
        const ctx = canvas.getContext('2d');
        ctx.drawImage(img, 0, 0, img.width, img.height);
        // return generated canvas to be used by tfjs during detection
        resolve(canvas);
      });
      // load image
      img.src = url;
    });
  }

  async function main() {
    await faceapi.nets.tinyFaceDetector.load(modelPath);
    await faceapi.nets.ssdMobilenetv1.load(modelPath);
    await faceapi.nets.ageGenderNet.load(modelPath);
    await faceapi.nets.faceLandmark68Net.load(modelPath);
    await faceapi.nets.faceRecognitionNet.load(modelPath);
    await faceapi.nets.faceExpressionNet.load(modelPath);
    const optionsTinyFace = new faceapi.TinyFaceDetectorOptions({ inputSize: imgSize, scoreThreshold: minScore });
    const optionsSSDMobileNet = new faceapi.SsdMobilenetv1Options({ minConfidence: minScore, maxResults });

    // initialize tfjs
    await faceapi.tf.setBackend('webgl');
    await faceapi.tf.enableProdMode();
    faceapi.tf.ENV.set('DEBUG', false);
    log('FaceAPI Test');
    log(`TensorFlow/JS Version: ${faceapi.tf.version_core} Backend ${faceapi.tf.getBackend()}`);
    log(`TF Flags: ${str(faceapi.tf.ENV.flags)}`);
    const engine = await faceapi.tf.engine();
    log(`TF Engine State: ${str(engine.state)}`);
    // loop through all images and try to process them
    log(`Start processing: ${samples.length} images ...<br>`);
    for (const img of samples) {
      // new line
      document.body.appendChild(document.createElement('br'));
      // load and resize image
      const canvas = await image(img);
      try {
        // actual model execution
        const dataTinyYolo = await faceapi
          .detectAllFaces(canvas, optionsTinyFace)
          .withFaceLandmarks()
          .withFaceExpressions()
          .withFaceDescriptors()
          .withAgeAndGender();
        // print results to screen
        print('TinyFace Detector', img, dataTinyYolo);
        // actual model execution
        const dataSSDMobileNet = await faceapi
          .detectAllFaces(canvas, optionsSSDMobileNet)
          .withFaceLandmarks()
          .withFaceExpressions()
          .withFaceDescriptors()
          .withAgeAndGender();
        // print results to screen
        print('SSD MobileNet', img, dataSSDMobileNet);
      } catch (err) {
        log(`Image: ${img} Error during processing ${str(err)}`);
        // eslint-disable-next-line no-console
        console.error(err);
      }
    }
  }

  // start processing as soon as page is loaded
  window.onload = main;
</script>
</body>
</html>