switched face embedding to mobileface

pull/280/head
Vladimir Mandic 2021-03-12 12:54:08 -05:00
parent 162ace9fc3
commit 12b0058a1b
10 changed files with 90 additions and 36 deletions


@@ -1,6 +1,6 @@
# @vladmandic/human
Version: **1.0.3**
Version: **1.1.0**
Description: **Human: AI-powered 3D Face Detection, Face Embedding & Recognition, Body Pose Tracking, Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction & Gesture Recognition**
Author: **Vladimir Mandic <mandic00@live.com>**
@@ -11,14 +11,16 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
### **HEAD -> main** 2021/03/11 mandic00@live.com
### **1.0.4** 2021/03/11 mandic00@live.com
- add face return tensor
- add test for face descriptors
- wip on embedding
- simplify face box coordinate calculations
- annotated models and removed gender-ssrnet
- autodetect inputsizes
### **origin/main** 2021/03/10 mandic00@live.com
### **1.0.3** 2021/03/10 mandic00@live.com
- strong typing for public classes and hide private classes


@@ -69,7 +69,7 @@ Default models in Human library are:
- **Gender Detection**: Oarriaga Gender
- **Age Detection**: SSR-Net Age IMDB
- **Body Analysis**: PoseNet
- **Face Embedding**: Sirius-AI MobileFaceNet Embedding
- **Face Embedding**: BecauseofAI MobileFace Embedding
Note that alternative models are provided and can be enabled via configuration.
For example, the `PoseNet` model can be swapped for the `BlazePose` model depending on the use case, as sketched below.
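
A minimal configuration sketch of such a switch, assuming the npm package import; the model path is illustrative (the Node demo later in this commit references `file://models/blazepose.json`):

```js
// hedged sketch: swap the body model from PoseNet to BlazePose via config only;
// import style and modelPath are illustrative, not prescriptive
import Human from '@vladmandic/human';

const human = new Human({
  body: { enabled: true, modelPath: '../models/blazepose.json' },
});
```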

TODO.md

@@ -8,3 +8,13 @@
- Explore EfficientPose
<https://github.com/daniegr/EfficientPose>
<https://github.com/PINTO0309/PINTO_model_zoo/tree/main/084_EfficientPose>
## WiP: Embedding
- Implement offsetRaw
  - full with and without rotation
  - full with and without embedding
  - full with and without mesh
  - embedding with and without mesh
  - boxRaw and meshRaw with and without mesh


@@ -121,9 +121,9 @@ export default {
},
embedding: {
enabled: false, // to improve accuracy of face embedding extraction it is recommended
// to enable detector.rotation and mesh.enabled
modelPath: '../models/mobilefacenet.json',
enabled: false, // to improve accuracy of face embedding extraction it is
// highly recommended to enable detector.rotation and mesh.enabled
modelPath: '../models/mobileface.json',
},
},
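
Putting that comment into practice, a hedged sketch of a configuration enabling embedding together with the recommended options (it mirrors the `userConfig` in the embedding demo further down):

```js
// minimal sketch: rotation and mesh improve alignment of the face crop
// fed to the embedding model, so enable all three together
const config = {
  face: {
    enabled: true,
    detector: { rotation: true },  // rotate face crop to a canonical orientation
    mesh: { enabled: true },       // refine the face box using mesh landmarks
    embedding: { enabled: true, modelPath: '../models/mobileface.json' },
  },
};
```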


@@ -84,11 +84,22 @@ let original;
async function calcSimmilariry(result) {
document.getElementById('compare-container').style.display = human.config.face.embedding.enabled ? 'block' : 'none';
if (!human.config.face.embedding.enabled) return;
if (!(result?.face?.length > 0) || (result?.face[0]?.embedding?.length !== 192)) return;
if (!(result?.face?.length > 0) || (result?.face[0]?.embedding?.length !== 256)) return;
if (!original) {
original = result;
if (result.face[0].tensor) {
const enhanced = human.enhance(result.face[0]);
if (enhanced) {
const c = document.getElementById('orig');
const squeeze = enhanced.squeeze();
human.tf.browser.toPixels(squeeze, c);
enhanced.dispose();
squeeze.dispose();
}
} else {
document.getElementById('compare-canvas').getContext('2d').drawImage(original.canvas, 0, 0, 200, 200);
}
}
const simmilarity = human.simmilarity(original?.face[0]?.embedding, result?.face[0]?.embedding);
document.getElementById('simmilarity').innerText = `simmilarity: ${Math.trunc(1000 * simmilarity) / 10}%`;
}
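
The descriptor length check changes from 192 to 256 because MobileFace emits 256-element embeddings, unlike the 192-element MobileFaceNet ones. For intuition, a hypothetical stand-in for `human.simmilarity()`, assuming a Euclidean-distance-based score (the library's actual normalization may differ):

```js
// compare two equal-length descriptors; returns 0..1 where 1 means identical
function similarity(a, b) {
  if (!a || !b || a.length !== b.length) return 0; // MobileFace descriptors have 256 elements
  const distance = Math.sqrt(a.reduce((sum, val, i) => sum + ((val - b[i]) ** 2), 0));
  return Math.max(0, 1 - distance); // clamp: very distant faces score 0
}
```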


@@ -26,9 +26,9 @@
<body>
<br>Sample Images:
<div id="images"></div>
<br>Selected Face<br>
<br>Selected Face (Enhanced)<br>
<canvas id="orig" style="width: 200px; height: 200px;"></canvas>
<br>Extracted Faces - click on a face to sort by simmilarity:<br>
<br><br>Extracted Faces - click on a face to sort by simmilarity:<br>
<div id="faces"></div>
</body>
</html>


@@ -11,7 +11,7 @@ const userConfig = {
enabled: true,
detector: { rotation: true, return: true },
mesh: { enabled: true },
embedding: { enabled: true, modelPath: '../models/mobilefacenet.json' },
embedding: { enabled: true },
iris: { enabled: false },
age: { enabled: false },
gender: { enabled: false },
@@ -21,12 +21,15 @@ const userConfig = {
gesture: { enabled: false },
body: { enabled: false },
};
const human = new Human(userConfig);
const human = new Human(userConfig); // new instance of human
const samples = ['../assets/sample-me.jpg', '../assets/sample6.jpg', '../assets/sample1.jpg', '../assets/sample4.jpg', '../assets/sample5.jpg', '../assets/sample3.jpg', '../assets/sample2.jpg'];
// const samples = ['../assets/sample-me.jpg', '../assets/sample6.jpg', '../assets/sample1.jpg', '../assets/sample4.jpg', '../assets/sample5.jpg', '../assets/sample3.jpg', '../assets/sample2.jpg',
// '../private/me (1).jpg', '../private/me (2).jpg', '../private/me (3).jpg', '../private/me (4).jpg', '../private/me (5).jpg', '../private/me (6).jpg', '../private/me (7).jpg', '../private/me (8).jpg',
// '../private/me (9).jpg', '../private/me (10).jpg', '../private/me (11).jpg', '../private/me (12).jpg', '../private/me (13).jpg'];
const all = [];
const all = []; // array that will hold all detected faces
function log(...msg) {
const dt = new Date();
@@ -38,14 +41,24 @@ function log(...msg) {
async function analyze(face) {
log('Face:', face);
const box = [[0.05, 0.15, 0.90, 0.85]]; // top, left, bottom, right
const crop = human.tf.image.cropAndResize(face.tensor.expandDims(0), box, [0], [200, 200]); // optionally do a tight box crop
// if we have face image tensor, enhance it and display it
if (face.tensor) {
const enhanced = human.enhance(face);
if (enhanced) {
const c = document.getElementById('orig');
human.tf.browser.toPixels(crop.squeeze(), c);
const squeeze = enhanced.squeeze();
human.tf.browser.toPixels(squeeze, c);
enhanced.dispose();
squeeze.dispose();
}
}
// loop through all canvases that contain faces
const canvases = document.getElementsByClassName('face');
for (const canvas of canvases) {
// calculate simmilarity from selected face to current one in the loop
const res = human.simmilarity(face.embedding, all[canvas.tag.sample][canvas.tag.face].embedding);
// draw the canvas and simmilarity score
canvas.title = res;
await human.tf.browser.toPixels(all[canvas.tag.sample][canvas.tag.face].tensor, canvas);
const ctx = canvas.getContext('2d');
@@ -55,6 +68,8 @@ async function analyze(face) {
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
ctx.fillText(`${(100 * res).toFixed(1)}%`, 4, 20);
}
// sort all faces by simmilarity
const sorted = document.getElementById('faces');
[...sorted.children]
.sort((a, b) => parseFloat(b.title) - parseFloat(a.title))
@@ -70,22 +85,26 @@ async function faces(index, res) {
canvas.width = 200;
canvas.height = 200;
canvas.className = 'face';
// mouse click on any face canvas triggers analysis
canvas.addEventListener('click', (evt) => {
log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face);
analyze(all[evt.target.tag.sample][evt.target.tag.face]);
});
// if we actually got face image tensor, draw canvas with that face
if (res.face[i].tensor) {
human.tf.browser.toPixels(res.face[i].tensor, canvas);
document.getElementById('faces').appendChild(canvas);
}
}
}
async function add(index) {
log('Add image:', samples[index]);
return new Promise((resolve) => {
const img = new Image(100, 100);
img.onload = () => {
human.detect(img).then((res) => faces(index, res));
document.getElementById('images').appendChild(img);
img.onload = () => { // must wait until image is loaded
human.detect(img).then((res) => faces(index, res)); // then wait until image is analyzed
document.getElementById('images').appendChild(img); // and finally we can add it
resolve(true);
};
img.title = samples[index];
@@ -95,7 +114,7 @@ async function add(index) {
async function main() {
await human.load();
for (const i in samples) await add(i);
for (const i in samples) await add(i); // download and analyze all images
log('Ready');
}


@@ -1,8 +1,10 @@
const log = require('@vladmandic/pilogger');
const fs = require('fs');
const process = require('process');
// for Node, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
// load specific version of Human library that matches TensorFlow mode
const Human = require('../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
@@ -15,15 +17,16 @@ const myConfig = {
async: false,
face: {
enabled: true,
detector: { modelPath: 'file://models/blazeface-back.json', enabled: true },
detector: { modelPath: 'file://models/blazeface-back.json', enabled: true, rotation: false },
mesh: { modelPath: 'file://models/facemesh.json', enabled: true },
iris: { modelPath: 'file://models/iris.json', enabled: true },
age: { modelPath: 'file://models/age.json', enabled: true },
gender: { modelPath: 'file://models/gender.json', enabled: true },
emotion: { modelPath: 'file://models/emotion.json', enabled: true },
embedding: { modelPath: 'file://models/mobileface.json', enabled: true },
},
// body: { modelPath: 'file://models/blazepose.json', modelType: 'blazepose', enabled: true },
body: { modelPath: 'file://models/posenet.json', modelType: 'posenet', enabled: true },
// body: { modelPath: 'file://models/blazepose.json', enabled: true },
body: { modelPath: 'file://models/posenet.json', enabled: true },
hand: {
enabled: true,
detector: { modelPath: 'file://models/handdetect.json' },

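For reference, a minimal usage sketch built on the requires and `myConfig` above; the file name is illustrative and `tf.node.decodeImage` comes from `tfjs-node`:

```js
// decode an image from disk and run the full detection pipeline on it
const human = new Human(myConfig);
async function detectFile(name) {
  const buffer = fs.readFileSync(name);
  const tensor = tf.node.decodeImage(buffer, 3); // 3-channel RGB tensor
  const result = await human.detect(tensor);
  tensor.dispose(); // caller owns the input tensor
  return result;
}
detectFile('input.jpg').then((res) => log.data('faces detected:', res.face.length));
```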

@@ -1,6 +1,6 @@
{
"name": "@vladmandic/human",
"version": "1.0.4",
"version": "1.1.0",
"description": "Human: AI-powered 3D Face Detection, Face Embedding & Recognition, Body Pose Tracking, Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction & Gesture Recognition",
"sideEffects": false,
"main": "dist/human.node.js",
@@ -54,13 +54,13 @@
"@tensorflow/tfjs-layers": "^3.3.0",
"@tensorflow/tfjs-node": "^3.3.0",
"@tensorflow/tfjs-node-gpu": "^3.3.0",
"@types/node": "^14.14.33",
"@types/node": "^14.14.34",
"@typescript-eslint/eslint-plugin": "^4.17.0",
"@typescript-eslint/parser": "^4.17.0",
"@vladmandic/pilogger": "^0.2.14",
"chokidar": "^3.5.1",
"dayjs": "^1.10.4",
"esbuild": "^0.9.0",
"esbuild": "=0.9.0",
"eslint": "^7.21.0",
"eslint-config-airbnb-base": "^14.2.1",
"eslint-plugin-import": "^2.22.1",


@@ -151,6 +151,11 @@ class Human {
return 0;
}
enhance(input: any): any {
if (this.config.face.embedding.enabled) return embedding.enhance(input);
return null;
}
// preload models, not explicitly required as it's done automatically on first use
async load(userConfig = null) {
this.state = 'load';
@@ -359,11 +364,11 @@ class Human {
// run embedding, inherits face from blazeface
this.#analyze('Start Embedding:');
if (this.config.async) {
embeddingRes = this.config.face.embedding.enabled ? embedding.predict(face.image, this.config) : [];
embeddingRes = this.config.face.embedding.enabled ? embedding.predict(face, this.config) : [];
} else {
this.state = 'run:embedding';
timeStamp = now();
embeddingRes = this.config.face.embedding.enabled ? await embedding.predict(face.image, this.config) : [];
embeddingRes = this.config.face.embedding.enabled ? await embedding.predict(face, this.config) : [];
this.#perf.embedding = Math.trunc(now() - timeStamp);
}
this.#analyze('End Embedding:');
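
When `config.async` is set, `embeddingRes` above holds a pending promise rather than a result; presumably the per-feature promises are resolved together further down the pipeline, roughly like this hedged sketch (that code is not part of this hunk):

```js
// resolve all concurrently started predictions in a single await
if (this.config.async) {
  [ageRes, genderRes, emotionRes, embeddingRes] = await Promise.all([ageRes, genderRes, emotionRes, embeddingRes]);
}
```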
@@ -388,6 +393,8 @@
// combine results
faceRes.push({
...face,
/*
confidence: face.confidence,
faceConfidence: face.faceConfidence,
boxConfidence: face.boxConfidence,
@@ -395,7 +402,9 @@
mesh: face.mesh,
boxRaw: face.boxRaw,
meshRaw: face.meshRaw,
offsetRaw: face.offsetRaw,
annotations: face.annotations,
*/
age: ageRes.age,
gender: genderRes.gender,
genderConfidence: genderRes.confidence,