updated face description

pull/280/head
Vladimir Mandic 2021-03-24 11:08:49 -04:00
parent f296ed1427
commit cece9f71fc
8 changed files with 1423 additions and 59 deletions

File diff suppressed because it is too large


@@ -6,7 +6,7 @@
 # Human Library
-**3D Face Detection & Rotation Tracking, Face Embedding & Recognition,**
+**3D Face Detection & Rotation Tracking, Face Description & Recognition,**
 **Body Pose Tracking, 3D Hand & Finger Tracking,**
 **Iris Analysis, Age & Gender & Emotion Prediction,**
 **Gesture Recognition**
@@ -46,8 +46,9 @@ Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) fo
 - [**Usage & Functions**](https://github.com/vladmandic/human/wiki/Usage)
 - [**Configuration Details**](https://github.com/vladmandic/human/wiki/Configuration)
 - [**Output Details**](https://github.com/vladmandic/human/wiki/Outputs)
-- [**Face Recognition & Face Embedding**](https://github.com/vladmandic/human/wiki/Embedding)
+- [**Face Recognition & Face Description**](https://github.com/vladmandic/human/wiki/Embedding)
 - [**Gesture Recognition**](https://github.com/vladmandic/human/wiki/Gesture)
+- [**Common Issues**](https://github.com/vladmandic/human/wiki/Issues)
 <br>
@@ -144,12 +145,10 @@ Default models in Human library are:
 - **Face Detection**: MediaPipe BlazeFace-Back
 - **Face Mesh**: MediaPipe FaceMesh
+- **Face Description**: HSE FaceRes
 - **Face Iris Analysis**: MediaPipe Iris
 - **Emotion Detection**: Oarriaga Emotion
-- **Gender Detection**: Oarriaga Gender
-- **Age Detection**: SSR-Net Age IMDB
 - **Body Analysis**: PoseNet
-- **Face Embedding**: BecauseofAI MobileFace Embedding
 Note that alternative models are provided and can be enabled via configuration
 For example, `PoseNet` model can be switched for `BlazePose` model depending on the use case
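The model swap the README mentions is a configuration change rather than a code change. A minimal sketch, assuming v1.x configuration keys; the exact model file names under `models/` are assumptions, not taken from this diff:

```js
// Hypothetical sketch: switch the body model from the default PoseNet to
// BlazePose by pointing body.modelPath at the alternative model file.
// The file names are illustrative assumptions.
import Human from '@vladmandic/human';

const human = new Human({
  body: {
    enabled: true,
    modelPath: '../models/blazepose.json', // default would be '../models/posenet.json'
  },
});
```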


@@ -13,7 +13,7 @@
   <link rel="manifest" href="./manifest.webmanifest">
   <link rel="shortcut icon" href="../favicon.ico" type="image/x-icon">
   <link rel="apple-touch-icon" href="../assets/icon.png">
-  <script src="./embedding.js" type="module"></script>
+  <script src="./facematch.js" type="module"></script>
   <style>
     @font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../assets/lato-light.woff2') }
     html { font-family: 'Lato', 'Segoe UI'; font-size: 24px; font-variant: small-caps; }
@@ -27,12 +27,12 @@
   <div style="display: flex">
     <div>
       Selected Face<br>
-      <canvas id="orig" style="width: 200px; height: 200px;"></canvas>
+      <canvas id="orig" style="width: 200px; height: 200px; padding: 20px"></canvas>
     </div>
     <div style="width: 20px"></div>
     <div>
       Sample Images<br>
-      <div id="images"></div>
+      <div id="images" style="display: flex; flex-wrap: wrap; width: 85vw"></div>
     </div>
   </span>
   <span id="desc" style="visibility: hidden; font-size: 0.4rem;"></span><br>


@@ -30,6 +30,9 @@ const human = new Human(userConfig); // new instance of human
 const all = []; // array that will hold all detected faces
 let db = []; // array that holds all known faces
+const minScore = 0.6;
+const minConfidence = 0.8;
 function log(...msg) {
   const dt = new Date();
   const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
@@ -37,13 +40,30 @@ function log(...msg) {
   console.log(ts, ...msg);
 }
+async function getFaceDB() {
+  // download db with known faces
+  try {
+    const res = await fetch('/demo/faces.json');
+    db = (res && res.ok) ? await res.json() : [];
+    for (const rec of db) {
+      rec.embedding = rec.embedding.map((a) => parseFloat(a.toFixed(4)));
+    }
+  } catch (err) {
+    log('Could not load faces database', err);
+  }
+}
 async function analyze(face) {
+  // refresh faces database
+  await getFaceDB();
   // if we have face image tensor, enhance it and display it
   if (face.tensor) {
     const enhanced = human.enhance(face);
-    // const desc = document.getElementById('desc');
-    // desc.innerText = `{"name":"unknown", "source":"${face.fileName}", "embedding":[${face.embedding}]},`;
-    navigator.clipboard.writeText(`{"name":"unknown", "source":"${face.fileName}", "embedding":[${face.embedding}]},`);
+    const desc = document.getElementById('desc');
+    desc.innerText = `{"name":"unknown", "source":"${face.fileName}", "embedding":[${face.embedding}]},`;
+    const embedding = face.embedding.map((a) => parseFloat(a.toFixed(4)));
+    navigator.clipboard.writeText(`{"name":"unknown", "source":"${face.fileName}", "embedding":[${embedding}]},`);
     if (enhanced) {
       const c = document.getElementById('orig');
       const squeeze = enhanced.squeeze().div(255);
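For context, the database that `getFaceDB()` loads is an array of records in exactly the shape `analyze()` copies to the clipboard. A hypothetical example record; the name and values are invented:

```js
// Illustrative /demo/faces.json content; field names mirror what analyze()
// writes to the clipboard, the embedding is truncated and the values made up.
const sampleDb = [
  { name: 'unknown', source: 'sample1.jpg', embedding: [0.1231, -0.0482, 0.2274] },
];
```

Rounding each embedding value to four decimals, as both `getFaceDB()` and `analyze()` now do, presumably keeps the copied JSON records compact without a meaningful loss of matching precision.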
@@ -73,11 +93,11 @@ async function analyze(face) {
     ctx.fillStyle = 'rgba(255, 255, 255, 1)';
     ctx.fillText(`${(100 * similarity).toFixed(1)}%`, 4, 24);
     ctx.font = 'small-caps 0.8rem "Lato"';
-    ctx.fillText(`${current.age}y ${(100 * current.genderConfidence).toFixed(1)}% ${current.gender}`, 4, canvas.height - 6);
+    ctx.fillText(`${current.age}y ${(100 * (current.genderConfidence || 0)).toFixed(1)}% ${current.gender}`, 4, canvas.height - 6);
     // identify person
-    // ctx.font = 'small-caps 1rem "Lato"';
-    // const person = await human.match(current.embedding, db);
-    // if (person.similarity) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30);
+    ctx.font = 'small-caps 1rem "Lato"';
+    const person = await human.match(current.embedding, db);
+    if (person.similarity && person.similarity > minScore && current.confidence > minConfidence) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30);
   }
   // sort all faces by similarity
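The previously commented-out identification path is now live: `human.match` compares one embedding against the whole database and returns the closest record. A usage sketch based only on the fields this demo reads (`similarity`, `name`); any other fields of the result are assumptions:

```js
// Sketch of the match flow as used above: find the best database match for a
// detected face and accept it only above the demo's two thresholds.
const person = await human.match(face.embedding, db); // best match from db
const accepted = person.similarity > minScore // embedding similarity threshold (0.6)
  && face.confidence > minConfidence; // face detection confidence threshold (0.8)
if (accepted) log(`identified ${person.name} (${(100 * person.similarity).toFixed(1)}%)`);
```

Gating on both scores means a low-quality detection cannot produce a confident-looking identification, which is the evident intent of the new `minScore` and `minConfidence` constants.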
@@ -90,7 +110,6 @@ async function analyze(face) {
 async function faces(index, res, fileName) {
   all[index] = res.face;
   for (const i in res.face) {
-    // log(res.face[i]);
     all[index][i].fileName = fileName;
     const canvas = document.createElement('canvas');
     canvas.tag = { sample: index, face: i };
@@ -109,10 +128,10 @@ async function faces(index, res, fileName) {
       const ctx = canvas.getContext('2d');
       ctx.font = 'small-caps 0.8rem "Lato"';
       ctx.fillStyle = 'rgba(255, 255, 255, 1)';
-      ctx.fillText(`${res.face[i].age}y ${(100 * res.face[i].genderConfidence).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6);
+      ctx.fillText(`${res.face[i].age}y ${(100 * (res.face[i].genderConfidence || 0)).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6);
-      // const person = await human.match(res.face[i].embedding, db);
-      // ctx.font = 'small-caps 1rem "Lato"';
-      // if (person.similarity && person.similarity > 0.60) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30);
+      const person = await human.match(res.face[i].embedding, db);
+      ctx.font = 'small-caps 1rem "Lato"';
+      if (person.similarity && person.similarity > minScore && res.face[i].confidence > minConfidence) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30);
     }
   }
 }
@@ -145,24 +164,23 @@ async function main() {
   // pre-load human models
   await human.load();
-  // download db with known faces
-  let res = await fetch('/demo/faces.json');
-  db = (res && res.ok) ? await res.json() : [];
+  let res;
+  let images = [];
+  let dir = [];
+  await getFaceDB();
   // enumerate all sample images in /assets
   res = await fetch('/assets');
-  let dir = (res && res.ok) ? await res.json() : [];
-  let images = dir.filter((img) => (img.endsWith('.jpg') && img.includes('sample')));
+  dir = (res && res.ok) ? await res.json() : [];
+  images = images.concat(dir.filter((img) => (img.endsWith('.jpg') && img.includes('sample'))));
   // enumerate additional private test images in /private, not included in git repository
   res = await fetch('/private/me');
   dir = (res && res.ok) ? await res.json() : [];
   images = images.concat(dir.filter((img) => (img.endsWith('.jpg'))));
-  // enumerate just possible error images, not included in git repository
-  // res = await fetch('/private/err');
-  // dir = (res && res.ok) ? await res.json() : [];
-  // images = dir.filter((img) => (img.endsWith('.jpg')));
+  // enumerate additional error images, not included in git repository
+  res = await fetch('/private/err');
+  dir = (res && res.ok) ? await res.json() : [];
+  images = images.concat(dir.filter((img) => (img.endsWith('.jpg'))));
   // download and analyze all images
   log('Enumerated:', images.length, 'images');

File diff suppressed because one or more lines are too long


@@ -1,7 +1,7 @@
 {
   "name": "@vladmandic/human",
   "version": "1.2.4",
-  "description": "Human: AI-powered 3D Face Detection, Face Embedding & Recognition, Body Pose Tracking, Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction & Gesture Recognition",
+  "description": "Human: AI-powered 3D Face Detection, Face Description & Recognition, Body Pose Tracking, Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction & Gesture Recognition",
   "sideEffects": false,
   "main": "dist/human.node.js",
   "module": "dist/human.esm.js",


@@ -218,7 +218,7 @@ const config: Config = {
   },
   description: {
-    enabled: true, // to improve accuracy of face embedding extraction it is
+    enabled: true, // to improve accuracy of face description extraction it is
     // recommended to enable detector.rotation and mesh.enabled
     modelPath: '../models/faceres.json',
     skipFrames: 31, // how many frames to go without re-running the detector
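A sketch of a user configuration that follows that comment's recommendation; the key names come from fields visible in this diff (`detector.rotation`, `mesh.enabled`, `description`), while the overall nesting under `face` is an assumption:

```js
// Hypothetical userConfig enabling the options the comment recommends for
// better face description accuracy; nesting under `face` is assumed and the
// modelPath is copied from this diff.
const userConfig = {
  face: {
    detector: { rotation: true }, // rotation correction before mesh/description
    mesh: { enabled: true },
    description: { enabled: true, modelPath: '../models/faceres.json' },
  },
};
```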

wiki

@@ -1 +1 @@
-Subproject commit ad7e00dab159a3730e1558e6e134a5727990babd
+Subproject commit 11e68676b2bf9aadd7ff1e2cc80dd4a70052f9d3