mirror of https://github.com/vladmandic/human
new module: face description
parent 79a434b384
commit 64adb7ebd8
@@ -1,6 +1,6 @@
 # @vladmandic/human
 
-Version: **1.1.10**
+Version: **1.2.0**
 Description: **Human: AI-powered 3D Face Detection, Face Embedding & Recognition, Body Pose Tracking, Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction & Gesture Recognition**
 
 Author: **Vladimir Mandic <mandic00@live.com>**
@@ -9,6 +9,10 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
 
 ## Changelog
 
+### **1.1.11** 2021/03/21 mandic00@live.com
+
+- refactor face classes
+
 ### **1.1.10** 2021/03/18 mandic00@live.com
 
 - cleanup
@@ -102,8 +102,8 @@ async function calcSimmilariry(result) {
       document.getElementById('compare-canvas').getContext('2d').drawImage(original.canvas, 0, 0, 200, 200);
     }
   }
-  const simmilarity = human.simmilarity(original?.face[0]?.embedding, result?.face[0]?.embedding);
-  document.getElementById('simmilarity').innerText = `simmilarity: ${Math.trunc(1000 * simmilarity) / 10}%`;
+  const similarity = human.similarity(original?.face[0]?.embedding, result?.face[0]?.embedding);
+  document.getElementById('similarity').innerText = `similarity: ${Math.trunc(1000 * similarity) / 10}%`;
 }
 
 // draws processed results and starts processing of a next frame
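For reference, the renamed helper returns a score in the 0..1 range which the demo truncates to one decimal place when rendering; a minimal sketch with a hypothetical score:

```js
// hedged sketch: similarity is a 0..1 score; the demo renders it as a
// percentage with one decimal place
const similarity = 0.8734;                       // hypothetical score
const pct = Math.trunc(1000 * similarity) / 10;  // trunc(873.4) / 10 = 87.3
console.log(`similarity: ${pct}%`);              // prints "similarity: 87.3%"
```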
@@ -27,7 +27,8 @@
   <div id="images"></div>
   <br>Selected Face (Enhanced):<br>
   <canvas id="orig" style="width: 200px; height: 200px;"></canvas>
-  <br><br>Extracted Faces - click on a face to sort by simmilarity and get a known face match:<br>
+  <span id="desc" style="visibility: hidden; font-size: 0.4rem;"></span>
+  <br><br>Extracted Faces - click on a face to sort by similarity and get a known face match:<br>
   <div id="faces"></div>
 </body>
 </html>
@@ -12,9 +12,10 @@ const userConfig = {
     mesh: { enabled: true },
     embedding: { enabled: true },
     iris: { enabled: false },
-    age: { enabled: true },
-    gender: { enabled: true },
+    age: { enabled: false },
+    gender: { enabled: false },
     emotion: { enabled: false },
+    description: { enabled: true },
   },
   hand: { enabled: false },
   gesture: { enabled: false },
@@ -40,41 +41,52 @@ async function analyze(face) {
   // if we have face image tensor, enhance it and display it
   if (face.tensor) {
     const enhanced = human.enhance(face);
+    // const desc = document.getElementById('desc');
+    // desc.innerText = `{"name":"unknown", "source":"${face.fileName}", "embedding":[${face.embedding}]},`;
+    navigator.clipboard.writeText(`{"name":"unknown", "source":"${face.fileName}", "embedding":[${face.embedding}]},`);
     if (enhanced) {
       const c = document.getElementById('orig');
-      const squeeze = enhanced.squeeze();
-      human.tf.browser.toPixels(squeeze, c);
+      const squeeze = enhanced.squeeze().div(255);
+      await human.tf.browser.toPixels(squeeze, c);
       enhanced.dispose();
       squeeze.dispose();
+      const ctx = c.getContext('2d');
+      ctx.font = 'small-caps 0.4rem "Lato"';
+      ctx.fillStyle = 'rgba(255, 255, 255, 1)';
     }
   }
 
   // loop through all canvases that contain faces
   const canvases = document.getElementsByClassName('face');
   for (const canvas of canvases) {
-    // calculate simmilarity from selected face to current one in the loop
-    const simmilarity = human.simmilarity(face.embedding, all[canvas.tag.sample][canvas.tag.face].embedding, 2);
+    // calculate similarity from selected face to current one in the loop
+    const current = all[canvas.tag.sample][canvas.tag.face];
+    const similarity = human.similarity(face.embedding, current.embedding, 2);
     // get best match
-    const person = (simmilarity > 0.99) ? await human.match(face.embedding, db) : { name: '' };
-    // draw the canvas and simmilarity score
-    canvas.title = simmilarity;
-    await human.tf.browser.toPixels(all[canvas.tag.sample][canvas.tag.face].tensor, canvas);
+    const person = await human.match(current.embedding, db);
+    // draw the canvas and similarity score
+    canvas.title = similarity;
+    await human.tf.browser.toPixels(current.tensor, canvas);
     const ctx = canvas.getContext('2d');
     ctx.font = 'small-caps 1rem "Lato"';
     ctx.fillStyle = 'rgba(0, 0, 0, 1)';
-    ctx.fillText(`${(100 * simmilarity).toFixed(1)}% ${person.name}`, 3, 23);
+    ctx.fillText(`${(100 * similarity).toFixed(1)}%`, 3, 23);
     ctx.fillStyle = 'rgba(255, 255, 255, 1)';
-    ctx.fillText(`${(100 * simmilarity).toFixed(1)}% ${person.name}`, 4, 24);
+    ctx.fillText(`${(100 * similarity).toFixed(1)}%`, 4, 24);
+    ctx.font = 'small-caps 0.8rem "Lato"';
+    ctx.fillText(`${current.age}y ${(100 * current.genderConfidence).toFixed(1)}% ${current.gender}`, 4, canvas.height - 6);
+    ctx.font = 'small-caps 1rem "Lato"';
+    if (person.similarity) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30);
   }
 
-  // sort all faces by simmilarity
+  // sort all faces by similarity
   const sorted = document.getElementById('faces');
   [...sorted.children]
     .sort((a, b) => parseFloat(b.title) - parseFloat(a.title))
    .forEach((canvas) => sorted.appendChild(canvas));
 }
 
-function faces(index, res, fileName) {
+async function faces(index, res, fileName) {
   all[index] = res.face;
   for (const i in res.face) {
     // log(res.face[i]);
@@ -87,13 +99,19 @@ function faces(index, res, fileName) {
     // mouse click on any face canvas triggers analysis
     canvas.addEventListener('click', (evt) => {
-      log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, all[evt.target.tag.sample][evt.target.tag.face]);
+      log('Select:', 'Gender:', all[evt.target.tag.sample][evt.target.tag.face].gender);
       analyze(all[evt.target.tag.sample][evt.target.tag.face]);
     });
     // if we actually got face image tensor, draw canvas with that face
     if (res.face[i].tensor) {
-      human.tf.browser.toPixels(res.face[i].tensor, canvas);
+      await human.tf.browser.toPixels(res.face[i].tensor, canvas);
       document.getElementById('faces').appendChild(canvas);
       const ctx = canvas.getContext('2d');
+      ctx.font = 'small-caps 0.8rem "Lato"';
+      ctx.fillStyle = 'rgba(255, 255, 255, 1)';
+      ctx.fillText(`${res.face[i].age}y ${(100 * res.face[i].genderConfidence).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6);
+      const person = await human.match(res.face[i].embedding, db);
+      ctx.font = 'small-caps 1rem "Lato"';
+      if (person.similarity && person.similarity > 0.60) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30);
     }
   }
 }
@@ -102,8 +120,8 @@ async function process(index, image) {
   return new Promise((resolve) => {
     const img = new Image(128, 128);
     img.onload = () => { // must wait until image is loaded
-      human.detect(img).then((res) => {
-        faces(index, res, image); // then wait until image is analyzed
+      human.detect(img).then(async (res) => {
+        await faces(index, res, image); // then wait until image is analyzed
         log('Add image:', index + 1, image, 'faces:', res.face.length);
         document.getElementById('images').appendChild(img); // and finally we can add it
         resolve(true);
@@ -136,10 +154,14 @@ async function main() {
   let images = dir.filter((img) => (img.endsWith('.jpg') && img.includes('sample')));
 
   // enumerate additional private test images in /private, not includded in git repository
-  res = await fetch('/private/err');
+  res = await fetch('/private/me');
   dir = (res && res.ok) ? await res.json() : [];
-  // images = images.concat(dir.filter((img) => (img.endsWith('.jpg'))));
-  images = dir.filter((img) => (img.endsWith('.jpg')));
+  images = images.concat(dir.filter((img) => (img.endsWith('.jpg'))));
+
+  // enumerate just possible error images, not includded in git repository
+  // res = await fetch('/private/err');
+  // dir = (res && res.ok) ? await res.json() : [];
+  // images = dir.filter((img) => (img.endsWith('.jpg')));
 
   // download and analyze all images
   log('Enumerated:', images.length, 'images');
demo/faces.json (124 changed lines)
File diff suppressed because one or more lines are too long
|
@ -93,7 +93,7 @@
|
|||
</div>
|
||||
<div id="compare-container" style="display: none" class="compare-image">
|
||||
<canvas id="compare-canvas" width="200px" height="200px"></canvas>
|
||||
<div id="simmilarity"></div>
|
||||
<div id="similarity"></div>
|
||||
</div>
|
||||
<div id="samples-container" class="samples-container"></div>
|
||||
<div id="log" class="log"></div>
|
||||
|
|
Binary file not shown.
File diff suppressed because one or more lines are too long
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"name": "@vladmandic/human",
|
||||
"version": "1.1.11",
|
||||
"version": "1.2.0",
|
||||
"description": "Human: AI-powered 3D Face Detection, Face Embedding & Recognition, Body Pose Tracking, Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction & Gesture Recognition",
|
||||
"sideEffects": false,
|
||||
"main": "dist/human.node.js",
|
||||
|
|
|
@@ -59,6 +59,11 @@ export interface Config {
     enabled: boolean,
     modelPath: string,
   },
+  description: {
+    enabled: boolean,
+    modelPath: string,
+    skipFrames: number,
+  },
   age: {
     enabled: boolean,
     modelPath: string,
@@ -212,21 +217,14 @@ const config: Config = {
       modelPath: '../models/iris.json',
     },
 
-    age: {
-      enabled: true,
-      modelPath: '../models/age.json',
+    description: {
+      enabled: true, // to improve accuracy of face embedding extraction it is
+      // recommended to enable detector.rotation and mesh.enabled
+      modelPath: '../models/faceres.json',
       skipFrames: 31, // how many frames to go without re-running the detector
       // only used for video inputs
     },
 
-    gender: {
-      enabled: true,
-      minConfidence: 0.1, // threshold for discarding a prediction
-      modelPath: '../models/gender.json',
-      skipFrames: 32, // how many frames to go without re-running the detector
-      // only used for video inputs
-    },
-
     emotion: {
       enabled: true,
       minConfidence: 0.1, // threshold for discarding a prediction
@@ -234,9 +232,23 @@ const config: Config = {
       modelPath: '../models/emotion.json',
     },
 
+    age: {
+      enabled: false, // obsolete, replaced by description module
+      modelPath: '../models/age.json',
+      skipFrames: 31, // how many frames to go without re-running the detector
+      // only used for video inputs
+    },
+
+    gender: {
+      enabled: false, // obsolete, replaced by description module
+      minConfidence: 0.1, // threshold for discarding a prediction
+      modelPath: '../models/gender.json',
+      skipFrames: 32, // how many frames to go without re-running the detector
+      // only used for video inputs
+    },
+
     embedding: {
-      enabled: false, // to improve accuracy of face embedding extraction it is
-      // highly recommended to enable detector.rotation and mesh.enabled
+      enabled: false, // obsolete, replaced by description module
       modelPath: '../models/mobileface.json',
     },
   },
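Putting the new defaults together, a minimal face configuration that opts into the new description module and keeps the now-obsolete modules off might look as follows (a sketch derived from the defaults above, not part of this commit):

```js
const config = {
  face: {
    enabled: true,
    mesh: { enabled: true }, // recommended together with detector.rotation for better embedding accuracy
    description: { enabled: true, modelPath: '../models/faceres.json', skipFrames: 31 },
    age: { enabled: false },       // obsolete, replaced by description module
    gender: { enabled: false },    // obsolete, replaced by description module
    embedding: { enabled: false }, // obsolete, replaced by description module
  },
};
```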
@@ -4,6 +4,7 @@ import * as age from './age/age';
 import * as gender from './gender/gender';
 import * as emotion from './emotion/emotion';
 import * as embedding from './embedding/embedding';
+import * as faceres from './faceres/faceres';
 
 type Tensor = typeof tf.Tensor;
 
@@ -33,6 +34,7 @@ export const detectFace = async (parent, input): Promise<any> => {
   let genderRes;
   let emotionRes;
   let embeddingRes;
+  let descRes;
   const faceRes: Array<{
     confidence: number,
     boxConfidence: number,
@@ -111,11 +113,23 @@ export const detectFace = async (parent, input): Promise<any> => {
       embeddingRes = parent.config.face.embedding.enabled ? await embedding.predict(face, parent.config) : [];
       parent.perf.embedding = Math.trunc(now() - timeStamp);
     }
-    parent.analyze('End Emotion:');
+    parent.analyze('End Embedding:');
 
+    // run emotion, inherits face from blazeface
+    parent.analyze('Start Description:');
+    if (parent.config.async) {
+      descRes = parent.config.face.description.enabled ? faceres.predict(face, parent.config) : [];
+    } else {
+      parent.state = 'run:description';
+      timeStamp = now();
+      descRes = parent.config.face.description.enabled ? await faceres.predict(face.image, parent.config) : [];
+      parent.perf.embedding = Math.trunc(now() - timeStamp);
+    }
+    parent.analyze('End Description:');
+
     // if async wait for results
     if (parent.config.async) {
-      [ageRes, genderRes, emotionRes, embeddingRes] = await Promise.all([ageRes, genderRes, emotionRes, embeddingRes]);
+      [ageRes, genderRes, emotionRes, embeddingRes, descRes] = await Promise.all([ageRes, genderRes, emotionRes, embeddingRes, descRes]);
     }
 
     parent.analyze('Finish Face:');
@@ -134,11 +148,11 @@ export const detectFace = async (parent, input): Promise<any> => {
     // combine results
     faceRes.push({
       ...face,
-      age: ageRes.age,
-      gender: genderRes.gender,
-      genderConfidence: genderRes.confidence,
+      age: descRes.age || ageRes.age,
+      gender: descRes.gender || genderRes.gender,
+      genderConfidence: descRes.genderConfidence || genderRes.confidence,
+      embedding: descRes.descriptor || embeddingRes,
       emotion: emotionRes,
-      embedding: embeddingRes,
       iris: (irisSize !== 0) ? Math.trunc(irisSize) / 100 : 0,
       angle,
       tensor: parent.config.face.detector.return ? face.image?.squeeze() : null,
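The merge above prefers the new description results and falls back to the legacy modules when those are enabled instead; a short sketch of the precedence with hypothetical values:

```js
// hedged sketch: descRes comes from faceres, the others from the legacy modules;
// `||` picks the faceres value whenever that module produced one
const descRes = { age: 23.5, gender: 'female', genderConfidence: 0.97, descriptor: [/* 1024 values */] };
const ageRes = {}; // legacy modules disabled -> empty results
const genderRes = {};
const embeddingRes = [];
const combined = {
  age: descRes.age || ageRes.age,                                     // 23.5
  gender: descRes.gender || genderRes.gender,                         // 'female'
  genderConfidence: descRes.genderConfidence || genderRes.confidence, // 0.97
  embedding: descRes.descriptor || embeddingRes,                      // 1024-element descriptor
};
```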
@@ -0,0 +1,146 @@
+import { log } from '../helpers';
+import * as tf from '../../dist/tfjs.esm.js';
+import * as profile from '../profile';
+
+let model;
+let last = { age: 0 };
+let skipped = Number.MAX_SAFE_INTEGER;
+
+type Tensor = typeof tf.Tensor;
+type DB = Array<{ name: string, source: string, embedding: number[] }>;
+
+export async function load(config) {
+  if (!model) {
+    model = await tf.loadGraphModel(config.face.description.modelPath);
+    if (config.debug) log(`load model: ${config.face.description.modelPath.match(/\/(.*)\./)[1]}`);
+  }
+  return model;
+}
+
+export function similarity(embedding1, embedding2, order = 2): number {
+  if (!embedding1 || !embedding2) return 0;
+  if (embedding1?.length === 0 || embedding2?.length === 0) return 0;
+  if (embedding1?.length !== embedding2?.length) return 0;
+  // general minkowski distance, euclidean distance is limited case where order is 2
+  const distance = 4.0 * embedding1
+    .map((val, i) => (Math.abs(embedding1[i] - embedding2[i]) ** order)) // distance squared
+    .reduce((sum, now) => (sum + now), 0) // sum all distances
+    ** (1 / order); // get root of
+  const res = Math.max(0, 100 - distance) / 100.0;
+  return res;
+}
+
+export function match(embedding: Array<number>, db: DB, threshold = 0) {
+  let best = { similarity: 0, name: '', source: '', embedding: [] as number[] };
+  if (!embedding || !db || !Array.isArray(embedding) || !Array.isArray(db)) return best;
+  for (const f of db) {
+    if (f.embedding && f.name) {
+      const perc = similarity(embedding, f.embedding);
+      if (perc > threshold && perc > best.similarity) best = { ...f, similarity: perc };
+    }
+  }
+  return best;
+}
+
+export function enhance(input): Tensor {
+  const image = tf.tidy(() => {
+    // input received from detector is already normalized to 0..1
+    // input is also assumed to be straightened
+    const tensor = input.image || input.tensor || input;
+    /*
+    // do a tight crop of image and resize it to fit the model
+    const box = [[0.05, 0.15, 0.85, 0.85]]; // empyrical values for top, left, bottom, right
+    if (!(tensor instanceof tf.Tensor)) return null;
+    const crop = (tensor.shape.length === 3)
+      ? tf.image.cropAndResize(tf.expandDims(tensor, 0), box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) // add batch dimension if missing
+      : tf.image.cropAndResize(tensor, box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
+    */
+    const crop = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false); // just resize to fit the embedding model
+
+    /*
+    // convert to black&white to avoid colorization impact
+    const rgb = [0.2989, 0.5870, 0.1140]; // factors for red/green/blue colors when converting to grayscale: https://www.mathworks.com/help/matlab/ref/rgb2gray.html
+    const [red, green, blue] = tf.split(crop, 3, 3);
+    const redNorm = tf.mul(red, rgb[0]);
+    const greenNorm = tf.mul(green, rgb[1]);
+    const blueNorm = tf.mul(blue, rgb[2]);
+    const grayscale = tf.addN([redNorm, greenNorm, blueNorm]);
+    const merge = tf.stack([grayscale, grayscale, grayscale], 3).squeeze(4);
+    */
+
+    /*
+    // optional increase image contrast
+    // or do it per-channel so mean is done on each channel
+    // or do it based on histogram
+    const mean = merge.mean();
+    const factor = 5;
+    const contrast = merge.sub(mean).mul(factor).add(mean);
+    */
+    /*
+    // normalize brightness from 0..1
+    const darken = crop.sub(crop.min());
+    const lighten = darken.div(darken.max());
+    */
+    const norm = crop.mul(255);
+
+    return norm;
+  });
+  return image;
+}
+
+export async function predict(image, config) {
+  if (!model) return null;
+  if ((skipped < config.face.description.skipFrames) && config.videoOptimized && last.age && (last.age > 0)) {
+    skipped++;
+    return last;
+  }
+  if (config.videoOptimized) skipped = 0;
+  else skipped = Number.MAX_SAFE_INTEGER;
+  return new Promise(async (resolve) => {
+    // const resize = tf.image.resizeBilinear(image, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
+    // const enhanced = tf.mul(resize, [255.0]);
+    // tf.dispose(resize);
+    const enhanced = enhance(image);
+
+    let resT;
+    const obj = {
+      age: <number>0,
+      gender: <string>'unknown',
+      genderConfidence: <number>0,
+      descriptor: <number[]>[] };
+
+    if (!config.profile) {
+      if (config.face.description.enabled) resT = await model.predict(enhanced);
+    } else {
+      const profileAge = config.face.description.enabled ? await tf.profile(() => model.predict(enhanced)) : {};
+      resT = profileAge.result.clone();
+      profileAge.result.dispose();
+      profile.run('age', profileAge);
+    }
+    tf.dispose(enhanced);
+
+    if (resT) {
+      tf.tidy(() => {
+        const gender = resT.find((t) => t.shape[1] === 1).dataSync();
+        const confidence = Math.trunc(200 * Math.abs((gender[0] - 0.5))) / 100;
+        if (confidence > config.face.gender.minConfidence) {
+          obj.gender = gender[0] <= 0.5 ? 'female' : 'male';
+          obj.genderConfidence = Math.min(0.99, confidence);
+        }
+        const age = resT.find((t) => t.shape[1] === 100).argMax(1).dataSync()[0];
+        const all = resT.find((t) => t.shape[1] === 100).dataSync();
+        obj.age = Math.round(all[age - 1] > all[age + 1] ? 10 * age - 100 * all[age - 1] : 10 * age + 100 * all[age + 1]) / 10;
+
+        const desc = resT.find((t) => t.shape[1] === 1024);
+        // const reshape = desc.reshape([128, 8]);
+        // const reduce = reshape.logSumExp(1); // reduce 2nd dimension by calculating logSumExp on it
+
+        obj.descriptor = [...desc.dataSync()];
+      });
+      resT.forEach((t) => tf.dispose(t));
+    }
+
+    last = obj;
+    resolve(obj);
+  });
+}
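To make the new metric concrete: `similarity` computes a Minkowski distance (Euclidean for the default order 2), scales it by 4, and maps it onto a 0..1 score. A worked sketch with hypothetical 4-element descriptors (real faceres descriptors have 1024 elements):

```js
const a = [0.1, 0.2, 0.3, 0.4];
const b = [0.9, 0.8, 0.7, 0.6];

// identical descriptors: distance 0 -> similarity 1.0
similarity(a, a); // 1.0

// distance = 4 * sqrt(0.8^2 + 0.6^2 + 0.4^2 + 0.2^2) = 4 * sqrt(1.2) ≈ 4.38
// similarity = max(0, 100 - 4.38) / 100 ≈ 0.956
similarity(a, b);

// match() scans a database and keeps the entry with the highest similarity
const db = [{ name: 'person-a', source: 'a.jpg', embedding: b }];
match(a, db); // { name: 'person-a', source: 'a.jpg', embedding: [...], similarity: ≈0.956 }
```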
@@ -59,6 +59,7 @@ export async function predict(image, config) {
     enhance.dispose();
 
     if (genderT) {
+      if (!Array.isArray(genderT)) {
       const data = genderT.dataSync();
       if (alternative) {
         // returns two values 0..1, bigger one is prediction
@@ -74,9 +75,23 @@ export async function predict(image, config) {
           obj.confidence = Math.min(0.99, confidence);
         }
       }
-    }
       genderT.dispose();
-
+      } else {
+        const gender = genderT[0].dataSync();
+        const confidence = Math.trunc(200 * Math.abs((gender[0] - 0.5))) / 100;
+        if (confidence > config.face.gender.minConfidence) {
+          obj.gender = gender[0] <= 0.5 ? 'female' : 'male';
+          obj.confidence = Math.min(0.99, confidence);
+        }
+        /*
+        let age = genderT[1].argMax(1).dataSync()[0];
+        const all = genderT[1].dataSync();
+        age = Math.round(all[age - 1] > all[age + 1] ? 10 * age - 100 * all[age - 1] : 10 * age + 100 * all[age + 1]) / 10;
+        const descriptor = genderT[1].dataSync();
+        */
+        genderT.forEach((t) => tf.dispose(t));
+      }
+    }
     last = obj;
     resolve(obj);
   });
src/human.ts (19 changed lines)
@@ -6,6 +6,7 @@ import * as faceall from './faceall';
 import * as facemesh from './blazeface/facemesh';
 import * as age from './age/age';
 import * as gender from './gender/gender';
+import * as faceres from './faceres/faceres';
 import * as emotion from './emotion/emotion';
 import * as embedding from './embedding/embedding';
 import * as posenet from './posenet/posenet';
@@ -71,6 +72,7 @@ export class Human {
     emotion: Model | null,
     embedding: Model | null,
     nanodet: Model | null,
+    faceres: Model | null,
   };
   classes: {
     facemesh: typeof facemesh;
@@ -80,6 +82,7 @@ export class Human {
     body: typeof posenet | typeof blazepose;
     hand: typeof handpose;
     nanodet: typeof nanodet;
+    faceres: typeof faceres;
   };
   sysinfo: { platform: string, agent: string };
   perf: any;
@@ -112,6 +115,7 @@ export class Human {
       emotion: null,
       embedding: null,
       nanodet: null,
+      faceres: null,
     };
     // export access to image processing
     // @ts-ignore
@@ -122,6 +126,7 @@ export class Human {
       age,
       gender,
       emotion,
+      faceres,
       body: this.config.body.modelPath.includes('posenet') ? posenet : blazepose,
       hand: handpose,
       nanodet,
@@ -160,19 +165,20 @@ export class Human {
     return null;
   }
 
-  simmilarity(embedding1: Array<number>, embedding2: Array<number>): number {
-    if (this.config.face.embedding.enabled) return embedding.simmilarity(embedding1, embedding2);
+  similarity(embedding1: Array<number>, embedding2: Array<number>): number {
+    if (this.config.face.description.enabled) return faceres.similarity(embedding1, embedding2);
+    if (this.config.face.embedding.enabled) return embedding.similarity(embedding1, embedding2);
     return 0;
   }
 
   // eslint-disable-next-line class-methods-use-this
   enhance(input: Tensor): Tensor | null {
-    return embedding.enhance(input);
+    return faceres.enhance(input);
   }
 
   // eslint-disable-next-line class-methods-use-this
-  match(faceEmbedding: Array<number>, db: Array<{ name: string, source: string, embedding: number[] }>, threshold = 0): { name: string, source: string, simmilarity: number, embedding: number[] } {
-    return embedding.match(faceEmbedding, db, threshold);
+  match(faceEmbedding: Array<number>, db: Array<{ name: string, source: string, embedding: number[] }>, threshold = 0): { name: string, source: string, similarity: number, embedding: number[] } {
+    return faceres.match(faceEmbedding, db, threshold);
   }
 
   // preload models, not explicitly required as it's done automatically on first use
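A usage sketch of the renamed public API (`image1`, `image2`, and the `db` array are hypothetical inputs; `db` would follow the `{ name, source, embedding }` shape used by demo/faces.json):

```js
const human = new Human(config);
const res1 = await human.detect(image1);
const res2 = await human.detect(image2);

// similarity() now routes to faceres when face.description is enabled
// and falls back to the legacy embedding module otherwise
const score = human.similarity(res1.face[0].embedding, res2.face[0].embedding);

// match() returns the best database entry as { name, source, similarity, embedding }
const person = human.match(res1.face[0].embedding, db);
if (person.similarity > 0.6) console.log(`best match: ${person.name} (${(100 * person.similarity).toFixed(1)}%)`);
```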
|
@@ -204,6 +210,7 @@ export class Human {
       this.models.posenet,
       this.models.blazepose,
       this.models.nanodet,
+      this.models.faceres,
     ] = await Promise.all([
       this.models.face || (this.config.face.enabled ? facemesh.load(this.config) : null),
       this.models.age || ((this.config.face.enabled && this.config.face.age.enabled) ? age.load(this.config) : null),
@@ -214,6 +221,7 @@ export class Human {
       this.models.posenet || (this.config.body.enabled && this.config.body.modelPath.includes('posenet') ? posenet.load(this.config) : null),
       this.models.posenet || (this.config.body.enabled && this.config.body.modelPath.includes('blazepose') ? blazepose.load(this.config) : null),
       this.models.nanodet || (this.config.object.enabled ? nanodet.load(this.config) : null),
+      this.models.faceres || ((this.config.face.enabled && this.config.face.description.enabled) ? faceres.load(this.config) : null),
     ]);
     } else {
       if (this.config.face.enabled && !this.models.face) this.models.face = await facemesh.load(this.config);
@@ -225,6 +233,7 @@ export class Human {
       if (this.config.body.enabled && !this.models.posenet && this.config.body.modelPath.includes('posenet')) this.models.posenet = await posenet.load(this.config);
       if (this.config.body.enabled && !this.models.blazepose && this.config.body.modelPath.includes('blazepose')) this.models.blazepose = await blazepose.load(this.config);
       if (this.config.object.enabled && !this.models.nanodet) this.models.nanodet = await nanodet.load(this.config);
+      if (this.config.face.enabled && this.config.face.description.enabled && !this.models.faceres) this.models.faceres = await faceres.load(this.config);
     }
 
     if (this.#firstRun) {