add liveness module and facerecognition demo

pull/280/head
Vladimir Mandic 2021-11-09 14:37:50 -05:00
parent dfd1ee8418
commit 32539a10f5
15 changed files with 206 additions and 69 deletions

View File

@@ -9,8 +9,9 @@
## Changelog
### **HEAD -> main** 2021/11/08 mandic00@live.com
### **HEAD -> main** 2021/11/09 mandic00@live.com
- rebuild
- add type defs when working with relative path imports
- disable humangl backend if webgl 1.0 is detected

View File

@@ -45,6 +45,7 @@ JavaScript module using TensorFlow/JS Machine Learning library
- [*Live:* **Main Application**](https://vladmandic.github.io/human/demo/index.html)
- [*Live:* **Simple Application**](https://vladmandic.github.io/human/demo/typescript/index.html)
- [*Live:* **Face Extraction, Description, Identification and Matching**](https://vladmandic.github.io/human/demo/facematch/index.html)
- [*Live:* **Face Validation and Matching: FaceID**](https://vladmandic.github.io/human/demo/facerecognition/index.html)
- [*Live:* **Face Extraction and 3D Rendering**](https://vladmandic.github.io/human/demo/face3d/index.html)
- [*Live:* **Multithreaded Detection Showcasing Maximum Performance**](https://vladmandic.github.io/human/demo/multithread/index.html)
- [*Live:* **VR Model with Head, Face, Eye, Body and Hand tracking**](https://vladmandic.github.io/human-vrm/src/human-vrm.html)

View File

@@ -38,3 +38,12 @@ MoveNet MultiPose model does not work with WASM backend due to missing F32 broadcast
<https://github.com/tensorflow/tfjs/issues/5516>
<br><hr><br>
## Pending release notes:
New:
- new demo `demos/facerecognition` that utilizes multiple algorithms
to validate input before triggering face recognition - similar to **FaceID**
- new optional model `liveness`
that checks if input appears to be a real-world live image or a recording
best used together with `antispoof`, which checks if input appears to contain a realistic face
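A minimal sketch of using the two checks together, assuming the published package name and mirroring the demo's 0.6 `minConfidence` threshold (the element handling is illustrative):

```ts
import Human from '@vladmandic/human'; // assumed package entry point

const human = new Human({
  face: {
    enabled: true,
    antispoof: { enabled: true }, // realistic-face check, reported as face[n].real
    liveness: { enabled: true }, // live-input check, reported as face[n].live
  },
});

// gate any downstream action on both confidence scores
async function isLive(video: HTMLVideoElement): Promise<boolean> {
  const result = await human.detect(video);
  const face = result.face[0];
  return !!face && (face.real || 0) > 0.6 && (face.live || 0) > 0.6;
}
```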

View File

@@ -15,7 +15,8 @@ var humanConfig = {
description: { enabled: true },
iris: { enabled: true },
emotion: { enabled: false },
antispoof: { enabled: true }
antispoof: { enabled: true },
liveness: { enabled: true }
},
body: { enabled: false },
hand: { enabled: false },
@@ -23,10 +24,30 @@ var humanConfig = {
gesture: { enabled: true }
};
var options = {
faceDB: "../facematch/faces.json",
minConfidence: 0.6,
minSize: 224,
maxTime: 1e4
maxTime: 1e4,
blinkMin: 10,
blinkMax: 800
};
var ok = {
faceCount: false,
faceConfidence: false,
facingCenter: false,
blinkDetected: false,
faceSize: false,
antispoofCheck: false,
livenessCheck: false,
elapsedMs: 0
};
var allOk = () => ok.faceCount && ok.faceSize && ok.blinkDetected && ok.facingCenter && ok.faceConfidence && ok.antispoofCheck && ok.livenessCheck;
var blink = {
start: 0,
end: 0,
time: 0
};
var db = [];
var human = new Human(humanConfig);
human.env["perfadd"] = false;
human.draw.options.font = 'small-caps 18px "Lato"';
@@ -59,11 +80,7 @@ async function webCam() {
await ready;
dom.canvas.width = dom.video.videoWidth;
dom.canvas.height = dom.video.videoHeight;
const track = stream.getVideoTracks()[0];
const capabilities = track.getCapabilities ? track.getCapabilities() : "";
const settings = track.getSettings ? track.getSettings() : "";
const constraints = track.getConstraints ? track.getConstraints() : "";
log("video:", dom.video.videoWidth, dom.video.videoHeight, track.label, { stream, track, settings, constraints, capabilities });
log("video:", dom.video.videoWidth, dom.video.videoHeight, stream.getVideoTracks()[0].label);
dom.canvas.onclick = () => {
if (dom.video.paused)
dom.video.play();
@@ -80,18 +97,6 @@ async function detectionLoop() {
requestAnimationFrame(detectionLoop);
}
}
var ok = {
faceCount: false,
faceConfidence: false,
facingCenter: false,
eyesOpen: false,
blinkDetected: false,
faceSize: false,
antispoofCheck: false,
livenessCheck: false,
elapsedMs: 0
};
var allOk = () => ok.faceCount && ok.faceSize && ok.blinkDetected && ok.facingCenter && ok.faceConfidence && ok.antispoofCheck;
async function validationLoop() {
const interpolated = await human.next(human.result);
await human.draw.canvas(dom.video, dom.canvas);
@@ -100,14 +105,22 @@ async function validationLoop() {
fps.draw = 1e3 / (now - timestamp.draw);
timestamp.draw = now;
printFPS(`fps: ${fps.detect.toFixed(1).padStart(5, " ")} detect | ${fps.draw.toFixed(1).padStart(5, " ")} draw`);
const gestures = Object.values(human.result.gesture).map((gesture) => gesture.gesture);
ok.faceCount = human.result.face.length === 1;
ok.eyesOpen = ok.eyesOpen || !(gestures.includes("blink left eye") || gestures.includes("blink right eye"));
ok.blinkDetected = ok.eyesOpen && ok.blinkDetected || gestures.includes("blink left eye") || gestures.includes("blink right eye");
ok.facingCenter = gestures.includes("facing center") && gestures.includes("looking center");
ok.faceConfidence = (human.result.face[0].boxScore || 0) > options.minConfidence && (human.result.face[0].faceScore || 0) > options.minConfidence && (human.result.face[0].genderScore || 0) > options.minConfidence;
ok.antispoofCheck = (human.result.face[0].real || 0) > options.minConfidence;
ok.faceSize = human.result.face[0].box[2] >= options.minSize && human.result.face[0].box[3] >= options.minSize;
if (ok.faceCount) {
const gestures = Object.values(human.result.gesture).map((gesture) => gesture.gesture);
if (gestures.includes("blink left eye") || gestures.includes("blink right eye"))
blink.start = human.now();
if (blink.start > 0 && !gestures.includes("blink left eye") && !gestures.includes("blink right eye"))
blink.end = human.now();
ok.blinkDetected = ok.blinkDetected || blink.end - blink.start > options.blinkMin && blink.end - blink.start < options.blinkMax;
if (ok.blinkDetected && blink.time === 0)
blink.time = Math.trunc(blink.end - blink.start);
ok.facingCenter = gestures.includes("facing center") && gestures.includes("looking center");
ok.faceConfidence = (human.result.face[0].boxScore || 0) > options.minConfidence && (human.result.face[0].faceScore || 0) > options.minConfidence && (human.result.face[0].genderScore || 0) > options.minConfidence;
ok.antispoofCheck = (human.result.face[0].real || 0) > options.minConfidence;
ok.livenessCheck = (human.result.face[0].live || 0) > options.minConfidence;
ok.faceSize = human.result.face[0].box[2] >= options.minSize && human.result.face[0].box[3] >= options.minSize;
}
printStatus(ok);
if (allOk()) {
dom.video.pause();
@@ -135,10 +148,19 @@ async function detectFace(face) {
dom.canvas.style.width = "";
human.tf.browser.toPixels(face.tensor, dom.canvas);
human.tf.dispose(face.tensor);
const arr = db.map((rec) => rec.embedding);
const res = await human.match(face.embedding, arr);
log(`found best match: ${db[res.index].name} similarity: ${Math.round(1e3 * res.similarity) / 10}% source: ${db[res.index].source}`);
}
async function loadFaceDB() {
const res = await fetch(options.faceDB);
db = res && res.ok ? await res.json() : [];
log("loaded face db:", options.faceDB, "records:", db.length);
}
async function main() {
log("human version:", human.version, "| tfjs version:", human.tf.version_core);
printFPS("loading...");
await loadFaceDB();
await human.load();
printFPS("initializing...");
await human.warmup();

View File

@@ -18,7 +18,8 @@ const humanConfig = { // user configuration for human, used to fine-tune behavio
description: { enabled: true },
iris: { enabled: true }, // needed to determine gaze direction
emotion: { enabled: false }, // not needed
antispoof: { enabled: true }, // enable optional antispoof as well
antispoof: { enabled: true }, // enable optional antispoof module
liveness: { enabled: true }, // enable optional liveness module
},
body: { enabled: false },
hand: { enabled: false },
@@ -27,11 +28,33 @@ const humanConfig = { // user configuration for human, used to fine-tune behavio
};
const options = {
faceDB: '../facematch/faces.json',
minConfidence: 0.6, // overall face confidence for box, face, gender, real
minSize: 224, // min input to face descriptor model before degradation
maxTime: 10000, // max time before giving up
blinkMin: 10, // minimum duration of a valid blink
blinkMax: 800, // maximum duration of a valid blink
};
const ok = { // must meet all rules
faceCount: false,
faceConfidence: false,
facingCenter: false,
blinkDetected: false,
faceSize: false,
antispoofCheck: false,
livenessCheck: false,
elapsedMs: 0, // total time while waiting for valid face
};
const allOk = () => ok.faceCount && ok.faceSize && ok.blinkDetected && ok.facingCenter && ok.faceConfidence && ok.antispoofCheck && ok.livenessCheck;
const blink = { // internal timers for blink start/end/duration
start: 0,
end: 0,
time: 0,
};
let db: Array<{ name: string, source: string, embedding: number[] }> = []; // holds loaded face descriptor database
const human = new Human(humanConfig); // create instance of human with overrides from user configuration
human.env['perfadd'] = false; // is performance data showing instant or total values
@@ -68,11 +91,7 @@ async function webCam() { // initialize webcam
await ready;
dom.canvas.width = dom.video.videoWidth;
dom.canvas.height = dom.video.videoHeight;
const track: MediaStreamTrack = stream.getVideoTracks()[0];
const capabilities: MediaTrackCapabilities | string = track.getCapabilities ? track.getCapabilities() : '';
const settings: MediaTrackSettings | string = track.getSettings ? track.getSettings() : '';
const constraints: MediaTrackConstraints | string = track.getConstraints ? track.getConstraints() : '';
log('video:', dom.video.videoWidth, dom.video.videoHeight, track.label, { stream, track, settings, constraints, capabilities });
log('video:', dom.video.videoWidth, dom.video.videoHeight, stream.getVideoTracks()[0].label);
dom.canvas.onclick = () => { // pause when clicked on screen and resume on next click
if (dom.video.paused) dom.video.play();
else dom.video.pause();
@@ -89,19 +108,6 @@ async function detectionLoop() { // main detection loop
}
}
const ok = { // must meet all rules
faceCount: false,
faceConfidence: false,
facingCenter: false,
eyesOpen: false,
blinkDetected: false,
faceSize: false,
antispoofCheck: false,
livenessCheck: false,
elapsedMs: 0,
};
const allOk = () => ok.faceCount && ok.faceSize && ok.blinkDetected && ok.facingCenter && ok.faceConfidence && ok.antispoofCheck;
async function validationLoop(): Promise<typeof human.result.face> { // main screen refresh loop
const interpolated = await human.next(human.result); // smoothen result using last-known results
await human.draw.canvas(dom.video, dom.canvas); // draw canvas to screen
@@ -111,14 +117,19 @@ async function validationLoop(): Promise<typeof human.result.face> { // main scr
timestamp.draw = now;
printFPS(`fps: ${fps.detect.toFixed(1).padStart(5, ' ')} detect | ${fps.draw.toFixed(1).padStart(5, ' ')} draw`); // write status
const gestures: string[] = Object.values(human.result.gesture).map((gesture) => gesture.gesture); // flatten all gestures
ok.faceCount = human.result.face.length === 1; // must be exactly one detected face
ok.eyesOpen = ok.eyesOpen || !(gestures.includes('blink left eye') || gestures.includes('blink right eye')); // blink validation is only ok once both eyes are open
ok.blinkDetected = ok.eyesOpen && ok.blinkDetected || gestures.includes('blink left eye') || gestures.includes('blink right eye'); // need to detect blink only once
ok.facingCenter = gestures.includes('facing center') && gestures.includes('looking center'); // must face camera and look at camera
ok.faceConfidence = (human.result.face[0].boxScore || 0) > options.minConfidence && (human.result.face[0].faceScore || 0) > options.minConfidence && (human.result.face[0].genderScore || 0) > options.minConfidence;
ok.antispoofCheck = (human.result.face[0].real || 0) > options.minConfidence;
ok.faceSize = human.result.face[0].box[2] >= options.minSize && human.result.face[0].box[3] >= options.minSize;
if (ok.faceCount) { // skip the rest if no face
const gestures: string[] = Object.values(human.result.gesture).map((gesture) => gesture.gesture); // flatten all gestures
if (gestures.includes('blink left eye') || gestures.includes('blink right eye')) blink.start = human.now(); // blink starts when eyes get closed
if (blink.start > 0 && !gestures.includes('blink left eye') && !gestures.includes('blink right eye')) blink.end = human.now(); // once a blink has started, mark when both eyes are open again
ok.blinkDetected = ok.blinkDetected || (blink.end - blink.start > options.blinkMin && blink.end - blink.start < options.blinkMax);
if (ok.blinkDetected && blink.time === 0) blink.time = Math.trunc(blink.end - blink.start);
ok.facingCenter = gestures.includes('facing center') && gestures.includes('looking center'); // must face camera and look at camera
ok.faceConfidence = (human.result.face[0].boxScore || 0) > options.minConfidence && (human.result.face[0].faceScore || 0) > options.minConfidence && (human.result.face[0].genderScore || 0) > options.minConfidence;
ok.antispoofCheck = (human.result.face[0].real || 0) > options.minConfidence;
ok.livenessCheck = (human.result.face[0].live || 0) > options.minConfidence;
ok.faceSize = human.result.face[0].box[2] >= options.minSize && human.result.face[0].box[3] >= options.minSize;
}
printStatus(ok);
@@ -150,13 +161,21 @@ async function detectFace(face) {
human.tf.browser.toPixels(face.tensor, dom.canvas);
human.tf.dispose(face.tensor);
// run detection using human.match and use face.embedding as input descriptor
// tbd
const arr = db.map((rec) => rec.embedding);
const res = await human.match(face.embedding, arr);
log(`found best match: ${db[res.index].name} similarity: ${Math.round(1000 * res.similarity) / 10}% source: ${db[res.index].source}`);
}
async function loadFaceDB() {
const res = await fetch(options.faceDB);
db = (res && res.ok) ? await res.json() : [];
log('loaded face db:', options.faceDB, 'records:', db.length);
}
async function main() { // main entry point
log('human version:', human.version, '| tfjs version:', human.tf.version_core);
printFPS('loading...');
await loadFaceDB();
await human.load(); // preload all models
printFPS('initializing...');
await human.warmup(); // warmup function to initialize backend for future faster detection

View File

@@ -50,6 +50,9 @@ export interface FaceEmotionConfig extends GenericConfig {
/** Anti-spoofing part of face configuration */
export interface FaceAntiSpoofConfig extends GenericConfig {}
/** Liveness part of face configuration */
export interface FaceLivenessConfig extends GenericConfig {}
/** Configures all face-specific options: face detection, mesh analysis, age, gender, emotion detection and face description */
export interface FaceConfig extends GenericConfig {
detector: Partial<FaceDetectorConfig>,
@@ -58,6 +61,7 @@ export interface FaceConfig extends GenericConfig {
description: Partial<FaceDescriptionConfig>,
emotion: Partial<FaceEmotionConfig>,
antispoof: Partial<FaceAntiSpoofConfig>,
liveness: Partial<FaceLivenessConfig>,
}
/** Configures all body detection specific options */
@@ -340,6 +344,12 @@ const config: Config = {
skipTime: 4000,
modelPath: 'antispoof.json',
},
liveness: {
enabled: false,
skipFrames: 99,
skipTime: 4000,
modelPath: 'liveness.json',
},
},
body: {
enabled: true,

View File

@@ -10,6 +10,7 @@ import * as facemesh from './facemesh';
import * as emotion from '../gear/emotion';
import * as faceres from './faceres';
import * as antispoof from './antispoof';
import * as liveness from './liveness';
import type { FaceResult } from '../result';
import type { Tensor } from '../tfjs/types';
import { calculateFaceAngle } from './angles';
@@ -24,6 +25,7 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
let emotionRes;
let embeddingRes;
let antispoofRes;
let livenessRes;
let descRes;
const faceRes: Array<FaceResult> = [];
parent.state = 'run:face';
@@ -70,6 +72,18 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
}
parent.analyze('End AntiSpoof:');
// run liveness, inherits face from blazeface
parent.analyze('Start Liveness:');
if (parent.config.async) {
livenessRes = parent.config.face.liveness.enabled ? liveness.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
} else {
parent.state = 'run:liveness';
timeStamp = now();
livenessRes = parent.config.face.liveness.enabled ? await liveness.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
parent.performance.liveness = env.perfadd ? (parent.performance.liveness || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
}
parent.analyze('End Liveness:');
// run gear, inherits face from blazeface
/*
parent.analyze('Start GEAR:');
@@ -98,7 +112,7 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
// if async wait for results
if (parent.config.async) {
[ageRes, genderRes, emotionRes, embeddingRes, descRes, gearRes, antispoofRes] = await Promise.all([ageRes, genderRes, emotionRes, embeddingRes, descRes, gearRes, antispoofRes]);
[ageRes, genderRes, emotionRes, embeddingRes, descRes, gearRes, antispoofRes, livenessRes] = await Promise.all([ageRes, genderRes, emotionRes, embeddingRes, descRes, gearRes, antispoofRes, livenessRes]);
}
parent.analyze('Finish Face:');
@@ -131,6 +145,7 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
embedding: descRes?.descriptor,
emotion: emotionRes,
real: antispoofRes,
live: livenessRes,
iris: irisSize !== 0 ? Math.trunc(500 / irisSize / 11.7) / 100 : 0,
rotation,
tensor,

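Note the orchestration pattern in the hunks above: when `config.async` is set, the antispoof and liveness predictions are started without awaiting, and a single `Promise.all` gathers them at the end of the pipeline. A self-contained sketch of that flow, with hypothetical stubs standing in for the module `predict` calls:

```ts
// hypothetical stubs standing in for antispoof.predict and liveness.predict
const antispoofPredict = async (): Promise<number> => 0.9;
const livenessPredict = async (): Promise<number> => 0.8;

async function runFaceChecks(runAsync: boolean) {
  let antispoofRes: number | Promise<number>;
  let livenessRes: number | Promise<number>;
  if (runAsync) {
    // start both inferences without awaiting so they overlap on the backend
    antispoofRes = antispoofPredict();
    livenessRes = livenessPredict();
  } else {
    // sequential path, which allows timing each module individually
    antispoofRes = await antispoofPredict();
    livenessRes = await livenessPredict();
  }
  // a single await gathers whatever is still pending
  [antispoofRes, livenessRes] = await Promise.all([antispoofRes, livenessRes]);
  return { real: antispoofRes, live: livenessRes };
}
```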
src/face/liveness.ts (new file, 46 lines added)
View File

@@ -0,0 +1,46 @@
/**
* Liveness model implementation
*/
import { log, join, now } from '../util/util';
import type { Config } from '../config';
import type { GraphModel, Tensor } from '../tfjs/types';
import * as tf from '../../dist/tfjs.esm.js';
import { env } from '../util/env';
let model: GraphModel | null;
const cached: Array<number> = [];
let skipped = Number.MAX_SAFE_INTEGER;
let lastCount = 0;
let lastTime = 0;
export async function load(config: Config): Promise<GraphModel> {
if (env.initial) model = null;
if (!model) {
model = await tf.loadGraphModel(join(config.modelBasePath, config.face.liveness?.modelPath || '')) as unknown as GraphModel;
if (!model || !model['modelUrl']) log('load model failed:', config.face.liveness?.modelPath);
else if (config.debug) log('load model:', model['modelUrl']);
} else if (config.debug) log('cached model:', model['modelUrl']);
return model;
}
export async function predict(image: Tensor, config: Config, idx: number, count: number) {
if (!model) return null;
// reuse the last score while both skip windows hold and the face count is unchanged
const skipTime = (config.face.liveness?.skipTime || 0) > (now() - lastTime);
const skipFrame = skipped < (config.face.liveness?.skipFrames || 0);
if (config.skipAllowed && skipTime && skipFrame && (lastCount === count) && cached[idx]) {
skipped++;
return cached[idx];
}
skipped = 0;
// resize the face crop to the model input resolution and run inference
const resize = tf.image.resizeBilinear(image, [model?.inputs[0].shape ? model.inputs[0].shape[2] : 0, model?.inputs[0].shape ? model.inputs[0].shape[1] : 0], false);
const res = model?.execute(resize) as Tensor;
const num = (await res.data())[0]; // single scalar output: 0..1 liveness confidence
cached[idx] = Math.round(100 * num) / 100;
lastCount = count;
lastTime = now();
tf.dispose([resize, res]);
return cached[idx];
}
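`predict()` above reuses `cached[idx]` only while both the `skipTime` and `skipFrames` windows hold, so inference re-runs as soon as either limit is exceeded. A sketch of tightening those windows for a FaceID-style flow (the override values are illustrative; the defaults from `config.ts` are `skipFrames: 99` and `skipTime: 4000`):

```ts
import Human from '@vladmandic/human'; // assumed package entry point

const human = new Human({
  face: {
    enabled: true,
    liveness: {
      enabled: true,
      skipFrames: 5, // cached score is reused for at most 5 frames
      skipTime: 500, // and for at most 500 ms, whichever limit is hit first
      // modelPath stays at the default 'liveness.json' resolved against modelBasePath
    },
  },
});
```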

View File

@ -16,6 +16,7 @@ import * as faceres from './face/faceres';
import * as handpose from './hand/handpose';
import * as handtrack from './hand/handtrack';
import * as iris from './face/iris';
import * as liveness from './face/liveness';
import * as movenet from './body/movenet';
import * as nanodet from './object/nanodet';
import * as posenet from './body/posenet';
@@ -46,6 +47,7 @@ export class Models {
handpose: null | GraphModel | Promise<GraphModel> = null;
handskeleton: null | GraphModel | Promise<GraphModel> = null;
handtrack: null | GraphModel | Promise<GraphModel> = null;
liveness: null | GraphModel | Promise<GraphModel> = null;
movenet: null | GraphModel | Promise<GraphModel> = null;
nanodet: null | GraphModel | Promise<GraphModel> = null;
posenet: null | GraphModel | Promise<GraphModel> = null;
@@ -65,24 +67,25 @@ export async function load(instance: Human): Promise<void> {
if (!instance.models.handpose && instance.config.hand.detector?.modelPath?.includes('handdetect')) [instance.models.handpose, instance.models.handskeleton] = await handpose.load(instance.config);
if (!instance.models.handskeleton && instance.config.hand.landmarks && instance.config.hand.detector?.modelPath?.includes('handdetect')) [instance.models.handpose, instance.models.handskeleton] = await handpose.load(instance.config);
}
if (instance.config.face.enabled && !instance.models.facedetect) instance.models.facedetect = blazeface.load(instance.config);
if (instance.config.face.enabled && instance.config.face.mesh?.enabled && !instance.models.facemesh) instance.models.facemesh = facemesh.load(instance.config);
if (instance.config.face.enabled && instance.config.face.iris?.enabled && !instance.models.faceiris) instance.models.faceiris = iris.load(instance.config);
if (instance.config.face.enabled && instance.config.face.antispoof?.enabled && !instance.models.antispoof) instance.models.antispoof = antispoof.load(instance.config);
if (instance.config.hand.enabled && !instance.models.handtrack && instance.config.hand.detector?.modelPath?.includes('handtrack')) instance.models.handtrack = handtrack.loadDetect(instance.config);
if (instance.config.hand.enabled && instance.config.hand.landmarks && !instance.models.handskeleton && instance.config.hand.detector?.modelPath?.includes('handtrack')) instance.models.handskeleton = handtrack.loadSkeleton(instance.config);
if (instance.config.body.enabled && !instance.models.posenet && instance.config.body?.modelPath?.includes('posenet')) instance.models.posenet = posenet.load(instance.config);
if (instance.config.body.enabled && !instance.models.efficientpose && instance.config.body?.modelPath?.includes('efficientpose')) instance.models.efficientpose = efficientpose.load(instance.config);
if (instance.config.body.enabled && !instance.models.blazepose && instance.config.body?.modelPath?.includes('blazepose')) instance.models.blazepose = blazepose.loadPose(instance.config);
if (instance.config.body.enabled && !instance.models.blazeposedetect && instance.config.body.detector?.modelPath && instance.config.body?.modelPath?.includes('blazepose')) instance.models.blazeposedetect = blazepose.loadDetect(instance.config);
if (instance.config.body.enabled && !instance.models.efficientpose && instance.config.body?.modelPath?.includes('efficientpose')) instance.models.efficientpose = efficientpose.load(instance.config);
if (instance.config.body.enabled && !instance.models.efficientpose && instance.config.body?.modelPath?.includes('efficientpose')) instance.models.efficientpose = efficientpose.load(instance.config);
if (instance.config.body.enabled && !instance.models.movenet && instance.config.body?.modelPath?.includes('movenet')) instance.models.movenet = movenet.load(instance.config);
if (instance.config.object.enabled && !instance.models.nanodet && instance.config.object?.modelPath?.includes('nanodet')) instance.models.nanodet = nanodet.load(instance.config);
if (instance.config.object.enabled && !instance.models.centernet && instance.config.object?.modelPath?.includes('centernet')) instance.models.centernet = centernet.load(instance.config);
if (instance.config.face.enabled && instance.config.face.emotion?.enabled && !instance.models.emotion) instance.models.emotion = emotion.load(instance.config);
if (instance.config.body.enabled && !instance.models.posenet && instance.config.body?.modelPath?.includes('posenet')) instance.models.posenet = posenet.load(instance.config);
if (instance.config.face.enabled && !instance.models.facedetect) instance.models.facedetect = blazeface.load(instance.config);
if (instance.config.face.enabled && instance.config.face.antispoof?.enabled && !instance.models.antispoof) instance.models.antispoof = antispoof.load(instance.config);
if (instance.config.face.enabled && instance.config.face.liveness?.enabled && !instance.models.liveness) instance.models.liveness = liveness.load(instance.config);
if (instance.config.face.enabled && instance.config.face.description?.enabled && !instance.models.faceres) instance.models.faceres = faceres.load(instance.config);
if (instance.config.segmentation.enabled && !instance.models.segmentation) instance.models.segmentation = segmentation.load(instance.config);
if (instance.config.face.enabled && instance.config.face.emotion?.enabled && !instance.models.emotion) instance.models.emotion = emotion.load(instance.config);
if (instance.config.face.enabled && instance.config.face.iris?.enabled && !instance.models.faceiris) instance.models.faceiris = iris.load(instance.config);
if (instance.config.face.enabled && instance.config.face.mesh?.enabled && !instance.models.facemesh) instance.models.facemesh = facemesh.load(instance.config);
if (instance.config.face.enabled && instance.config.face['agegenderrace']?.enabled && !instance.models.agegenderrace) instance.models.agegenderrace = agegenderrace.load(instance.config);
if (instance.config.hand.enabled && !instance.models.handtrack && instance.config.hand.detector?.modelPath?.includes('handtrack')) instance.models.handtrack = handtrack.loadDetect(instance.config);
if (instance.config.hand.enabled && instance.config.hand.landmarks && !instance.models.handskeleton && instance.config.hand.detector?.modelPath?.includes('handtrack')) instance.models.handskeleton = handtrack.loadSkeleton(instance.config);
if (instance.config.object.enabled && !instance.models.centernet && instance.config.object?.modelPath?.includes('centernet')) instance.models.centernet = centernet.load(instance.config);
if (instance.config.object.enabled && !instance.models.nanodet && instance.config.object?.modelPath?.includes('nanodet')) instance.models.nanodet = nanodet.load(instance.config);
if (instance.config.segmentation.enabled && !instance.models.segmentation) instance.models.segmentation = segmentation.load(instance.config);
// models are loaded in parallel asynchronously so lets wait until they are actually loaded
for await (const model of Object.keys(instance.models)) {

View File

@@ -47,6 +47,8 @@ export interface FaceResult {
iris?: number,
/** face anti-spoofing result confidence */
real?: number,
/** face liveness result confidence */
live?: number,
/** face rotation details */
rotation?: {
angle: { roll: number, yaw: number, pitch: number },

View File

@@ -213,6 +213,7 @@ export async function face(inCanvas: AnyCanvas, result: Array<FaceResult>, drawO
if (f.age) labels.push(`age: ${f.age || ''}`);
if (f.iris) labels.push(`distance: ${f.iris}`);
if (f.real) labels.push(`real: ${Math.trunc(100 * f.real)}%`);
if (f.live) labels.push(`live: ${Math.trunc(100 * f.live)}%`);
if (f.emotion && f.emotion.length > 0) {
const emotion = f.emotion.map((a) => `${Math.trunc(100 * a.score)}% ${a.emotion}`);
if (emotion.length > 3) emotion.length = 3;

View File

@@ -156,6 +156,8 @@ async function verifyDetails(human) {
verify(face.age > 23 && face.age < 24 && face.gender === 'female' && face.genderScore > 0.9 && face.iris > 70 && face.iris < 80, 'details face age/gender', face.age, face.gender, face.genderScore, face.iris);
verify(face.box.length === 4 && face.boxRaw.length === 4 && face.mesh.length === 478 && face.meshRaw.length === 478 && face.embedding.length === 1024, 'details face arrays', face.box.length, face.mesh.length, face.embedding.length);
verify(face.emotion.length === 3 && face.emotion[0].score > 0.45 && face.emotion[0].emotion === 'neutral', 'details face emotion', face.emotion.length, face.emotion[0]);
verify(face.real > 0.8, 'details face anti-spoofing', face.real);
verify(face.live > 0.8, 'details face liveness', face.live);
}
verify(res.body.length === 1, 'details body length', res.body.length);
for (const body of res.body) {
@@ -220,7 +222,7 @@ async function test(Human, inputConfig) {
await human.load();
const models = Object.keys(human.models).map((model) => ({ name: model, loaded: (human.models[model] !== null) }));
const loaded = models.filter((model) => model.loaded);
if (models.length === 21 && loaded.length === 10) log('state', 'passed: models loaded', models.length, loaded.length, models);
if (models.length === 22 && loaded.length === 12) log('state', 'passed: models loaded', models.length, loaded.length, models);
else log('error', 'failed: models loaded', models.length, loaded.length, models);
// increase defaults

View File

@@ -15,6 +15,8 @@ const config = {
iris: { enabled: true },
description: { enabled: true },
emotion: { enabled: true },
antispoof: { enabled: true },
liveness: { enabled: true },
},
hand: { enabled: true },
body: { enabled: true },

View File

@@ -25,6 +25,8 @@ const config = {
iris: { enabled: true },
description: { enabled: true },
emotion: { enabled: true },
antispoof: { enabled: true },
liveness: { enabled: true },
},
hand: { enabled: true, rotation: false },
body: { enabled: true },

View File

@@ -15,6 +15,8 @@ const config = {
iris: { enabled: true },
description: { enabled: true },
emotion: { enabled: true },
antispoof: { enabled: true },
liveness: { enabled: true },
},
hand: { enabled: true },
body: { enabled: true },