implement optional face masking

pull/356/head
Vladimir Mandic 2021-11-12 15:07:23 -05:00
parent 8b56de5140
commit 4f2993a2f5
20 changed files with 271 additions and 180 deletions


@ -40,6 +40,7 @@
"@typescript-eslint/explicit-module-boundary-types": "off",
"@typescript-eslint/no-shadow": "error",
"@typescript-eslint/no-var-requires": "off",
"@typescript-eslint/prefer-as-const": "off",
"@typescript-eslint/triple-slash-reference": "off",
"@typescript-eslint/no-inferrable-types": "off",
"@typescript-eslint/no-empty-interface": ["error", { "allowSingleExtends": true }],


@ -11,6 +11,8 @@
### **HEAD -> main** 2021/11/11 mandic00@live.com
- add similarity score range normalization
- add faceid demo
- documentation overhaul
- auto tensor shape and channels handling
- disable use of path2d in node


@ -18,14 +18,15 @@
html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
body { margin: 0; padding: 16px; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
body::-webkit-scrollbar { display: none; }
.button { padding: 2px; cursor: pointer; box-shadow: 2px 2px black; width: 64px; text-align: center; margin-left: 16px; height: 16px; display: none }
.button { padding: 2px; cursor: pointer; box-shadow: 2px 2px black; width: 64px; text-align: center; place-content: center; margin-left: 16px; height: 16px; display: none }
.ok { position: absolute; top: 64px; right: 20px; width: 100px; background-color: grey; padding: 4px; color: black; font-size: 14px }
</style>
</head>
<body>
<canvas id="canvas" style="padding: 8px"></canvas>
<canvas id="source" style="padding: 8px"></canvas>
<video id="video" playsinline style="display: none"></video>
<pre id="fps" style="position: absolute; top: 12px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
<pre id="fps" style="position: absolute; bottom: 16px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
<pre id="log" style="padding: 8px"></pre>
<div id="match" style="display: none; padding: 8px">
<label for="name">name:</label>
@ -33,7 +34,7 @@
<span id="save" class="button" style="background-color: royalblue">save</span>
<span id="delete" class="button" style="background-color: lightcoral">delete</span>
</div>
<div id="retry" class="button" style="background-color: darkslategray; width: 350px">retry</div>
<div id="status" style="position: absolute; bottom: 0; width: 100%; padding: 8px; font-size: 0.8rem;"></div>
<div id="retry" class="button" style="background-color: darkslategray; width: 350px; margin-top: 32px; padding: 4px">retry</div>
<div id="ok"></div>
</body>
</html>


@ -47,6 +47,15 @@ async function load() {
};
});
}
async function count() {
if (!db)
await open();
return new Promise((resolve) => {
const store = db.transaction([table], "readwrite").objectStore(table).count();
store.onerror = (evt) => log("count error:", evt);
store.onsuccess = () => resolve(store.result);
});
}
async function save(faceRecord) {
if (!db)
await open();
@ -62,15 +71,12 @@ async function remove(faceRecord) {
}
// demo/faceid/index.ts
var db2 = [];
var face;
var current;
var humanConfig = {
modelBasePath: "../../models",
filter: { equalization: true },
face: {
enabled: true,
detector: { rotation: true, return: true },
detector: { rotation: true, return: true, cropFactor: 1.6, mask: false },
description: { enabled: true },
iris: { enabled: true },
emotion: { enabled: false },
@ -88,19 +94,24 @@ var options = {
maxTime: 1e4,
blinkMin: 10,
blinkMax: 800,
threshold: 0.5
threshold: 0.5,
mask: humanConfig.face.detector.mask,
rotation: humanConfig.face.detector.rotation,
cropFactor: humanConfig.face.detector.cropFactor
};
var ok = {
faceCount: false,
faceConfidence: false,
facingCenter: false,
lookingCenter: false,
blinkDetected: false,
faceSize: false,
antispoofCheck: false,
livenessCheck: false,
elapsedMs: 0
};
var allOk = () => ok.faceCount && ok.faceSize && ok.blinkDetected && ok.facingCenter && ok.faceConfidence && ok.antispoofCheck && ok.livenessCheck;
var allOk = () => ok.faceCount && ok.faceSize && ok.blinkDetected && ok.facingCenter && ok.lookingCenter && ok.faceConfidence && ok.antispoofCheck && ok.livenessCheck;
var current = { face: null, record: null };
var blink = {
start: 0,
end: 0,
@ -115,13 +126,13 @@ var dom = {
canvas: document.getElementById("canvas"),
log: document.getElementById("log"),
fps: document.getElementById("fps"),
status: document.getElementById("status"),
match: document.getElementById("match"),
name: document.getElementById("name"),
save: document.getElementById("save"),
delete: document.getElementById("delete"),
retry: document.getElementById("retry"),
source: document.getElementById("source")
source: document.getElementById("source"),
ok: document.getElementById("ok")
};
var timestamp = { detect: 0, draw: 0 };
var fps = { detect: 0, draw: 0 };
@ -131,7 +142,6 @@ var log2 = (...msg) => {
console.log(...msg);
};
var printFPS = (msg) => dom.fps.innerText = msg;
var printStatus = (msg) => dom.status.innerText = "status: " + JSON.stringify(msg).replace(/"|{|}/g, "").replace(/,/g, " | ");
async function webCam() {
printFPS("starting webcam...");
const cameraOptions = { audio: false, video: { facingMode: "user", resizeMode: "none", width: { ideal: document.body.clientWidth } } };
@ -155,8 +165,8 @@ async function webCam() {
}
async function detectionLoop() {
if (!dom.video.paused) {
if (face && face.tensor)
human.tf.dispose(face.tensor);
if (current.face && current.face.tensor)
human.tf.dispose(current.face.tensor);
await human.detect(dom.video);
const now = human.now();
fps.detect = 1e3 / (now - timestamp.detect);
@ -179,16 +189,32 @@ async function validationLoop() {
blink.start = human.now();
if (blink.start > 0 && !gestures.includes("blink left eye") && !gestures.includes("blink right eye"))
blink.end = human.now();
ok.blinkDetected = ok.blinkDetected || blink.end - blink.start > options.blinkMin && blink.end - blink.start < options.blinkMax;
ok.blinkDetected = ok.blinkDetected || Math.abs(blink.end - blink.start) > options.blinkMin && Math.abs(blink.end - blink.start) < options.blinkMax;
if (ok.blinkDetected && blink.time === 0)
blink.time = Math.trunc(blink.end - blink.start);
ok.facingCenter = gestures.includes("facing center") && gestures.includes("looking center");
ok.facingCenter = gestures.includes("facing center");
ok.lookingCenter = gestures.includes("looking center");
ok.faceConfidence = (human.result.face[0].boxScore || 0) > options.minConfidence && (human.result.face[0].faceScore || 0) > options.minConfidence && (human.result.face[0].genderScore || 0) > options.minConfidence;
ok.antispoofCheck = (human.result.face[0].real || 0) > options.minConfidence;
ok.livenessCheck = (human.result.face[0].live || 0) > options.minConfidence;
ok.faceSize = human.result.face[0].box[2] >= options.minSize && human.result.face[0].box[3] >= options.minSize;
}
printStatus(ok);
let y = 32;
for (const [key, val] of Object.entries(ok)) {
let el = document.getElementById(`ok-${key}`);
if (!el) {
el = document.createElement("div");
el.innerText = key;
el.className = "ok";
el.style.top = `${y}px`;
dom.ok.appendChild(el);
}
if (typeof val === "boolean")
el.style.backgroundColor = val ? "lightgreen" : "lightcoral";
else
el.innerText = `${key}:${val}`;
y += 28;
}
if (allOk()) {
dom.video.pause();
return human.result.face[0];
@ -208,46 +234,48 @@ async function validationLoop() {
}
}
async function saveRecords() {
var _a;
var _a, _b;
if (dom.name.value.length > 0) {
const image = (_a = dom.canvas.getContext("2d")) == null ? void 0 : _a.getImageData(0, 0, dom.canvas.width, dom.canvas.height);
const rec = { id: 0, name: dom.name.value, descriptor: face.embedding, image };
const rec = { id: 0, name: dom.name.value, descriptor: (_b = current.face) == null ? void 0 : _b.embedding, image };
await save(rec);
log2("saved face record:", rec.name);
db2.push(rec);
} else {
log2("invalid name");
}
}
async function deleteRecord() {
if (current.id > 0) {
await remove(current);
if (current.record && current.record.id > 0) {
await remove(current.record);
}
}
async function detectFace() {
var _a, _b;
(_a = dom.canvas.getContext("2d")) == null ? void 0 : _a.clearRect(0, 0, options.minSize, options.minSize);
if (!face || !face.tensor || !face.embedding)
return 0;
human.tf.browser.toPixels(face.tensor, dom.canvas);
const descriptors = db2.map((rec) => rec.descriptor);
const res = await human.match(face.embedding, descriptors);
if (res.index === -1) {
log2("no matches");
if (!current.face || !current.face.tensor || !current.face.embedding)
return false;
human.tf.browser.toPixels(current.face.tensor, dom.canvas);
if (await count() === 0) {
log2("face database is empty");
document.body.style.background = "black";
dom.delete.style.display = "none";
dom.source.style.display = "none";
} else {
current = db2[res.index];
log2(`best match: ${current.name} | id: ${current.id} | similarity: ${Math.round(1e3 * res.similarity) / 10}%`);
dom.delete.style.display = "";
dom.name.value = current.name;
dom.source.style.display = "";
(_b = dom.source.getContext("2d")) == null ? void 0 : _b.putImageData(current.image, 0, 0);
return false;
}
const db2 = await load();
const descriptors = db2.map((rec) => rec.descriptor);
const res = await human.match(current.face.embedding, descriptors);
current.record = db2[res.index] || null;
if (current.record) {
log2(`best match: ${current.record.name} | id: ${current.record.id} | similarity: ${Math.round(1e3 * res.similarity) / 10}%`);
dom.name.value = current.record.name;
dom.source.style.display = "";
(_b = dom.source.getContext("2d")) == null ? void 0 : _b.putImageData(current.record.image, 0, 0);
}
document.body.style.background = res.similarity > options.threshold ? "darkgreen" : "maroon";
return res.similarity > options.threshold;
}
async function main() {
var _a, _b;
var _a, _b, _c, _d;
ok.faceCount = false;
ok.faceConfidence = false;
ok.facingCenter = false;
@ -258,34 +286,33 @@ async function main() {
ok.elapsedMs = 0;
dom.match.style.display = "none";
dom.retry.style.display = "none";
dom.source.style.display = "none";
document.body.style.background = "black";
await webCam();
await detectionLoop();
startTime = human.now();
face = await validationLoop();
dom.fps.style.display = "none";
dom.canvas.width = ((_a = face == null ? void 0 : face.tensor) == null ? void 0 : _a.shape[1]) || options.minSize;
dom.canvas.height = ((_b = face == null ? void 0 : face.tensor) == null ? void 0 : _b.shape[0]) || options.minSize;
current.face = await validationLoop();
dom.canvas.width = ((_b = (_a = current.face) == null ? void 0 : _a.tensor) == null ? void 0 : _b.shape[1]) || options.minSize;
dom.canvas.height = ((_d = (_c = current.face) == null ? void 0 : _c.tensor) == null ? void 0 : _d.shape[0]) || options.minSize;
dom.source.width = dom.canvas.width;
dom.source.height = dom.canvas.height;
dom.canvas.style.width = "";
dom.match.style.display = "flex";
dom.save.style.display = "flex";
dom.delete.style.display = "flex";
dom.retry.style.display = "block";
if (!allOk()) {
log2("did not find valid face");
return false;
} else {
const res = await detectFace();
document.body.style.background = res ? "darkgreen" : "maroon";
return res;
return detectFace();
}
}
async function init() {
log2("human version:", human.version, "| tfjs version:", human.tf.version_core);
log2("options:", JSON.stringify(options).replace(/{|}|"|\[|\]/g, "").replace(/,/g, " "));
printFPS("loading...");
db2 = await load();
log2("loaded face records:", db2.length);
log2("known face records:", await count());
await webCam();
await human.load();
printFPS("initializing...");


@ -10,16 +10,12 @@
import { Human, TensorLike, FaceResult } from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
import * as indexDb from './indexdb'; // methods to deal with indexdb
let db: Array<indexDb.FaceRecord> = []; // face descriptor database stored in indexdb
let face: FaceResult; // face result from human.detect
let current: indexDb.FaceRecord; // currently matched db record
const humanConfig = { // user configuration for human, used to fine-tune behavior
modelBasePath: '../../models',
filter: { equalization: true }, // let's run with histogram equalizer
face: {
enabled: true,
detector: { rotation: true, return: true }, // return tensor is used to get detected face image
detector: { rotation: true, return: true, cropFactor: 1.6, mask: false }, // return tensor is used to get detected face image
description: { enabled: true },
iris: { enabled: true }, // needed to determine gaze direction
emotion: { enabled: false }, // not needed
@ -39,19 +35,24 @@ const options = {
blinkMin: 10, // minimum duration of a valid blink
blinkMax: 800, // maximum duration of a valid blink
threshold: 0.5, // minimum similarity
mask: humanConfig.face.detector.mask,
rotation: humanConfig.face.detector.rotation,
cropFactor: humanConfig.face.detector.cropFactor,
};
const ok = { // must meet all rules
faceCount: false,
faceConfidence: false,
facingCenter: false,
lookingCenter: false,
blinkDetected: false,
faceSize: false,
antispoofCheck: false,
livenessCheck: false,
elapsedMs: 0, // total time while waiting for valid face
};
const allOk = () => ok.faceCount && ok.faceSize && ok.blinkDetected && ok.facingCenter && ok.faceConfidence && ok.antispoofCheck && ok.livenessCheck;
const allOk = () => ok.faceCount && ok.faceSize && ok.blinkDetected && ok.facingCenter && ok.lookingCenter && ok.faceConfidence && ok.antispoofCheck && ok.livenessCheck;
const current: { face: FaceResult | null, record: indexDb.FaceRecord | null } = { face: null, record: null }; // current face record and matched database record
const blink = { // internal timers for blink start/end/duration
start: 0,
@ -71,13 +72,13 @@ const dom = { // grab instances of dom objects so we dont have to look them up l
canvas: document.getElementById('canvas') as HTMLCanvasElement,
log: document.getElementById('log') as HTMLPreElement,
fps: document.getElementById('fps') as HTMLPreElement,
status: document.getElementById('status') as HTMLPreElement,
match: document.getElementById('match') as HTMLDivElement,
name: document.getElementById('name') as HTMLInputElement,
save: document.getElementById('save') as HTMLSpanElement,
delete: document.getElementById('delete') as HTMLSpanElement,
retry: document.getElementById('retry') as HTMLDivElement,
source: document.getElementById('source') as HTMLCanvasElement,
ok: document.getElementById('ok') as HTMLDivElement,
};
const timestamp = { detect: 0, draw: 0 }; // holds information used to calculate performance and possible memory leaks
const fps = { detect: 0, draw: 0 }; // holds calculated fps information for both detect and screen refresh
@ -89,7 +90,6 @@ const log = (...msg) => { // helper method to output messages
console.log(...msg);
};
const printFPS = (msg) => dom.fps.innerText = msg; // print status element
const printStatus = (msg) => dom.status.innerText = 'status: ' + JSON.stringify(msg).replace(/"|{|}/g, '').replace(/,/g, ' | '); // print status element
async function webCam() { // initialize webcam
printFPS('starting webcam...');
@ -111,7 +111,7 @@ async function webCam() { // initialize webcam
async function detectionLoop() { // main detection loop
if (!dom.video.paused) {
if (face && face.tensor) human.tf.dispose(face.tensor); // dispose previous tensor
if (current.face && current.face.tensor) human.tf.dispose(current.face.tensor); // dispose previous tensor
await human.detect(dom.video); // actual detection; we're not capturing output in a local variable as it can also be reached via human.result
const now = human.now();
fps.detect = 1000 / (now - timestamp.detect);
@ -133,15 +133,29 @@ async function validationLoop(): Promise<FaceResult> { // main screen refresh lo
const gestures: string[] = Object.values(human.result.gesture).map((gesture) => gesture.gesture); // flatten all gestures
if (gestures.includes('blink left eye') || gestures.includes('blink right eye')) blink.start = human.now(); // blink starts when eyes get closed
if (blink.start > 0 && !gestures.includes('blink left eye') && !gestures.includes('blink right eye')) blink.end = human.now(); // if blink started how long until eyes are back open
ok.blinkDetected = ok.blinkDetected || (blink.end - blink.start > options.blinkMin && blink.end - blink.start < options.blinkMax);
ok.blinkDetected = ok.blinkDetected || (Math.abs(blink.end - blink.start) > options.blinkMin && Math.abs(blink.end - blink.start) < options.blinkMax);
if (ok.blinkDetected && blink.time === 0) blink.time = Math.trunc(blink.end - blink.start);
ok.facingCenter = gestures.includes('facing center') && gestures.includes('looking center'); // must face camera and look at camera
ok.facingCenter = gestures.includes('facing center');
ok.lookingCenter = gestures.includes('looking center'); // must face camera and look at camera
ok.faceConfidence = (human.result.face[0].boxScore || 0) > options.minConfidence && (human.result.face[0].faceScore || 0) > options.minConfidence && (human.result.face[0].genderScore || 0) > options.minConfidence;
ok.antispoofCheck = (human.result.face[0].real || 0) > options.minConfidence;
ok.livenessCheck = (human.result.face[0].live || 0) > options.minConfidence;
ok.faceSize = human.result.face[0].box[2] >= options.minSize && human.result.face[0].box[3] >= options.minSize;
}
printStatus(ok);
let y = 32;
for (const [key, val] of Object.entries(ok)) {
let el = document.getElementById(`ok-${key}`);
if (!el) {
el = document.createElement('div');
el.innerText = key;
el.className = 'ok';
el.style.top = `${y}px`;
dom.ok.appendChild(el);
}
if (typeof val === 'boolean') el.style.backgroundColor = val ? 'lightgreen' : 'lightcoral';
else el.innerText = `${key}:${val}`;
y += 28;
}
if (allOk()) { // all criteria met
dom.video.pause();
return human.result.face[0];
@ -163,39 +177,41 @@ async function validationLoop(): Promise<FaceResult> { // main screen refresh lo
async function saveRecords() {
if (dom.name.value.length > 0) {
const image = dom.canvas.getContext('2d')?.getImageData(0, 0, dom.canvas.width, dom.canvas.height) as ImageData;
const rec = { id: 0, name: dom.name.value, descriptor: face.embedding as number[], image };
const rec = { id: 0, name: dom.name.value, descriptor: current.face?.embedding as number[], image };
await indexDb.save(rec);
log('saved face record:', rec.name);
db.push(rec);
} else {
log('invalid name');
}
}
async function deleteRecord() {
if (current.id > 0) {
await indexDb.remove(current);
if (current.record && current.record.id > 0) {
await indexDb.remove(current.record);
}
}
async function detectFace() {
dom.canvas.getContext('2d')?.clearRect(0, 0, options.minSize, options.minSize);
if (!face || !face.tensor || !face.embedding) return 0;
human.tf.browser.toPixels(face.tensor as unknown as TensorLike, dom.canvas);
const descriptors = db.map((rec) => rec.descriptor);
const res = await human.match(face.embedding, descriptors);
if (res.index === -1) {
log('no matches');
if (!current.face || !current.face.tensor || !current.face.embedding) return false;
human.tf.browser.toPixels(current.face.tensor as unknown as TensorLike, dom.canvas);
if (await indexDb.count() === 0) {
log('face database is empty');
document.body.style.background = 'black';
dom.delete.style.display = 'none';
dom.source.style.display = 'none';
} else {
current = db[res.index];
log(`best match: ${current.name} | id: ${current.id} | similarity: ${Math.round(1000 * res.similarity) / 10}%`);
dom.delete.style.display = '';
dom.name.value = current.name;
dom.source.style.display = '';
dom.source.getContext('2d')?.putImageData(current.image, 0, 0);
return false;
}
const db = await indexDb.load();
const descriptors = db.map((rec) => rec.descriptor);
const res = await human.match(current.face.embedding, descriptors);
current.record = db[res.index] || null;
if (current.record) {
log(`best match: ${current.record.name} | id: ${current.record.id} | similarity: ${Math.round(1000 * res.similarity) / 10}%`);
dom.name.value = current.record.name;
dom.source.style.display = '';
dom.source.getContext('2d')?.putImageData(current.record.image, 0, 0);
}
document.body.style.background = res.similarity > options.threshold ? 'darkgreen' : 'maroon';
return res.similarity > options.threshold;
}
@ -210,27 +226,26 @@ async function main() { // main entry point
ok.elapsedMs = 0;
dom.match.style.display = 'none';
dom.retry.style.display = 'none';
dom.source.style.display = 'none';
document.body.style.background = 'black';
await webCam();
await detectionLoop(); // start detection loop
startTime = human.now();
face = await validationLoop(); // start validation loop
dom.fps.style.display = 'none';
dom.canvas.width = face?.tensor?.shape[1] || options.minSize;
dom.canvas.height = face?.tensor?.shape[0] || options.minSize;
current.face = await validationLoop(); // start validation loop
dom.canvas.width = current.face?.tensor?.shape[1] || options.minSize;
dom.canvas.height = current.face?.tensor?.shape[0] || options.minSize;
dom.source.width = dom.canvas.width;
dom.source.height = dom.canvas.height;
dom.canvas.style.width = '';
dom.match.style.display = 'flex';
dom.save.style.display = 'flex';
dom.delete.style.display = 'flex';
dom.retry.style.display = 'block';
if (!allOk()) {
if (!allOk()) { // is all criteria met?
log('did not find valid face');
return false;
} else {
// log('found valid face');
const res = await detectFace();
document.body.style.background = res ? 'darkgreen' : 'maroon';
return res;
return detectFace();
}
}
@ -238,8 +253,7 @@ async function init() {
log('human version:', human.version, '| tfjs version:', human.tf.version_core);
log('options:', JSON.stringify(options).replace(/{|}|"|\[|\]/g, '').replace(/,/g, ' '));
printFPS('loading...');
db = await indexDb.load(); // load face database from indexdb
log('loaded face records:', db.length);
log('known face records:', await indexDb.count());
await webCam(); // start webcam
await human.load(); // preload all models
printFPS('initializing...');


@ -43,6 +43,15 @@ export async function load(): Promise<FaceRecord[]> {
});
}
export async function count(): Promise<number> {
if (!db) await open(); // open or create if not already done
return new Promise((resolve) => {
const store: IDBRequest = db.transaction([table], 'readwrite').objectStore(table).count();
store.onerror = (evt) => log('count error:', evt);
store.onsuccess = () => resolve(store.result);
});
}
export async function save(faceRecord: FaceRecord) {
if (!db) await open(); // open or create if not already done
const newRecord = { name: faceRecord.name, descriptor: faceRecord.descriptor, image: faceRecord.image }; // omit id as its autoincrement


@ -135,12 +135,10 @@ async function SelectFaceCanvas(face) {
title('Selected Face');
}
function AddFaceCanvas(index, res, fileName) {
async function AddFaceCanvas(index, res, fileName) {
all[index] = res.face;
let ok = false;
for (const i in res.face) {
if (res.face[i].mesh.length === 0) continue;
ok = true;
if (res.face[i].mesh.length === 0 || !res.face[i].tensor) continue; // did not get valid results
all[index][i].fileName = fileName;
const canvas = document.createElement('canvas');
canvas.tag = { sample: index, face: i, source: fileName };
@ -155,27 +153,22 @@ function AddFaceCanvas(index, res, fileName) {
gender: ${Math.round(100 * res.face[i].genderScore)}% ${res.face[i].gender}
emotion: ${emotion}
`.replace(/ /g, ' ');
// mouse click on any face canvas triggers analysis
await human.tf.browser.toPixels(res.face[i].tensor, canvas);
const ctx = canvas.getContext('2d');
if (!ctx) return false;
ctx.font = 'small-caps 0.8rem "Lato"';
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
ctx.fillText(`${res.face[i].age}y ${(100 * (res.face[i].genderScore || 0)).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6);
const arr = db.map((rec) => rec.embedding);
const result = human.match(res.face[i].embedding, arr);
ctx.font = 'small-caps 1rem "Lato"';
if (result.similarity && res.similarity > minScore) ctx.fillText(`${(100 * result.similarity).toFixed(1)}% ${db[result.index].name}`, 4, canvas.height - 30);
document.getElementById('faces').appendChild(canvas);
canvas.addEventListener('click', (evt) => {
log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, 'Source:', evt.target.tag.source, all[evt.target.tag.sample][evt.target.tag.face]);
SelectFaceCanvas(all[evt.target.tag.sample][evt.target.tag.face]);
});
// if we actually got face image tensor, draw canvas with that face
if (res.face[i].tensor) {
human.tf.browser.toPixels(res.face[i].tensor, canvas);
document.getElementById('faces').appendChild(canvas);
const ctx = canvas.getContext('2d');
if (!ctx) return false;
ctx.font = 'small-caps 0.8rem "Lato"';
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
ctx.fillText(`${res.face[i].age}y ${(100 * (res.face[i].genderScore || 0)).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6);
const arr = db.map((rec) => rec.embedding);
const result = human.match(res.face[i].embedding, arr);
ctx.font = 'small-caps 1rem "Lato"';
if (result.similarity && res.similarity > minScore) ctx.fillText(`${(100 * result.similarity).toFixed(1)}% ${db[result.index].name}`, 4, canvas.height - 30);
}
}
return ok;
}
async function AddImageElement(index, image, length) {
@ -185,8 +178,8 @@ async function AddImageElement(index, image, length) {
const img = new Image(128, 128);
img.onload = () => { // must wait until image is loaded
human.detect(img, userConfig).then((res) => {
const ok = AddFaceCanvas(index, res, image); // then wait until image is analyzed
if (ok) document.getElementById('images').appendChild(img); // and finally we can add it
AddFaceCanvas(index, res, image); // then wait until image is analyzed
document.getElementById('images').appendChild(img); // and finally we can add it
resolve(true);
});
};


@ -23,6 +23,14 @@ export interface FaceDetectorConfig extends GenericConfig {
minConfidence: number,
/** @property minimum overlap between two detected faces before one is discarded */
iouThreshold: number,
/** @property factor used to expand detected face before further analysis
* - default: 1.6
* - for high-quality inputs can be reduced to increase precision
* - for video inputs or low-quality inputs can be increased to allow for more flexible tracking
*/
cropFactor: number,
/** @property should child models run on a masked image of the face */
mask: boolean,
/** @property should face detection return face tensor to be used in some other external model? */
return: boolean,
}
@ -314,6 +322,8 @@ const config: Config = {
skipTime: 2500,
minConfidence: 0.2,
iouThreshold: 0.1,
cropFactor: 1.6,
mask: false,
return: false,
},
mesh: {
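
Taken together, the two new options let callers trade precision for robustness (cropFactor) and optionally blank out non-face pixels before the child models run (mask). A minimal usage sketch, assuming the published package name and the standard `Human` constructor; the element id is illustrative:

import { Human } from '@vladmandic/human';

const human = new Human({
  face: {
    enabled: true,
    detector: {
      rotation: true,   // rotate face crop before running child models
      return: true,     // return face crop as a tensor for external use
      cropFactor: 1.6,  // expand detected face box before further analysis
      mask: true,       // dim pixels outside the face silhouette before child models run
    },
  },
});
const video = document.getElementById('video') as HTMLVideoElement; // assumes an existing <video> element
const result = await human.detect(video);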


@ -17,7 +17,6 @@ let anchorsData: [number, number][] = [];
let anchors: Tensor | null = null;
let inputSize = 0;
// export const size = () => (model && model.inputs[0].shape ? model.inputs[0].shape[2] : 0);
export const size = () => inputSize;
export async function load(config: Config): Promise<GraphModel> {


@ -9,13 +9,15 @@ import * as tf from '../../dist/tfjs.esm.js';
import * as facemesh from './facemesh';
import * as emotion from '../gear/emotion';
import * as faceres from './faceres';
import * as mask from './mask';
import * as antispoof from './antispoof';
import * as liveness from './liveness';
import type { FaceResult } from '../result';
import type { Tensor } from '../tfjs/types';
import type { Human } from '../human';
import { calculateFaceAngle } from './angles';
export const detectFace = async (parent /* instance of human */, input: Tensor): Promise<FaceResult[]> => {
export const detectFace = async (parent: Human /* instance of human */, input: Tensor): Promise<FaceResult[]> => {
// run facemesh, includes blazeface and iris
// eslint-disable-next-line no-async-promise-executor
let timeStamp;
@ -46,16 +48,24 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
continue;
}
const rotation = calculateFaceAngle(faces[i], [input.shape[2], input.shape[1]]);
// optional face mask
if (parent.config.face.detector?.mask) {
const masked = await mask.mask(faces[i]);
tf.dispose(faces[i].tensor);
faces[i].tensor = masked as Tensor;
}
// calculate face angles
const rotation = faces[i].mesh && (faces[i].mesh.length > 200) ? calculateFaceAngle(faces[i], [input.shape[2], input.shape[1]]) : null;
// run emotion, inherits face from blazeface
parent.analyze('Start Emotion:');
if (parent.config.async) {
emotionRes = parent.config.face.emotion.enabled ? emotion.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
emotionRes = parent.config.face.emotion?.enabled ? emotion.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
} else {
parent.state = 'run:emotion';
timeStamp = now();
emotionRes = parent.config.face.emotion.enabled ? await emotion.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
emotionRes = parent.config.face.emotion?.enabled ? await emotion.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
parent.performance.emotion = env.perfadd ? (parent.performance.emotion || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
}
parent.analyze('End Emotion:');
@ -63,11 +73,11 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
// run antispoof, inherits face from blazeface
parent.analyze('Start AntiSpoof:');
if (parent.config.async) {
antispoofRes = parent.config.face.antispoof.enabled ? antispoof.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
antispoofRes = parent.config.face.antispoof?.enabled ? antispoof.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
} else {
parent.state = 'run:antispoof';
timeStamp = now();
antispoofRes = parent.config.face.antispoof.enabled ? await antispoof.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
antispoofRes = parent.config.face.antispoof?.enabled ? await antispoof.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
parent.performance.antispoof = env.perfadd ? (parent.performance.antispoof || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
}
parent.analyze('End AntiSpoof:');
@ -75,11 +85,11 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
// run liveness, inherits face from blazeface
parent.analyze('Start Liveness:');
if (parent.config.async) {
livenessRes = parent.config.face.liveness.enabled ? liveness.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
livenessRes = parent.config.face.liveness?.enabled ? liveness.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
} else {
parent.state = 'run:liveness';
timeStamp = now();
livenessRes = parent.config.face.liveness.enabled ? await liveness.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
livenessRes = parent.config.face.liveness?.enabled ? await liveness.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
parent.performance.antispoof = env.perfadd ? (parent.performance.antispoof || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
}
parent.analyze('End Liveness:');
@ -101,11 +111,11 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
// run emotion, inherits face from blazeface
parent.analyze('Start Description:');
if (parent.config.async) {
descRes = parent.config.face.description.enabled ? faceres.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
descRes = parent.config.face.description?.enabled ? faceres.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
} else {
parent.state = 'run:description';
timeStamp = now();
descRes = parent.config.face.description.enabled ? await faceres.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
descRes = parent.config.face.description?.enabled ? await faceres.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
parent.performance.description = env.perfadd ? (parent.performance.description || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
}
parent.analyze('End Description:');
@ -119,7 +129,7 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
// calculate iris distance
// iris: array[ center, left, top, right, bottom]
if (!parent.config.face.iris.enabled && faces[i]?.annotations?.leftEyeIris && faces[i]?.annotations?.rightEyeIris) {
if (!parent.config.face.iris?.enabled && faces[i]?.annotations?.leftEyeIris && faces[i]?.annotations?.rightEyeIris) {
delete faces[i].annotations.leftEyeIris;
delete faces[i].annotations.rightEyeIris;
}
@ -130,7 +140,7 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
: 0; // note: average human iris size is 11.7mm
// optionally return tensor
const tensor = parent.config.face.detector.return ? tf.squeeze(faces[i].tensor) : null;
const tensor = parent.config.face.detector?.return ? tf.squeeze(faces[i].tensor) : null;
// dispose original face tensor
tf.dispose(faces[i].tensor);
// delete temp face image


@ -25,11 +25,9 @@ let model: GraphModel | null = null;
let inputSize = 0;
let skipped = Number.MAX_SAFE_INTEGER;
let lastTime = 0;
const enlargeFact = 1.6;
export async function predict(input: Tensor, config: Config): Promise<FaceResult[]> {
// reset cached boxes
const skipTime = (config.face.detector?.skipTime || 0) > (now() - lastTime);
const skipFrame = skipped < (config.face.detector?.skipFrames || 0);
if (!config.skipAllowed || !skipTime || !skipFrame || boxCache.length === 0) {
@ -43,7 +41,7 @@ export async function predict(input: Tensor, config: Config): Promise<FaceResult
landmarks: possible.landmarks,
confidence: possible.confidence,
};
boxCache.push(util.squarifyBox(util.enlargeBox(util.scaleBoxCoordinates(box, possibleBoxes.scaleFactor), Math.sqrt(enlargeFact))));
boxCache.push(util.squarifyBox(util.enlargeBox(util.scaleBoxCoordinates(box, possibleBoxes.scaleFactor), Math.sqrt(config.face.detector?.cropFactor || 1.6))));
}
skipped = 0;
} else {
@ -68,7 +66,7 @@ export async function predict(input: Tensor, config: Config): Promise<FaceResult
annotations: {},
};
[angle, rotationMatrix, face.tensor] = util.correctFaceRotation(false && config.face.detector?.rotation, box, input, inputSize); // optional rotate based on detector data // disabled
[angle, rotationMatrix, face.tensor] = util.correctFaceRotation(false && config.face.detector?.rotation, box, input, config.face.mesh?.enabled ? inputSize : blazeface.size()); // optional rotate based on detector data
if (config?.filter?.equalization) {
const equilized = await histogramEqualization(face.tensor as Tensor);
tf.dispose(face.tensor);
@ -101,7 +99,7 @@ export async function predict(input: Tensor, config: Config): Promise<FaceResult
face.mesh = util.transformRawCoords(rawCoords, box, angle, rotationMatrix, inputSize); // get processed mesh
face.meshRaw = face.mesh.map((pt) => [pt[0] / (input.shape[2] || 0), pt[1] / (input.shape[1] || 0), (pt[2] || 0) / inputSize]);
for (const key of Object.keys(coords.meshAnnotations)) face.annotations[key] = coords.meshAnnotations[key].map((index) => face.mesh[index]); // add annotations
box = util.squarifyBox({ ...util.enlargeBox(util.calculateLandmarksBoundingBox(face.mesh), enlargeFact), confidence: box.confidence }); // redefine box with mesh calculated one
box = util.squarifyBox({ ...util.enlargeBox(util.calculateLandmarksBoundingBox(face.mesh), (config.face.detector?.cropFactor || 1.6)), confidence: box.confidence }); // redefine box with mesh calculated one
face.box = util.getClampedBox(box, input); // update detected box with box around the face mesh
face.boxRaw = util.getRawBox(box, input);
face.score = face.faceScore;


@ -39,22 +39,17 @@ export async function load(config: Config): Promise<GraphModel> {
export function enhance(input): Tensor {
const tensor = (input.image || input.tensor || input) as Tensor; // input received from detector is already normalized to 0..1; the input is also assumed to be straightened
if (!model?.inputs[0].shape) return tensor; // model has no shape so no point continuing
// do a tight crop of image and resize it to fit the model
const crop = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
const norm = tf.mul(crop, 255);
tf.dispose(crop);
return norm;
/*
// do a tight crop of image and resize it to fit the model
const box = [[0.05, 0.15, 0.85, 0.85]]; // empirical values for top, left, bottom, right
const crop = (tensor.shape.length === 3)
? tf.image.cropAndResize(tf.expandDims(tensor, 0), box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) // add batch dimension if missing
: tf.image.cropAndResize(tensor, box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
*/
/*
// just resize to fit the embedding model instead of cropping
const crop = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
*/
/*
// convert to black&white to avoid colorization impact
const rgb = [0.2989, 0.5870, 0.1140]; // factors for red/green/blue colors when converting to grayscale: https://www.mathworks.com/help/matlab/ref/rgb2gray.html
@ -65,22 +60,6 @@ export function enhance(input): Tensor {
const grayscale = tf.addN([redNorm, greenNorm, blueNorm]);
const merge = tf.stack([grayscale, grayscale, grayscale], 3).squeeze(4);
*/
/*
// increase image pseudo-contrast 100%
// (or do it per-channel so mean is done on each channel)
// (or calculate histogram and do it based on histogram)
const mean = merge.mean();
const factor = 2;
const contrast = merge.sub(mean).mul(factor).add(mean);
*/
/*
// normalize brightness from 0..1
// silly way of creating pseudo-hdr of image
const darken = crop.sub(crop.min());
const lighten = darken.div(darken.max());
*/
}
export async function predict(image: Tensor, config: Config, idx, count) {

src/face/mask.ts Normal file

@ -0,0 +1,40 @@
import type { Tensor } from '../tfjs/types';
import type { FaceResult } from '../result';
import * as tf from '../../dist/tfjs.esm.js';
import { meshAnnotations } from './facemeshcoords';
const expandFact = 0.1;
const alpha = 0.5;
// point inclusion in polygon based on https://wrf.ecse.rpi.edu/Research/Short_Notes/pnpoly.html
function insidePoly(x: number, y: number, polygon: Array<{ x: number, y: number }>): boolean {
let inside = false;
let j = polygon.length - 1;
for (let i = 0; i < polygon.length; j = i++) {
if (((polygon[i].y > y) !== (polygon[j].y > y)) && (x < (polygon[j].x - polygon[i].x) * (y - polygon[i].y) / (polygon[j].y - polygon[i].y) + polygon[i].x)) inside = !inside;
}
return inside;
}
export async function mask(face: FaceResult): Promise<Tensor | undefined> {
if (!face.tensor) return face.tensor;
const width = face.tensor.shape[2] || 0;
const height = face.tensor.shape[1] || 0;
const buffer = await face.tensor.buffer();
let silhouette: Array<{ x: number, y: number }> = [];
for (const pt of meshAnnotations.silhouette) silhouette.push({ x: (face.mesh[pt][0] - face.box[0]) / face.box[2], y: (face.mesh[pt][1] - face.box[1]) / face.box[3] }); // add all silhouette points scaled to local box
if (expandFact && expandFact > 0) silhouette = silhouette.map((pt) => ({ x: pt.x > 0.5 ? pt.x + expandFact : pt.x - expandFact, y: pt.y > 0.5 ? pt.y + expandFact : pt.y - expandFact })); // expand silhouette
for (let x = 0; x < width; x++) {
for (let y = 0; y < height; y++) {
const inside = insidePoly(x / width, y / height, silhouette);
if (!inside) {
buffer.set(alpha * buffer.get(0, y, x, 0), 0, y, x, 0);
buffer.set(alpha * buffer.get(0, y, x, 1), 0, y, x, 1);
buffer.set(alpha * buffer.get(0, y, x, 2), 0, y, x, 2);
}
}
}
const output = buffer.toTensor();
tf.dispose(buffer);
return output;
}
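
A tiny worked example of the pnpoly test above, using a hypothetical unit square; the coordinates passed in by `mask()` are normalized to the face box, so real inputs fall in roughly the same 0..1 range:

const square = [{ x: 0, y: 0 }, { x: 1, y: 0 }, { x: 1, y: 1 }, { x: 0, y: 1 }];
insidePoly(0.5, 0.5, square); // true - point lies inside the square
insidePoly(0.5, 1.5, square); // false - point lies outside the square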


@ -66,9 +66,10 @@ export const face = (res): GestureResult[] => {
const gestures: Array<{ face: number, gesture: FaceGesture }> = [];
for (let i = 0; i < res.length; i++) {
if (res[i].mesh && res[i].mesh.length > 450) {
const eyeFacing = res[i].mesh[33][2] - res[i].mesh[263][2];
if (Math.abs(eyeFacing) < 10) gestures.push({ face: i, gesture: 'facing center' });
else gestures.push({ face: i, gesture: `facing ${eyeFacing < 0 ? 'left' : 'right'}` });
const zDiff = res[i].mesh[33][2] - res[i].mesh[263][2];
const xDiff = res[i].mesh[33][0] - res[i].mesh[263][0];
if (Math.abs(zDiff / xDiff) <= 0.15) gestures.push({ face: i, gesture: 'facing center' });
else gestures.push({ face: i, gesture: `facing ${zDiff < 0 ? 'left' : 'right'}` });
const openLeft = Math.abs(res[i].mesh[374][1] - res[i].mesh[386][1]) / Math.abs(res[i].mesh[443][1] - res[i].mesh[450][1]); // center of eye inner lid y coord div center of wider eye border y coord
if (openLeft < 0.2) gestures.push({ face: i, gesture: 'blink left eye' });
const openRight = Math.abs(res[i].mesh[145][1] - res[i].mesh[159][1]) / Math.abs(res[i].mesh[223][1] - res[i].mesh[230][1]); // center of eye inner lid y coord div center of wider eye border y coord
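
The facing-center check above now normalizes the depth difference between the outer eye corners (mesh points 33 and 263) by their horizontal distance, so the 0.15 threshold is independent of face size in pixels, unlike the previous fixed 10-unit cutoff. A standalone sketch of the same test (helper name is illustrative):

// left and right are mesh points 33 and 263 as [x, y, z]
const isFacingCenter = (left: number[], right: number[]): boolean =>
  Math.abs((left[2] - right[2]) / (left[0] - right[0])) <= 0.15;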


@ -253,7 +253,7 @@ export class Human {
* - `canvas` as canvas which is the input image filtered with segmentation data and optionally merged with background image; canvas alpha values are set to segmentation values for easy merging
* - `alpha` as grayscale canvas that represents segmentation alpha values
*/
async segmentation(input: Input, background?: Input): Promise<{ data: number[], canvas: HTMLCanvasElement | OffscreenCanvas | null, alpha: HTMLCanvasElement | OffscreenCanvas | null }> {
async segmentation(input: Input, background?: Input): Promise<{ data: number[] | Tensor, canvas: HTMLCanvasElement | OffscreenCanvas | null, alpha: HTMLCanvasElement | OffscreenCanvas | null }> {
return segmentation.process(input, background, this.config);
}
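
A hedged usage sketch for the widened `segmentation()` return type; the config subset and element id are illustrative, and `data` may now be either a plain array or a tensor:

import { Human } from '@vladmandic/human';

const human = new Human({ segmentation: { enabled: true } }); // assumes the segmentation model is enabled in config
const seg = await human.segmentation(document.getElementById('webcam') as HTMLVideoElement);
if (seg.canvas) document.body.appendChild(seg.canvas as HTMLCanvasElement); // input filtered with segmentation data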


@ -54,7 +54,7 @@ export interface FaceResult {
angle: { roll: number, yaw: number, pitch: number },
matrix: [number, number, number, number, number, number, number, number, number],
gaze: { bearing: number, strength: number },
}
} | null,
/** detected face as tensor that can be used in further pipelines */
tensor?: Tensor,
}


@ -1,5 +1,6 @@
/** TFJS backend initialization and customization */
import type { Human } from '../human';
import { log, now } from '../util/util';
import { env } from '../util/env';
import * as humangl from './humangl';
@ -26,7 +27,7 @@ function registerCustomOps() {
}
}
export async function check(instance, force = false) {
export async function check(instance: Human, force = false) {
instance.state = 'backend';
if (force || env.initial || (instance.config.backend && (instance.config.backend.length > 0) && (tf.getBackend() !== instance.config.backend))) {
const timeStamp = now();


@ -1,5 +1,6 @@
/** TFJS custom backend registration */
import type { Human } from '../human';
import { log } from '../util/util';
import * as tf from '../../dist/tfjs.esm.js';
import * as image from '../image/image';
@ -40,7 +41,7 @@ function extensions(): void {
*
* @returns void
*/
export async function register(instance): Promise<void> {
export async function register(instance: Human): Promise<void> {
// force backend reload if gl context is not valid
if (instance.config.backend !== 'humangl') return;
if ((config.name in tf.engine().registry) && (!config.gl || !config.gl.getParameter(config.gl.VERSION))) {


@ -114,23 +114,26 @@ export function calc(newResult: Result, config: Config): Result {
.map((b, j) => ((bufferedFactor - 1) * bufferedResult.face[i].box[j] + b) / bufferedFactor)) as Box;
const boxRaw = (newResult.face[i].boxRaw // update boxRaw
.map((b, j) => ((bufferedFactor - 1) * bufferedResult.face[i].boxRaw[j] + b) / bufferedFactor)) as Box;
const rotation: {
matrix: [number, number, number, number, number, number, number, number, number],
angle: { roll: number, yaw: number, pitch: number },
gaze: { bearing: number, strength: number }
} = { matrix: [0, 0, 0, 0, 0, 0, 0, 0, 0], angle: { roll: 0, yaw: 0, pitch: 0 }, gaze: { bearing: 0, strength: 0 } };
rotation.matrix = newResult.face[i].rotation?.matrix as [number, number, number, number, number, number, number, number, number];
rotation.angle = {
roll: ((bufferedFactor - 1) * (bufferedResult.face[i].rotation?.angle?.roll || 0) + (newResult.face[i].rotation?.angle?.roll || 0)) / bufferedFactor,
yaw: ((bufferedFactor - 1) * (bufferedResult.face[i].rotation?.angle?.yaw || 0) + (newResult.face[i].rotation?.angle?.yaw || 0)) / bufferedFactor,
pitch: ((bufferedFactor - 1) * (bufferedResult.face[i].rotation?.angle?.pitch || 0) + (newResult.face[i].rotation?.angle?.pitch || 0)) / bufferedFactor,
};
rotation.gaze = {
// not fully correct due projection on circle, also causes wrap-around draw on jump from negative to positive
bearing: ((bufferedFactor - 1) * (bufferedResult.face[i].rotation?.gaze?.bearing || 0) + (newResult.face[i].rotation?.gaze?.bearing || 0)) / bufferedFactor,
strength: ((bufferedFactor - 1) * (bufferedResult.face[i].rotation?.gaze?.strength || 0) + (newResult.face[i].rotation?.gaze?.strength || 0)) / bufferedFactor,
};
bufferedResult.face[i] = { ...newResult.face[i], rotation, box, boxRaw }; // shallow clone plus updated values
if (newResult.face[i].rotation) {
const rotation: {
matrix: [number, number, number, number, number, number, number, number, number],
angle: { roll: number, yaw: number, pitch: number },
gaze: { bearing: number, strength: number }
} = { matrix: [0, 0, 0, 0, 0, 0, 0, 0, 0], angle: { roll: 0, yaw: 0, pitch: 0 }, gaze: { bearing: 0, strength: 0 } };
rotation.matrix = newResult.face[i].rotation?.matrix as [number, number, number, number, number, number, number, number, number];
rotation.angle = {
roll: ((bufferedFactor - 1) * (bufferedResult.face[i].rotation?.angle?.roll || 0) + (newResult.face[i].rotation?.angle?.roll || 0)) / bufferedFactor,
yaw: ((bufferedFactor - 1) * (bufferedResult.face[i].rotation?.angle?.yaw || 0) + (newResult.face[i].rotation?.angle?.yaw || 0)) / bufferedFactor,
pitch: ((bufferedFactor - 1) * (bufferedResult.face[i].rotation?.angle?.pitch || 0) + (newResult.face[i].rotation?.angle?.pitch || 0)) / bufferedFactor,
};
rotation.gaze = {
// not fully correct due to projection on a circle; also causes wrap-around draw on jump from negative to positive
bearing: ((bufferedFactor - 1) * (bufferedResult.face[i].rotation?.gaze?.bearing || 0) + (newResult.face[i].rotation?.gaze?.bearing || 0)) / bufferedFactor,
strength: ((bufferedFactor - 1) * (bufferedResult.face[i].rotation?.gaze?.strength || 0) + (newResult.face[i].rotation?.gaze?.strength || 0)) / bufferedFactor,
};
bufferedResult.face[i] = { ...newResult.face[i], rotation, box, boxRaw }; // shallow clone plus updated values
}
bufferedResult.face[i] = { ...newResult.face[i], box, boxRaw }; // shallow clone plus updated values
}
}


@ -8,9 +8,11 @@ import * as tf from '../dist/tfjs.esm.js';
import * as image from './image/image';
import type { Config } from './config';
import type { Result } from './result';
import type { Human } from './human';
import type { Tensor } from './tfjs/types';
import { env } from './util/env';
async function warmupBitmap(instance) {
async function warmupBitmap(instance: Human) {
const b64toBlob = (base64: string, type = 'application/octet-stream') => fetch(`data:${type};base64,${base64}`).then((res) => res.blob());
let blob;
let res;
@ -28,7 +30,7 @@ async function warmupBitmap(instance) {
return res;
}
async function warmupCanvas(instance) {
async function warmupCanvas(instance: Human) {
return new Promise((resolve) => {
let src;
// let size = 0;
@ -60,7 +62,7 @@ async function warmupCanvas(instance) {
if (ctx) ctx.drawImage(img, 0, 0);
// const data = ctx?.getImageData(0, 0, canvas.height, canvas.width);
const tensor = await instance.image(canvas);
const res = await instance.detect(tensor.tensor, instance.config);
const res = await instance.detect(tensor.tensor as Tensor, instance.config);
resolve(res);
}
};
@ -69,7 +71,7 @@ async function warmupCanvas(instance) {
});
}
async function warmupNode(instance) {
async function warmupNode(instance: Human) {
const atob = (str: string) => Buffer.from(str, 'base64');
let img;
if (instance.config.warmup === 'face') img = atob(sample.face);
@ -101,7 +103,7 @@ async function warmupNode(instance) {
* - only used for `webgl` and `humangl` backends
* @param userConfig?: Config
*/
export async function warmup(instance, userConfig?: Partial<Config>): Promise<Result | { error }> {
export async function warmup(instance: Human, userConfig?: Partial<Config>): Promise<Result | { error }> {
const t0 = now();
instance.state = 'warmup';
if (userConfig) instance.config = mergeDeep(instance.config, userConfig) as Config;