mirror of https://github.com/vladmandic/human
implement optional face masking
parent 8b56de5140
commit 4f2993a2f5
@@ -40,6 +40,7 @@
     "@typescript-eslint/explicit-module-boundary-types": "off",
     "@typescript-eslint/no-shadow": "error",
     "@typescript-eslint/no-var-requires": "off",
+    "@typescript-eslint/prefer-as-const": "off",
     "@typescript-eslint/triple-slash-reference": "off",
     "@typescript-eslint/no-inferrable-types": "off",
     "@typescript-eslint/no-empty-interface": ["error", { "allowSingleExtends": true }],
@@ -11,6 +11,8 @@
 
 ### **HEAD -> main** 2021/11/11 mandic00@live.com
 
+- add similarity score range normalization
+- add faceid demo
 - documentation overhaul
 - auto tensor shape and channels handling
 - disable use of path2d in node
@@ -18,14 +18,15 @@
   html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
   body { margin: 0; padding: 16px; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
   body::-webkit-scrollbar { display: none; }
-  .button { padding: 2px; cursor: pointer; box-shadow: 2px 2px black; width: 64px; text-align: center; margin-left: 16px; height: 16px; display: none }
+  .button { padding: 2px; cursor: pointer; box-shadow: 2px 2px black; width: 64px; text-align: center; place-content: center; margin-left: 16px; height: 16px; display: none }
+  .ok { position: absolute; top: 64px; right: 20px; width: 100px; background-color: grey; padding: 4px; color: black; font-size: 14px }
 </style>
 </head>
 <body>
 <canvas id="canvas" style="padding: 8px"></canvas>
 <canvas id="source" style="padding: 8px"></canvas>
 <video id="video" playsinline style="display: none"></video>
-<pre id="fps" style="position: absolute; top: 12px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
+<pre id="fps" style="position: absolute; bottom: 16px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
 <pre id="log" style="padding: 8px"></pre>
 <div id="match" style="display: none; padding: 8px">
 <label for="name">name:</label>

@@ -33,7 +34,7 @@
 <span id="save" class="button" style="background-color: royalblue">save</span>
 <span id="delete" class="button" style="background-color: lightcoral">delete</span>
 </div>
-<div id="retry" class="button" style="background-color: darkslategray; width: 350px">retry</div>
-<div id="status" style="position: absolute; bottom: 0; width: 100%; padding: 8px; font-size: 0.8rem;"></div>
+<div id="retry" class="button" style="background-color: darkslategray; width: 350px; margin-top: 32px; padding: 4px">retry</div>
+<div id="ok"></div>
 </body>
 </html>
@@ -47,6 +47,15 @@ async function load() {
     };
   });
 }
+async function count() {
+  if (!db)
+    await open();
+  return new Promise((resolve) => {
+    const store = db.transaction([table], "readwrite").objectStore(table).count();
+    store.onerror = (evt) => log("count error:", evt);
+    store.onsuccess = () => resolve(store.result);
+  });
+}
 async function save(faceRecord) {
   if (!db)
     await open();

@@ -62,15 +71,12 @@ async function remove(faceRecord) {
 }
 
 // demo/faceid/index.ts
-var db2 = [];
-var face;
-var current;
 var humanConfig = {
   modelBasePath: "../../models",
   filter: { equalization: true },
   face: {
     enabled: true,
-    detector: { rotation: true, return: true },
+    detector: { rotation: true, return: true, cropFactor: 1.6, mask: false },
     description: { enabled: true },
     iris: { enabled: true },
     emotion: { enabled: false },

@@ -88,19 +94,24 @@ var options = {
   maxTime: 1e4,
   blinkMin: 10,
   blinkMax: 800,
-  threshold: 0.5
+  threshold: 0.5,
+  mask: humanConfig.face.detector.mask,
+  rotation: humanConfig.face.detector.rotation,
+  cropFactor: humanConfig.face.detector.cropFactor
 };
 var ok = {
   faceCount: false,
   faceConfidence: false,
   facingCenter: false,
+  lookingCenter: false,
   blinkDetected: false,
   faceSize: false,
   antispoofCheck: false,
   livenessCheck: false,
   elapsedMs: 0
 };
-var allOk = () => ok.faceCount && ok.faceSize && ok.blinkDetected && ok.facingCenter && ok.faceConfidence && ok.antispoofCheck && ok.livenessCheck;
+var allOk = () => ok.faceCount && ok.faceSize && ok.blinkDetected && ok.facingCenter && ok.lookingCenter && ok.faceConfidence && ok.antispoofCheck && ok.livenessCheck;
+var current = { face: null, record: null };
 var blink = {
   start: 0,
   end: 0,

@@ -115,13 +126,13 @@ var dom = {
   canvas: document.getElementById("canvas"),
   log: document.getElementById("log"),
   fps: document.getElementById("fps"),
-  status: document.getElementById("status"),
   match: document.getElementById("match"),
   name: document.getElementById("name"),
   save: document.getElementById("save"),
   delete: document.getElementById("delete"),
   retry: document.getElementById("retry"),
-  source: document.getElementById("source")
+  source: document.getElementById("source"),
+  ok: document.getElementById("ok")
 };
 var timestamp = { detect: 0, draw: 0 };
 var fps = { detect: 0, draw: 0 };

@@ -131,7 +142,6 @@ var log2 = (...msg) => {
   console.log(...msg);
 };
 var printFPS = (msg) => dom.fps.innerText = msg;
-var printStatus = (msg) => dom.status.innerText = "status: " + JSON.stringify(msg).replace(/"|{|}/g, "").replace(/,/g, " | ");
 async function webCam() {
   printFPS("starting webcam...");
   const cameraOptions = { audio: false, video: { facingMode: "user", resizeMode: "none", width: { ideal: document.body.clientWidth } } };

@@ -155,8 +165,8 @@ async function webCam() {
 }
 async function detectionLoop() {
   if (!dom.video.paused) {
-    if (face && face.tensor)
-      human.tf.dispose(face.tensor);
+    if (current.face && current.face.tensor)
+      human.tf.dispose(current.face.tensor);
     await human.detect(dom.video);
     const now = human.now();
     fps.detect = 1e3 / (now - timestamp.detect);

@@ -179,16 +189,32 @@ async function validationLoop() {
       blink.start = human.now();
     if (blink.start > 0 && !gestures.includes("blink left eye") && !gestures.includes("blink right eye"))
       blink.end = human.now();
-    ok.blinkDetected = ok.blinkDetected || blink.end - blink.start > options.blinkMin && blink.end - blink.start < options.blinkMax;
+    ok.blinkDetected = ok.blinkDetected || Math.abs(blink.end - blink.start) > options.blinkMin && Math.abs(blink.end - blink.start) < options.blinkMax;
     if (ok.blinkDetected && blink.time === 0)
      blink.time = Math.trunc(blink.end - blink.start);
-    ok.facingCenter = gestures.includes("facing center") && gestures.includes("looking center");
+    ok.facingCenter = gestures.includes("facing center");
+    ok.lookingCenter = gestures.includes("looking center");
     ok.faceConfidence = (human.result.face[0].boxScore || 0) > options.minConfidence && (human.result.face[0].faceScore || 0) > options.minConfidence && (human.result.face[0].genderScore || 0) > options.minConfidence;
     ok.antispoofCheck = (human.result.face[0].real || 0) > options.minConfidence;
     ok.livenessCheck = (human.result.face[0].live || 0) > options.minConfidence;
     ok.faceSize = human.result.face[0].box[2] >= options.minSize && human.result.face[0].box[3] >= options.minSize;
   }
-  printStatus(ok);
+  let y = 32;
+  for (const [key, val] of Object.entries(ok)) {
+    let el = document.getElementById(`ok-${key}`);
+    if (!el) {
+      el = document.createElement("div");
+      el.innerText = key;
+      el.className = "ok";
+      el.style.top = `${y}px`;
+      dom.ok.appendChild(el);
+    }
+    if (typeof val === "boolean")
+      el.style.backgroundColor = val ? "lightgreen" : "lightcoral";
+    else
+      el.innerText = `${key}:${val}`;
+    y += 28;
+  }
   if (allOk()) {
     dom.video.pause();
     return human.result.face[0];

@@ -208,46 +234,48 @@ async function validationLoop() {
   }
 }
 async function saveRecords() {
-  var _a;
+  var _a, _b;
   if (dom.name.value.length > 0) {
     const image = (_a = dom.canvas.getContext("2d")) == null ? void 0 : _a.getImageData(0, 0, dom.canvas.width, dom.canvas.height);
-    const rec = { id: 0, name: dom.name.value, descriptor: face.embedding, image };
+    const rec = { id: 0, name: dom.name.value, descriptor: (_b = current.face) == null ? void 0 : _b.embedding, image };
     await save(rec);
     log2("saved face record:", rec.name);
-    db2.push(rec);
   } else {
     log2("invalid name");
   }
 }
 async function deleteRecord() {
-  if (current.id > 0) {
-    await remove(current);
+  if (current.record && current.record.id > 0) {
+    await remove(current.record);
   }
 }
 async function detectFace() {
   var _a, _b;
   (_a = dom.canvas.getContext("2d")) == null ? void 0 : _a.clearRect(0, 0, options.minSize, options.minSize);
-  if (!face || !face.tensor || !face.embedding)
-    return 0;
-  human.tf.browser.toPixels(face.tensor, dom.canvas);
-  const descriptors = db2.map((rec) => rec.descriptor);
-  const res = await human.match(face.embedding, descriptors);
-  if (res.index === -1) {
-    log2("no matches");
+  if (!current.face || !current.face.tensor || !current.face.embedding)
+    return false;
+  human.tf.browser.toPixels(current.face.tensor, dom.canvas);
+  if (await count() === 0) {
+    log2("face database is empty");
+    document.body.style.background = "black";
     dom.delete.style.display = "none";
-    dom.source.style.display = "none";
-  } else {
-    current = db2[res.index];
-    log2(`best match: ${current.name} | id: ${current.id} | similarity: ${Math.round(1e3 * res.similarity) / 10}%`);
-    dom.delete.style.display = "";
-    dom.name.value = current.name;
-    dom.source.style.display = "";
-    (_b = dom.source.getContext("2d")) == null ? void 0 : _b.putImageData(current.image, 0, 0);
+    return false;
   }
+  const db2 = await load();
+  const descriptors = db2.map((rec) => rec.descriptor);
+  const res = await human.match(current.face.embedding, descriptors);
+  current.record = db2[res.index] || null;
+  if (current.record) {
+    log2(`best match: ${current.record.name} | id: ${current.record.id} | similarity: ${Math.round(1e3 * res.similarity) / 10}%`);
+    dom.name.value = current.record.name;
+    dom.source.style.display = "";
+    (_b = dom.source.getContext("2d")) == null ? void 0 : _b.putImageData(current.record.image, 0, 0);
+  }
+  document.body.style.background = res.similarity > options.threshold ? "darkgreen" : "maroon";
   return res.similarity > options.threshold;
 }
 async function main() {
-  var _a, _b;
+  var _a, _b, _c, _d;
   ok.faceCount = false;
   ok.faceConfidence = false;
   ok.facingCenter = false;

@@ -258,34 +286,33 @@ async function main() {
   ok.elapsedMs = 0;
   dom.match.style.display = "none";
   dom.retry.style.display = "none";
+  dom.source.style.display = "none";
   document.body.style.background = "black";
   await webCam();
   await detectionLoop();
   startTime = human.now();
-  face = await validationLoop();
-  dom.fps.style.display = "none";
-  dom.canvas.width = ((_a = face == null ? void 0 : face.tensor) == null ? void 0 : _a.shape[1]) || options.minSize;
-  dom.canvas.height = ((_b = face == null ? void 0 : face.tensor) == null ? void 0 : _b.shape[0]) || options.minSize;
+  current.face = await validationLoop();
+  dom.canvas.width = ((_b = (_a = current.face) == null ? void 0 : _a.tensor) == null ? void 0 : _b.shape[1]) || options.minSize;
+  dom.canvas.height = ((_d = (_c = current.face) == null ? void 0 : _c.tensor) == null ? void 0 : _d.shape[0]) || options.minSize;
   dom.source.width = dom.canvas.width;
   dom.source.height = dom.canvas.height;
   dom.canvas.style.width = "";
   dom.match.style.display = "flex";
+  dom.save.style.display = "flex";
+  dom.delete.style.display = "flex";
   dom.retry.style.display = "block";
   if (!allOk()) {
     log2("did not find valid face");
     return false;
   } else {
-    const res = await detectFace();
-    document.body.style.background = res ? "darkgreen" : "maroon";
-    return res;
+    return detectFace();
   }
 }
 async function init() {
   log2("human version:", human.version, "| tfjs version:", human.tf.version_core);
   log2("options:", JSON.stringify(options).replace(/{|}|"|\[|\]/g, "").replace(/,/g, " "));
   printFPS("loading...");
-  db2 = await load();
-  log2("loaded face records:", db2.length);
+  log2("known face records:", await count());
   await webCam();
   await human.load();
   printFPS("initializing...");
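Note: the hunks above modify what appears to be the bundled demo script (the embedded `// demo/faceid/index.ts` marker and the down-leveled `var _a` optional-chaining helpers point to a build output); the TypeScript source it is generated from follows, with the same changes.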
@@ -10,16 +10,12 @@
 import { Human, TensorLike, FaceResult } from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human
 import * as indexDb from './indexdb'; // methods to deal with indexdb
 
-let db: Array<indexDb.FaceRecord> = []; // face descriptor database stored in indexdb
-let face: FaceResult; // face result from human.detect
-let current: indexDb.FaceRecord; // currently matched db record
-
 const humanConfig = { // user configuration for human, used to fine-tune behavior
   modelBasePath: '../../models',
   filter: { equalization: true }, // lets run with histogram equilizer
   face: {
     enabled: true,
-    detector: { rotation: true, return: true }, // return tensor is used to get detected face image
+    detector: { rotation: true, return: true, cropFactor: 1.6, mask: false }, // return tensor is used to get detected face image
     description: { enabled: true },
     iris: { enabled: true }, // needed to determine gaze direction
     emotion: { enabled: false }, // not needed

@@ -39,19 +35,24 @@ const options = {
   blinkMin: 10, // minimum duration of a valid blink
   blinkMax: 800, // maximum duration of a valid blink
   threshold: 0.5, // minimum similarity
+  mask: humanConfig.face.detector.mask,
+  rotation: humanConfig.face.detector.rotation,
+  cropFactor: humanConfig.face.detector.cropFactor,
 };
 
 const ok = { // must meet all rules
   faceCount: false,
   faceConfidence: false,
   facingCenter: false,
+  lookingCenter: false,
   blinkDetected: false,
   faceSize: false,
   antispoofCheck: false,
   livenessCheck: false,
   elapsedMs: 0, // total time while waiting for valid face
 };
-const allOk = () => ok.faceCount && ok.faceSize && ok.blinkDetected && ok.facingCenter && ok.faceConfidence && ok.antispoofCheck && ok.livenessCheck;
+const allOk = () => ok.faceCount && ok.faceSize && ok.blinkDetected && ok.facingCenter && ok.lookingCenter && ok.faceConfidence && ok.antispoofCheck && ok.livenessCheck;
+const current: { face: FaceResult | null, record: indexDb.FaceRecord | null } = { face: null, record: null }; // current face record and matched database record
 
 const blink = { // internal timers for blink start/end/duration
   start: 0,

@@ -71,13 +72,13 @@ const dom = { // grab instances of dom objects so we dont have to look them up l
   canvas: document.getElementById('canvas') as HTMLCanvasElement,
   log: document.getElementById('log') as HTMLPreElement,
   fps: document.getElementById('fps') as HTMLPreElement,
-  status: document.getElementById('status') as HTMLPreElement,
   match: document.getElementById('match') as HTMLDivElement,
   name: document.getElementById('name') as HTMLInputElement,
   save: document.getElementById('save') as HTMLSpanElement,
   delete: document.getElementById('delete') as HTMLSpanElement,
   retry: document.getElementById('retry') as HTMLDivElement,
   source: document.getElementById('source') as HTMLCanvasElement,
+  ok: document.getElementById('ok') as HTMLDivElement,
 };
 const timestamp = { detect: 0, draw: 0 }; // holds information used to calculate performance and possible memory leaks
 const fps = { detect: 0, draw: 0 }; // holds calculated fps information for both detect and screen refresh

@@ -89,7 +90,6 @@ const log = (...msg) => { // helper method to output messages
   console.log(...msg);
 };
 const printFPS = (msg) => dom.fps.innerText = msg; // print status element
-const printStatus = (msg) => dom.status.innerText = 'status: ' + JSON.stringify(msg).replace(/"|{|}/g, '').replace(/,/g, ' | '); // print status element
 
 async function webCam() { // initialize webcam
   printFPS('starting webcam...');

@@ -111,7 +111,7 @@ async function webCam() { // initialize webcam
 
 async function detectionLoop() { // main detection loop
   if (!dom.video.paused) {
-    if (face && face.tensor) human.tf.dispose(face.tensor); // dispose previous tensor
+    if (current.face && current.face.tensor) human.tf.dispose(current.face.tensor); // dispose previous tensor
     await human.detect(dom.video); // actual detection; were not capturing output in a local variable as it can also be reached via human.result
     const now = human.now();
     fps.detect = 1000 / (now - timestamp.detect);

@@ -133,15 +133,29 @@ async function validationLoop(): Promise<FaceResult> { // main screen refresh lo
     const gestures: string[] = Object.values(human.result.gesture).map((gesture) => gesture.gesture); // flatten all gestures
     if (gestures.includes('blink left eye') || gestures.includes('blink right eye')) blink.start = human.now(); // blink starts when eyes get closed
     if (blink.start > 0 && !gestures.includes('blink left eye') && !gestures.includes('blink right eye')) blink.end = human.now(); // if blink started how long until eyes are back open
-    ok.blinkDetected = ok.blinkDetected || (blink.end - blink.start > options.blinkMin && blink.end - blink.start < options.blinkMax);
+    ok.blinkDetected = ok.blinkDetected || (Math.abs(blink.end - blink.start) > options.blinkMin && Math.abs(blink.end - blink.start) < options.blinkMax);
     if (ok.blinkDetected && blink.time === 0) blink.time = Math.trunc(blink.end - blink.start);
-    ok.facingCenter = gestures.includes('facing center') && gestures.includes('looking center'); // must face camera and look at camera
+    ok.facingCenter = gestures.includes('facing center');
+    ok.lookingCenter = gestures.includes('looking center'); // must face camera and look at camera
     ok.faceConfidence = (human.result.face[0].boxScore || 0) > options.minConfidence && (human.result.face[0].faceScore || 0) > options.minConfidence && (human.result.face[0].genderScore || 0) > options.minConfidence;
     ok.antispoofCheck = (human.result.face[0].real || 0) > options.minConfidence;
     ok.livenessCheck = (human.result.face[0].live || 0) > options.minConfidence;
     ok.faceSize = human.result.face[0].box[2] >= options.minSize && human.result.face[0].box[3] >= options.minSize;
   }
-  printStatus(ok);
+  let y = 32;
+  for (const [key, val] of Object.entries(ok)) {
+    let el = document.getElementById(`ok-${key}`);
+    if (!el) {
+      el = document.createElement('div');
+      el.innerText = key;
+      el.className = 'ok';
+      el.style.top = `${y}px`;
+      dom.ok.appendChild(el);
+    }
+    if (typeof val === 'boolean') el.style.backgroundColor = val ? 'lightgreen' : 'lightcoral';
+    else el.innerText = `${key}:${val}`;
+    y += 28;
+  }
   if (allOk()) { // all criteria met
     dom.video.pause();
     return human.result.face[0];

@@ -163,39 +177,41 @@ async function validationLoop(): Promise<FaceResult> { // main screen refresh lo
 async function saveRecords() {
   if (dom.name.value.length > 0) {
     const image = dom.canvas.getContext('2d')?.getImageData(0, 0, dom.canvas.width, dom.canvas.height) as ImageData;
-    const rec = { id: 0, name: dom.name.value, descriptor: face.embedding as number[], image };
+    const rec = { id: 0, name: dom.name.value, descriptor: current.face?.embedding as number[], image };
     await indexDb.save(rec);
     log('saved face record:', rec.name);
-    db.push(rec);
   } else {
     log('invalid name');
   }
 }
 
 async function deleteRecord() {
-  if (current.id > 0) {
-    await indexDb.remove(current);
+  if (current.record && current.record.id > 0) {
+    await indexDb.remove(current.record);
   }
 }
 
 async function detectFace() {
   dom.canvas.getContext('2d')?.clearRect(0, 0, options.minSize, options.minSize);
-  if (!face || !face.tensor || !face.embedding) return 0;
-  human.tf.browser.toPixels(face.tensor as unknown as TensorLike, dom.canvas);
-  const descriptors = db.map((rec) => rec.descriptor);
-  const res = await human.match(face.embedding, descriptors);
-  if (res.index === -1) {
-    log('no matches');
+  if (!current.face || !current.face.tensor || !current.face.embedding) return false;
+  human.tf.browser.toPixels(current.face.tensor as unknown as TensorLike, dom.canvas);
+  if (await indexDb.count() === 0) {
+    log('face database is empty');
+    document.body.style.background = 'black';
     dom.delete.style.display = 'none';
-    dom.source.style.display = 'none';
-  } else {
-    current = db[res.index];
-    log(`best match: ${current.name} | id: ${current.id} | similarity: ${Math.round(1000 * res.similarity) / 10}%`);
-    dom.delete.style.display = '';
-    dom.name.value = current.name;
-    dom.source.style.display = '';
-    dom.source.getContext('2d')?.putImageData(current.image, 0, 0);
+    return false;
   }
+  const db = await indexDb.load();
+  const descriptors = db.map((rec) => rec.descriptor);
+  const res = await human.match(current.face.embedding, descriptors);
+  current.record = db[res.index] || null;
+  if (current.record) {
+    log(`best match: ${current.record.name} | id: ${current.record.id} | similarity: ${Math.round(1000 * res.similarity) / 10}%`);
+    dom.name.value = current.record.name;
+    dom.source.style.display = '';
+    dom.source.getContext('2d')?.putImageData(current.record.image, 0, 0);
+  }
+  document.body.style.background = res.similarity > options.threshold ? 'darkgreen' : 'maroon';
   return res.similarity > options.threshold;
 }
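Note: the reworked `detectFace()` no longer relies on a module-level copy of the database: it bails out early when the store is empty, loads the records fresh, and keeps the best match on `current.record`. The matching step, isolated as a sketch (assumes `human.match()` resolves with `{ index, similarity }` as used above; `bestMatch` is a hypothetical helper, not part of the commit):

```ts
async function bestMatch(embedding: number[], records: indexDb.FaceRecord[]) {
  const descriptors = records.map((rec) => rec.descriptor); // compare against every stored descriptor
  const res = await human.match(embedding, descriptors);
  return { record: records[res.index] || null, similarity: res.similarity };
}
```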
@@ -210,27 +226,26 @@ async function main() { // main entry point
   ok.elapsedMs = 0;
   dom.match.style.display = 'none';
   dom.retry.style.display = 'none';
+  dom.source.style.display = 'none';
   document.body.style.background = 'black';
   await webCam();
   await detectionLoop(); // start detection loop
   startTime = human.now();
-  face = await validationLoop(); // start validation loop
-  dom.fps.style.display = 'none';
-  dom.canvas.width = face?.tensor?.shape[1] || options.minSize;
-  dom.canvas.height = face?.tensor?.shape[0] || options.minSize;
+  current.face = await validationLoop(); // start validation loop
+  dom.canvas.width = current.face?.tensor?.shape[1] || options.minSize;
+  dom.canvas.height = current.face?.tensor?.shape[0] || options.minSize;
   dom.source.width = dom.canvas.width;
   dom.source.height = dom.canvas.height;
   dom.canvas.style.width = '';
   dom.match.style.display = 'flex';
+  dom.save.style.display = 'flex';
+  dom.delete.style.display = 'flex';
   dom.retry.style.display = 'block';
-  if (!allOk()) {
+  if (!allOk()) { // is all criteria met?
     log('did not find valid face');
     return false;
   } else {
-    // log('found valid face');
-    const res = await detectFace();
-    document.body.style.background = res ? 'darkgreen' : 'maroon';
-    return res;
+    return detectFace();
   }
 }

@@ -238,8 +253,7 @@ async function init() {
   log('human version:', human.version, '| tfjs version:', human.tf.version_core);
   log('options:', JSON.stringify(options).replace(/{|}|"|\[|\]/g, '').replace(/,/g, ' '));
   printFPS('loading...');
-  db = await indexDb.load(); // load face database from indexdb
-  log('loaded face records:', db.length);
+  log('known face records:', await indexDb.count());
   await webCam(); // start webcam
   await human.load(); // preload all models
   printFPS('initializing...');
@@ -43,6 +43,15 @@ export async function load(): Promise<FaceRecord[]> {
   });
 }
 
+export async function count(): Promise<number> {
+  if (!db) await open(); // open or create if not already done
+  return new Promise((resolve) => {
+    const store: IDBRequest = db.transaction([table], 'readwrite').objectStore(table).count();
+    store.onerror = (evt) => log('count error:', evt);
+    store.onsuccess = () => resolve(store.result);
+  });
+}
+
 export async function save(faceRecord: FaceRecord) {
   if (!db) await open(); // open or create if not already done
   const newRecord = { name: faceRecord.name, descriptor: faceRecord.descriptor, image: faceRecord.image }; // omit id as its autoincrement
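Note: the new `count()` helper follows the same open-on-demand pattern as `save()` and `remove()`, wrapping an IndexedDB count request in a promise. A minimal usage sketch (hypothetical caller, not part of this commit):

```ts
import * as indexDb from './indexdb';

async function reportDatabaseSize(): Promise<void> {
  const records = await indexDb.count(); // resolves with the number of stored face records
  console.log(records === 0 ? 'face database is empty' : `known face records: ${records}`);
}
```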
@@ -135,12 +135,10 @@ async function SelectFaceCanvas(face) {
   title('Selected Face');
 }
 
-function AddFaceCanvas(index, res, fileName) {
+async function AddFaceCanvas(index, res, fileName) {
   all[index] = res.face;
-  let ok = false;
   for (const i in res.face) {
-    if (res.face[i].mesh.length === 0) continue;
-    ok = true;
+    if (res.face[i].mesh.length === 0 || !res.face[i].tensor) continue; // did not get valid results
     all[index][i].fileName = fileName;
     const canvas = document.createElement('canvas');
     canvas.tag = { sample: index, face: i, source: fileName };

@@ -155,15 +153,7 @@ function AddFaceCanvas(index, res, fileName) {
       gender: ${Math.round(100 * res.face[i].genderScore)}% ${res.face[i].gender}
       emotion: ${emotion}
     `.replace(/ /g, ' ');
-    // mouse click on any face canvas triggers analysis
-    canvas.addEventListener('click', (evt) => {
-      log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, 'Source:', evt.target.tag.source, all[evt.target.tag.sample][evt.target.tag.face]);
-      SelectFaceCanvas(all[evt.target.tag.sample][evt.target.tag.face]);
-    });
-    // if we actually got face image tensor, draw canvas with that face
-    if (res.face[i].tensor) {
-      human.tf.browser.toPixels(res.face[i].tensor, canvas);
-      document.getElementById('faces').appendChild(canvas);
+    await human.tf.browser.toPixels(res.face[i].tensor, canvas);
     const ctx = canvas.getContext('2d');
     if (!ctx) return false;
     ctx.font = 'small-caps 0.8rem "Lato"';

@@ -173,9 +163,12 @@ function AddFaceCanvas(index, res, fileName) {
     const result = human.match(res.face[i].embedding, arr);
     ctx.font = 'small-caps 1rem "Lato"';
     if (result.similarity && res.similarity > minScore) ctx.fillText(`${(100 * result.similarity).toFixed(1)}% ${db[result.index].name}`, 4, canvas.height - 30);
+    document.getElementById('faces').appendChild(canvas);
+    canvas.addEventListener('click', (evt) => {
+      log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, 'Source:', evt.target.tag.source, all[evt.target.tag.sample][evt.target.tag.face]);
+      SelectFaceCanvas(all[evt.target.tag.sample][evt.target.tag.face]);
+    });
   }
-  }
-  return ok;
 }

@@ -185,8 +178,8 @@ async function AddImageElement(index, image, length) {
   const img = new Image(128, 128);
   img.onload = () => { // must wait until image is loaded
     human.detect(img, userConfig).then((res) => {
-      const ok = AddFaceCanvas(index, res, image); // then wait until image is analyzed
-      if (ok) document.getElementById('images').appendChild(img); // and finally we can add it
+      AddFaceCanvas(index, res, image); // then wait until image is analyzed
+      document.getElementById('images').appendChild(img); // and finally we can add it
       resolve(true);
     });
   };
@@ -23,6 +23,14 @@ export interface FaceDetectorConfig extends GenericConfig {
   minConfidence: number,
   /** @property minimum overlap between two detected faces before one is discarded */
   iouThreshold: number,
+  /** @property factor used to expand detected face before further analysis
+   * - default: 1.6
+   * - for high-quality inputs can be reduced to increase precision
+   * - for video inputs or low-quality inputs can be increased to allow for more flexible tracking
+   */
+  cropFactor: number,
+  /** @property should child models perform on masked image of a face */
+  mask: boolean,
   /** @property should face detection return face tensor to be used in some other extenrnal model? */
   return: boolean,
 }

@@ -314,6 +322,8 @@ const config: Config = {
     skipTime: 2500,
     minConfidence: 0.2,
     iouThreshold: 0.1,
+    cropFactor: 1.6,
+    mask: false,
     return: false,
   },
   mesh: {
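Note: with these defaults, face masking stays off unless explicitly enabled. A hedged sketch of a user configuration that turns it on (assumes the usual `Human` constructor accepting a partial config; the other values are illustrative):

```ts
import { Human } from '@vladmandic/human';

const human = new Human({
  face: {
    enabled: true,
    detector: {
      rotation: true,
      return: true,    // return the cropped face tensor to the caller
      cropFactor: 1.6, // expansion factor applied to the detected face box (default 1.6)
      mask: true,      // run child models (emotion, description, etc.) on a masked face image
    },
  },
});
```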
@@ -17,7 +17,6 @@ let anchorsData: [number, number][] = [];
 let anchors: Tensor | null = null;
 let inputSize = 0;
 
-// export const size = () => (model && model.inputs[0].shape ? model.inputs[0].shape[2] : 0);
 export const size = () => inputSize;
 
 export async function load(config: Config): Promise<GraphModel> {
@@ -9,13 +9,15 @@ import * as tf from '../../dist/tfjs.esm.js';
 import * as facemesh from './facemesh';
 import * as emotion from '../gear/emotion';
 import * as faceres from './faceres';
+import * as mask from './mask';
 import * as antispoof from './antispoof';
 import * as liveness from './liveness';
 import type { FaceResult } from '../result';
 import type { Tensor } from '../tfjs/types';
+import type { Human } from '../human';
 import { calculateFaceAngle } from './angles';
 
-export const detectFace = async (parent /* instance of human */, input: Tensor): Promise<FaceResult[]> => {
+export const detectFace = async (parent: Human /* instance of human */, input: Tensor): Promise<FaceResult[]> => {
   // run facemesh, includes blazeface and iris
   // eslint-disable-next-line no-async-promise-executor
   let timeStamp;

@@ -46,16 +48,24 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
       continue;
     }
 
-    const rotation = calculateFaceAngle(faces[i], [input.shape[2], input.shape[1]]);
+    // optional face mask
+    if (parent.config.face.detector?.mask) {
+      const masked = await mask.mask(faces[i]);
+      tf.dispose(faces[i].tensor);
+      faces[i].tensor = masked as Tensor;
+    }
+
+    // calculate face angles
+    const rotation = faces[i].mesh && (faces[i].mesh.length > 200) ? calculateFaceAngle(faces[i], [input.shape[2], input.shape[1]]) : null;
+
     // run emotion, inherits face from blazeface
     parent.analyze('Start Emotion:');
     if (parent.config.async) {
-      emotionRes = parent.config.face.emotion.enabled ? emotion.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
+      emotionRes = parent.config.face.emotion?.enabled ? emotion.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
     } else {
       parent.state = 'run:emotion';
       timeStamp = now();
-      emotionRes = parent.config.face.emotion.enabled ? await emotion.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
+      emotionRes = parent.config.face.emotion?.enabled ? await emotion.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
       parent.performance.emotion = env.perfadd ? (parent.performance.emotion || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
     }
     parent.analyze('End Emotion:');
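Note: this hunk is the core of the commit. When `face.detector.mask` is set, the cropped face tensor is replaced by a masked variant from the new `mask` module before any child model (emotion, antispoof, liveness, description) sees it, and the original tensor is disposed to avoid a leak; the angle calculation is also guarded so it only runs when a full mesh is available. The dispose-and-replace idiom as a standalone sketch (assumes a tfjs-style `dispose`; `applyMask` is a hypothetical stand-in for `mask.mask`, which actually receives the whole face result):

```ts
import * as tf from '@tensorflow/tfjs';

async function swapTensor(face: { tensor?: tf.Tensor }, applyMask: (t: tf.Tensor) => Promise<tf.Tensor>) {
  if (!face.tensor) return;
  const masked = await applyMask(face.tensor); // produce the masked variant first
  tf.dispose(face.tensor);                     // then free the original to avoid leaking tensor memory
  face.tensor = masked;
}
```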
@@ -63,11 +73,11 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
     // run antispoof, inherits face from blazeface
     parent.analyze('Start AntiSpoof:');
     if (parent.config.async) {
-      antispoofRes = parent.config.face.antispoof.enabled ? antispoof.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
+      antispoofRes = parent.config.face.antispoof?.enabled ? antispoof.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
     } else {
       parent.state = 'run:antispoof';
       timeStamp = now();
-      antispoofRes = parent.config.face.antispoof.enabled ? await antispoof.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
+      antispoofRes = parent.config.face.antispoof?.enabled ? await antispoof.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
       parent.performance.antispoof = env.perfadd ? (parent.performance.antispoof || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
     }
     parent.analyze('End AntiSpoof:');

@@ -75,11 +85,11 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
     // run liveness, inherits face from blazeface
     parent.analyze('Start Liveness:');
     if (parent.config.async) {
-      livenessRes = parent.config.face.liveness.enabled ? liveness.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
+      livenessRes = parent.config.face.liveness?.enabled ? liveness.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
     } else {
       parent.state = 'run:liveness';
       timeStamp = now();
-      livenessRes = parent.config.face.liveness.enabled ? await liveness.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
+      livenessRes = parent.config.face.liveness?.enabled ? await liveness.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
       parent.performance.antispoof = env.perfadd ? (parent.performance.antispoof || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
     }
     parent.analyze('End Liveness:');

@@ -101,11 +111,11 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
     // run emotion, inherits face from blazeface
     parent.analyze('Start Description:');
     if (parent.config.async) {
-      descRes = parent.config.face.description.enabled ? faceres.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
+      descRes = parent.config.face.description?.enabled ? faceres.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
     } else {
       parent.state = 'run:description';
       timeStamp = now();
-      descRes = parent.config.face.description.enabled ? await faceres.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
+      descRes = parent.config.face.description?.enabled ? await faceres.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
       parent.performance.description = env.perfadd ? (parent.performance.description || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
     }
     parent.analyze('End Description:');

@@ -119,7 +129,7 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
 
     // calculate iris distance
     // iris: array[ center, left, top, right, bottom]
-    if (!parent.config.face.iris.enabled && faces[i]?.annotations?.leftEyeIris && faces[i]?.annotations?.rightEyeIris) {
+    if (!parent.config.face.iris?.enabled && faces[i]?.annotations?.leftEyeIris && faces[i]?.annotations?.rightEyeIris) {
      delete faces[i].annotations.leftEyeIris;
      delete faces[i].annotations.rightEyeIris;
    }

@@ -130,7 +140,7 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
       : 0; // note: average human iris size is 11.7mm
 
     // optionally return tensor
-    const tensor = parent.config.face.detector.return ? tf.squeeze(faces[i].tensor) : null;
+    const tensor = parent.config.face.detector?.return ? tf.squeeze(faces[i].tensor) : null;
     // dispose original face tensor
     tf.dispose(faces[i].tensor);
     // delete temp face image
@ -25,11 +25,9 @@ let model: GraphModel | null = null;
|
||||||
let inputSize = 0;
|
let inputSize = 0;
|
||||||
let skipped = Number.MAX_SAFE_INTEGER;
|
let skipped = Number.MAX_SAFE_INTEGER;
|
||||||
let lastTime = 0;
|
let lastTime = 0;
|
||||||
const enlargeFact = 1.6;
|
|
||||||
|
|
||||||
--- a/src/face/facemesh.ts
+++ b/src/face/facemesh.ts
 export async function predict(input: Tensor, config: Config): Promise<FaceResult[]> {
   // reset cached boxes
   const skipTime = (config.face.detector?.skipTime || 0) > (now() - lastTime);
   const skipFrame = skipped < (config.face.detector?.skipFrames || 0);
   if (!config.skipAllowed || !skipTime || !skipFrame || boxCache.length === 0) {
@@ -43,7 +41,7 @@ export async function predict(input: Tensor, config: Config): Promise<FaceResult
         landmarks: possible.landmarks,
         confidence: possible.confidence,
       };
-      boxCache.push(util.squarifyBox(util.enlargeBox(util.scaleBoxCoordinates(box, possibleBoxes.scaleFactor), Math.sqrt(enlargeFact))));
+      boxCache.push(util.squarifyBox(util.enlargeBox(util.scaleBoxCoordinates(box, possibleBoxes.scaleFactor), Math.sqrt(config.face.detector?.cropFactor || 1.6))));
     }
     skipped = 0;
   } else {
@@ -68,7 +66,7 @@ export async function predict(input: Tensor, config: Config): Promise<FaceResult
       annotations: {},
     };
-    [angle, rotationMatrix, face.tensor] = util.correctFaceRotation(false && config.face.detector?.rotation, box, input, inputSize); // optional rotate based on detector data // disabled
+    [angle, rotationMatrix, face.tensor] = util.correctFaceRotation(false && config.face.detector?.rotation, box, input, config.face.mesh?.enabled ? inputSize : blazeface.size()); // optional rotate based on detector data
     if (config?.filter?.equalization) {
       const equilized = await histogramEqualization(face.tensor as Tensor);
       tf.dispose(face.tensor);
@@ -101,7 +99,7 @@ export async function predict(input: Tensor, config: Config): Promise<FaceResult
       face.mesh = util.transformRawCoords(rawCoords, box, angle, rotationMatrix, inputSize); // get processed mesh
       face.meshRaw = face.mesh.map((pt) => [pt[0] / (input.shape[2] || 0), pt[1] / (input.shape[1] || 0), (pt[2] || 0) / inputSize]);
       for (const key of Object.keys(coords.meshAnnotations)) face.annotations[key] = coords.meshAnnotations[key].map((index) => face.mesh[index]); // add annotations
-      box = util.squarifyBox({ ...util.enlargeBox(util.calculateLandmarksBoundingBox(face.mesh), enlargeFact), confidence: box.confidence }); // redefine box with mesh calculated one
+      box = util.squarifyBox({ ...util.enlargeBox(util.calculateLandmarksBoundingBox(face.mesh), (config.face.detector?.cropFactor || 1.6)), confidence: box.confidence }); // redefine box with mesh calculated one
       face.box = util.getClampedBox(box, input); // update detected box with box around the face mesh
       face.boxRaw = util.getRawBox(box, input);
       face.score = face.faceScore;
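Note: both facemesh call sites above replace the hardcoded enlargeFact constant with the new config.face.detector.cropFactor option, falling back to the previous value of 1.6. A minimal usage sketch (not part of this commit; assumes the standard Human constructor that accepts a partial config):

import { Human } from '@vladmandic/human';

// larger cropFactor crops a wider region around the detected face before mesh analysis
const human = new Human({ face: { detector: { cropFactor: 1.6 } } });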
--- a/src/face/faceres.ts
+++ b/src/face/faceres.ts
@@ -39,22 +39,17 @@ export async function load(config: Config): Promise<GraphModel> {
 export function enhance(input): Tensor {
   const tensor = (input.image || input.tensor || input) as Tensor; // input received from detector is already normalized to 0..1, input is also assumed to be straightened
   if (!model?.inputs[0].shape) return tensor; // model has no shape so no point continuing
-  // do a tight crop of image and resize it to fit the model
   const crop = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
   const norm = tf.mul(crop, 255);
   tf.dispose(crop);
   return norm;
   /*
+  // do a tight crop of image and resize it to fit the model
   const box = [[0.05, 0.15, 0.85, 0.85]]; // empyrical values for top, left, bottom, right
   const crop = (tensor.shape.length === 3)
     ? tf.image.cropAndResize(tf.expandDims(tensor, 0), box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) // add batch dimension if missing
     : tf.image.cropAndResize(tensor, box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
   */
-  /*
-  // just resize to fit the embedding model instead of cropping
-  const crop = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
-  */

   /*
   // convert to black&white to avoid colorization impact
   const rgb = [0.2989, 0.5870, 0.1140]; // factors for red/green/blue colors when converting to grayscale: https://www.mathworks.com/help/matlab/ref/rgb2gray.html
@@ -65,22 +60,6 @@ export function enhance(input): Tensor {
   const grayscale = tf.addN([redNorm, greenNorm, blueNorm]);
   const merge = tf.stack([grayscale, grayscale, grayscale], 3).squeeze(4);
   */
-
-  /*
-  // increase image pseudo-contrast 100%
-  // (or do it per-channel so mean is done on each channel)
-  // (or calculate histogram and do it based on histogram)
-  const mean = merge.mean();
-  const factor = 2;
-  const contrast = merge.sub(mean).mul(factor).add(mean);
-  */
-
-  /*
-  // normalize brightness from 0..1
-  // silly way of creating pseudo-hdr of image
-  const darken = crop.sub(crop.min());
-  const lighten = darken.div(darken.max());
-  */
 }

 export async function predict(image: Tensor, config: Config, idx, count) {
--- /dev/null
+++ b/src/face/mask.ts
@@ -0,0 +1,40 @@
+import type { Tensor } from '../tfjs/types';
+import type { FaceResult } from '../result';
+import * as tf from '../../dist/tfjs.esm.js';
+import { meshAnnotations } from './facemeshcoords';
+
+const expandFact = 0.1; // expand silhouette polygon outward by 10% to cover hairline and chin
+const alpha = 0.5; // attenuation factor applied to pixels outside the silhouette
+
+// point inclusion in polygon based on https://wrf.ecse.rpi.edu/Research/Short_Notes/pnpoly.html
+function insidePoly(x: number, y: number, polygon: Array<{ x: number, y: number }>): boolean {
+  let inside = false;
+  let j = polygon.length - 1;
+  for (let i = 0; i < polygon.length; j = i++) {
+    if (((polygon[i].y > y) !== (polygon[j].y > y)) && (x < (polygon[j].x - polygon[i].x) * (y - polygon[i].y) / (polygon[j].y - polygon[i].y) + polygon[i].x)) inside = !inside;
+  }
+  return inside;
+}
+
+export async function mask(face: FaceResult): Promise<Tensor | undefined> {
+  if (!face.tensor) return face.tensor;
+  const width = face.tensor.shape[2] || 0;
+  const height = face.tensor.shape[1] || 0;
+  const buffer = await face.tensor.buffer();
+  let silhouette: Array<{ x: number, y: number }> = [];
+  for (const pt of meshAnnotations.silhouette) silhouette.push({ x: (face.mesh[pt][0] - face.box[0]) / face.box[2], y: (face.mesh[pt][1] - face.box[1]) / face.box[3] }); // add all silhouette points scaled to local box
+  if (expandFact && expandFact > 0) silhouette = silhouette.map((pt) => ({ x: pt.x > 0.5 ? pt.x + expandFact : pt.x - expandFact, y: pt.y > 0.5 ? pt.y + expandFact : pt.y - expandFact })); // expand silhouette
+  for (let x = 0; x < width; x++) {
+    for (let y = 0; y < height; y++) {
+      const inside = insidePoly(x / width, y / height, silhouette); // normalize pixel coords to 0..1 before polygon test (original read y / width, a likely typo)
+      if (!inside) {
+        buffer.set(alpha * buffer.get(0, y, x, 0), 0, y, x, 0);
+        buffer.set(alpha * buffer.get(0, y, x, 1), 0, y, x, 1);
+        buffer.set(alpha * buffer.get(0, y, x, 2), 0, y, x, 2);
+      }
+    }
+  }
+  const output = buffer.toTensor();
+  tf.dispose(buffer);
+  return output;
+}
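The new module above implements the optional face masking named in the commit title: every pixel of the face crop that falls outside the (expanded) mesh silhouette polygon is attenuated by alpha, using the classic pnpoly even-odd ray-casting test. A hypothetical usage sketch (the mask export is real; human, inputImage, and the import path are illustrative assumptions):

import { Human } from '@vladmandic/human';
import { mask } from './src/face/mask'; // illustrative import path

const human = new Human();
const result = await human.detect(inputImage); // any input source supported by the library
for (const face of result.face) {
  const masked = await mask(face); // face crop tensor with non-face pixels darkened by alpha = 0.5
}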
--- a/src/gesture/gesture.ts
+++ b/src/gesture/gesture.ts
@@ -66,9 +66,10 @@ export const face = (res): GestureResult[] => {
   const gestures: Array<{ face: number, gesture: FaceGesture }> = [];
   for (let i = 0; i < res.length; i++) {
     if (res[i].mesh && res[i].mesh.length > 450) {
-      const eyeFacing = res[i].mesh[33][2] - res[i].mesh[263][2];
-      if (Math.abs(eyeFacing) < 10) gestures.push({ face: i, gesture: 'facing center' });
-      else gestures.push({ face: i, gesture: `facing ${eyeFacing < 0 ? 'left' : 'right'}` });
+      const zDiff = res[i].mesh[33][2] - res[i].mesh[263][2];
+      const xDiff = res[i].mesh[33][0] - res[i].mesh[263][0];
+      if (Math.abs(zDiff / xDiff) <= 0.15) gestures.push({ face: i, gesture: 'facing center' });
+      else gestures.push({ face: i, gesture: `facing ${zDiff < 0 ? 'left' : 'right'}` });
       const openLeft = Math.abs(res[i].mesh[374][1] - res[i].mesh[386][1]) / Math.abs(res[i].mesh[443][1] - res[i].mesh[450][1]); // center of eye inner lid y coord div center of wider eye border y coord
       if (openLeft < 0.2) gestures.push({ face: i, gesture: 'blink left eye' });
       const openRight = Math.abs(res[i].mesh[145][1] - res[i].mesh[159][1]) / Math.abs(res[i].mesh[223][1] - res[i].mesh[230][1]); // center of eye inner lid y coord div center of wider eye border y coord
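The facing gesture is now scale-invariant: instead of comparing the raw depth difference between the outer eye corners (mesh points 33 and 263) against a fixed threshold of 10, it compares the ratio of depth difference to horizontal eye distance against 0.15. A worked example with illustrative values:

const zDiff = 4;  // depth (z) difference between eye corners, illustrative
const xDiff = 60; // horizontal (x) distance between eye corners, illustrative
const facingCenter = Math.abs(zDiff / xDiff) <= 0.15; // 0.067 <= 0.15 -> 'facing center', regardless of face size in pixels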
--- a/src/human.ts
+++ b/src/human.ts
@@ -253,7 +253,7 @@ export class Human {
    * - `canvas` as canvas which is input image filtered with segmentation data and optionally merged with background image. canvas alpha values are set to segmentation values for easy merging
    * - `alpha` as grayscale canvas that represents segmentation alpha values
    */
-  async segmentation(input: Input, background?: Input): Promise<{ data: number[], canvas: HTMLCanvasElement | OffscreenCanvas | null, alpha: HTMLCanvasElement | OffscreenCanvas | null }> {
+  async segmentation(input: Input, background?: Input): Promise<{ data: number[] | Tensor, canvas: HTMLCanvasElement | OffscreenCanvas | null, alpha: HTMLCanvasElement | OffscreenCanvas | null }> {
     return segmentation.process(input, background, this.config);
   }
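The segmentation result's data field may now be either a flat number[] or a Tensor. A hypothetical caller-side sketch (assumes an initialized human instance; inputCanvas and ctx are illustrative names):

const seg = await human.segmentation(inputCanvas); // optionally pass a background image as the second argument
if (seg.canvas) ctx.drawImage(seg.canvas as HTMLCanvasElement, 0, 0); // canvas alpha already carries segmentation values
if (Array.isArray(seg.data)) console.log('alpha samples:', seg.data.length); // number[] path; otherwise data is a Tensor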
--- a/src/result.ts
+++ b/src/result.ts
@@ -54,7 +54,7 @@ export interface FaceResult {
     angle: { roll: number, yaw: number, pitch: number },
     matrix: [number, number, number, number, number, number, number, number, number],
     gaze: { bearing: number, strength: number },
-  }
+  } | null,
   /** detected face as tensor that can be used in further pipelines */
   tensor?: Tensor,
 }
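With rotation now explicitly nullable, consumers should guard before dereferencing. Minimal sketch (result assumed to come from human.detect):

const rot = result.face[0].rotation; // may be null, e.g. when rotation could not be calculated
if (rot) console.log(`yaw: ${rot.angle.yaw}, gaze bearing: ${rot.gaze.bearing}`);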
--- a/src/tfjs/backend.ts
+++ b/src/tfjs/backend.ts
@@ -1,5 +1,6 @@
 /** TFJS backend initialization and customization */

+import type { Human } from '../human';
 import { log, now } from '../util/util';
 import { env } from '../util/env';
 import * as humangl from './humangl';
@@ -26,7 +27,7 @@ function registerCustomOps() {
   }
 }

-export async function check(instance, force = false) {
+export async function check(instance: Human, force = false) {
   instance.state = 'backend';
   if (force || env.initial || (instance.config.backend && (instance.config.backend.length > 0) && (tf.getBackend() !== instance.config.backend))) {
     const timeStamp = now();
--- a/src/tfjs/humangl.ts
+++ b/src/tfjs/humangl.ts
@@ -1,5 +1,6 @@
 /** TFJS custom backend registration */

+import type { Human } from '../human';
 import { log } from '../util/util';
 import * as tf from '../../dist/tfjs.esm.js';
 import * as image from '../image/image';
@@ -40,7 +41,7 @@ function extensions(): void {
  *
  * @returns void
  */
-export async function register(instance): Promise<void> {
+export async function register(instance: Human): Promise<void> {
   // force backend reload if gl context is not valid
   if (instance.config.backend !== 'humangl') return;
   if ((config.name in tf.engine().registry) && (!config.gl || !config.gl.getParameter(config.gl.VERSION))) {
--- a/src/util/interpolate.ts
+++ b/src/util/interpolate.ts
@@ -114,6 +114,7 @@ export function calc(newResult: Result, config: Config): Result {
         .map((b, j) => ((bufferedFactor - 1) * bufferedResult.face[i].box[j] + b) / bufferedFactor)) as Box;
       const boxRaw = (newResult.face[i].boxRaw // update boxRaw
         .map((b, j) => ((bufferedFactor - 1) * bufferedResult.face[i].boxRaw[j] + b) / bufferedFactor)) as Box;
+      if (newResult.face[i].rotation) {
       const rotation: {
         matrix: [number, number, number, number, number, number, number, number, number],
         angle: { roll: number, yaw: number, pitch: number },
@@ -132,6 +133,8 @@ export function calc(newResult: Result, config: Config): Result {
       };
       bufferedResult.face[i] = { ...newResult.face[i], rotation, box, boxRaw }; // shallow clone plus updated values
       }
+      bufferedResult.face[i] = { ...newResult.face[i], box, boxRaw }; // shallow clone plus updated values
+    }
     }

     // interpolate object detection results
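The interpolation above is a simple running average weighted by bufferedFactor: each buffered coordinate moves 1/bufferedFactor of the way toward the new raw value per frame. Worked numbers (illustrative):

const bufferedFactor = 4;  // assumed smoothing factor
const prev = 100;          // previously buffered coordinate
const next = 120;          // new raw coordinate
const smoothed = ((bufferedFactor - 1) * prev + next) / bufferedFactor; // (3 * 100 + 120) / 4 = 105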
--- a/src/warmup.ts
+++ b/src/warmup.ts
@@ -8,9 +8,11 @@ import * as tf from '../dist/tfjs.esm.js';
 import * as image from './image/image';
 import type { Config } from './config';
 import type { Result } from './result';
+import type { Human } from './human';
+import type { Tensor } from './tfjs/types';
 import { env } from './util/env';

-async function warmupBitmap(instance) {
+async function warmupBitmap(instance: Human) {
   const b64toBlob = (base64: string, type = 'application/octet-stream') => fetch(`data:${type};base64,${base64}`).then((res) => res.blob());
   let blob;
   let res;
@@ -28,7 +30,7 @@ async function warmupBitmap(instance) {
   return res;
 }

-async function warmupCanvas(instance) {
+async function warmupCanvas(instance: Human) {
   return new Promise((resolve) => {
     let src;
     // let size = 0;
@@ -60,7 +62,7 @@ async function warmupCanvas(instance) {
       if (ctx) ctx.drawImage(img, 0, 0);
       // const data = ctx?.getImageData(0, 0, canvas.height, canvas.width);
       const tensor = await instance.image(canvas);
-      const res = await instance.detect(tensor.tensor, instance.config);
+      const res = await instance.detect(tensor.tensor as Tensor, instance.config);
       resolve(res);
     }
   };
@@ -69,7 +71,7 @@ async function warmupCanvas(instance) {
   });
 }

-async function warmupNode(instance) {
+async function warmupNode(instance: Human) {
   const atob = (str: string) => Buffer.from(str, 'base64');
   let img;
   if (instance.config.warmup === 'face') img = atob(sample.face);
@@ -101,7 +103,7 @@ async function warmupNode(instance) {
  * - only used for `webgl` and `humangl` backends
  * @param userConfig?: Config
  */
-export async function warmup(instance, userConfig?: Partial<Config>): Promise<Result | { error }> {
+export async function warmup(instance: Human, userConfig?: Partial<Config>): Promise<Result | { error }> {
   const t0 = now();
   instance.state = 'warmup';
   if (userConfig) instance.config = mergeDeep(instance.config, userConfig) as Config;