From 200bccbb43e162ff7ecb53a7b1923fe40f80d0a1 Mon Sep 17 00:00:00 2001
From: Vladimir Mandic <mandic00@live.com>
Date: Thu, 11 Nov 2021 11:30:55 -0500
Subject: [PATCH] add faceid demo

---
 .build.json                                 |   6 +-
 .eslintrc.json                              |   3 +-
 CHANGELOG.md                                |   3 +-
 README.md                                   |   2 +-
 TODO.md                                     |   4 +
 demo/{facerecognition => faceid}/README.md  |  11 +-
 demo/{facerecognition => faceid}/index.html |  13 +-
 demo/{facerecognition => faceid}/index.js   | 193 ++++++++++++++++----
 demo/{facerecognition => faceid}/index.ts   | 149 ++++++++++-----
 demo/faceid/indexdb.ts                      |  57 ++++++
 package.json                                |   4 +-
 src/exports.ts                              |   2 +-
 src/tfjs/types.ts                           |   2 +-
 wiki                                        |   2 +-
 14 files changed, 349 insertions(+), 102 deletions(-)
 rename demo/{facerecognition => faceid}/README.md (76%)
 rename demo/{facerecognition => faceid}/index.html (67%)
 rename demo/{facerecognition => faceid}/index.js (51%)
 rename demo/{facerecognition => faceid}/index.ts (64%)
 create mode 100644 demo/faceid/indexdb.ts

diff --git a/.build.json b/.build.json
index cffdaffb..2c667798 100644
--- a/.build.json
+++ b/.build.json
@@ -152,11 +152,11 @@
       "external": ["*/human.esm.js"]
     },
     {
-      "name": "demo/facerecognition",
+      "name": "demo/faceid",
       "platform": "browser",
       "format": "esm",
-      "input": "demo/facerecognition/index.ts",
-      "output": "demo/facerecognition/index.js",
+      "input": "demo/faceid/index.ts",
+      "output": "demo/faceid/index.js",
       "sourcemap": true,
       "external": ["*/human.esm.js"]
     }
diff --git a/.eslintrc.json b/.eslintrc.json
index e125a983..56d41d9e 100644
--- a/.eslintrc.json
+++ b/.eslintrc.json
@@ -29,7 +29,7 @@
     "assets",
     "demo/helpers/*.js",
     "demo/typescript/*.js",
-    "demo/facerecognition/*.js",
+    "demo/faceid/*.js",
     "dist",
     "media",
     "models",
@@ -49,6 +49,7 @@
     "func-names": "off",
     "guard-for-in": "off",
     "import/extensions": "off",
+    "import/named": "off",
     "import/no-extraneous-dependencies": "off",
     "import/no-named-as-default": "off",
     "import/no-unresolved": "off",
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5dd820d8..5abe371d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,8 +9,9 @@

 ## Changelog

-### **HEAD -> main** 2021/11/09 mandic00@live.com
+### **HEAD -> main** 2021/11/10 mandic00@live.com

+- auto tensor shape and channels handling
 - disable use of path2d in node
 - add liveness module and facerecognition demo
 - initial version of facerecognition demo
diff --git a/README.md b/README.md
index 9f55ac4c..61249172 100644
--- a/README.md
+++ b/README.md
@@ -49,7 +49,7 @@ JavaScript module using TensorFlow/JS Machine Learning library
 - **Full** [[*Live*]](https://vladmandic.github.io/human/demo/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo): Main browser demo app that showcases all Human capabilities
 - **Simple** [[*Live*]](https://vladmandic.github.io/human/demo/typescript/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/typescript): Simple WebCam processing demo in TypeScript
 - **Face Match** [[*Live*]](https://vladmandic.github.io/human/demo/facematch/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Extracts faces from images, calculates face descriptors and similarities, and matches them to a known database
-- **Face Recognition** [[*Live*]](https://vladmandic.github.io/human/demo/facerecognition/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facerecognition): Runs multiple checks to validate webcam input before performing face match, similar to *FaceID*
+- **Face ID** [[*Live*]](https://vladmandic.github.io/human/demo/faceid/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/faceid): Runs multiple checks to validate webcam input before performing face match against faces stored in IndexedDB
 - **Multi-thread** [[*Live*]](https://vladmandic.github.io/human/demo/multithread/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread): Runs each `human` module in a separate web worker for highest possible performance
 - **Face 3D** [[*Live*]](https://vladmandic.github.io/human/demo/face3d/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/face3d): Uses WebCam as input and draws 3D render of face mesh using `Three.js`
 - **Virtual Avatar** [[*Live*]](https://vladmandic.github.io/human-vrm/src/human-vrm.html) [[*Details*]](https://github.com/vladmandic/human-vrm): VR model with head, face, eye, body and hand tracking
diff --git a/TODO.md b/TODO.md
index 794c77eb..6b036d1e 100644
--- a/TODO.md
+++ b/TODO.md
@@ -47,3 +47,7 @@ New:
 - new optional model `liveness`
   checks if input appears to be a real-world live image or a recording
   best used together with `antispoofing` that checks if input appears to have a realistic face
+
+Other:
+- Improved **Safari** compatibility
+- Documentation overhaul
diff --git a/demo/facerecognition/README.md b/demo/faceid/README.md
similarity index 76%
rename from demo/facerecognition/README.md
rename to demo/faceid/README.md
index 6d2dd9ab..bbaee95a 100644
--- a/demo/facerecognition/README.md
+++ b/demo/faceid/README.md
@@ -1,6 +1,7 @@
-# Human Face Recognition
+# Human Face Recognition: FaceID

-`facerecognition` runs multiple checks to validate webcam input before performing face match, similar to *FaceID*
+`faceid` runs multiple checks to validate webcam input before performing face match
+Detected face image and descriptor are stored in client-side IndexedDB

 ## Workflow
 - Starts webcam
@@ -10,8 +11,8 @@
 - Face and gaze direction
 - Detection scores
 - Blink detection (including temporal check for blink speed) to verify live input
- - Runs antispoofing optional module
- - Runs liveness optional module
+ - Runs `antispoofing` optional module
+ - Runs `liveness` optional module
 - Runs match against database of registered faces and presents best match with scores

 ## Notes
@@ -30,4 +31,4 @@ designed to serve as a quick check when used together with other indicators:
 ### Liveness Module
 - Checks if input has obvious artifacts due to recording (e.g. playing back a phone recording of a face)
 - Configuration: `human.config.face.liveness.enabled`
-- Result: `human.result.face[0].live` as score
\ No newline at end of file
+- Result: `human.result.face[0].live` as score
diff --git a/demo/facerecognition/index.html b/demo/faceid/index.html
similarity index 67%
rename from demo/facerecognition/index.html
rename to demo/faceid/index.html
index 907fa811..8409ab7c 100644
--- a/demo/facerecognition/index.html
+++ b/demo/faceid/index.html
@@ -16,15 +16,24 @@
     <video id="video" playsinline></video>
     <canvas id="canvas"></canvas>
+    <canvas id="source"></canvas>
+    <div id="match">
+      <input id="name" type="text" value="">
+      <span id="save">save</span>
+      <span id="delete">delete</span>
+    </div>
+    <div id="retry">retry</div>
     <pre id="status"></pre>
     <pre id="fps"></pre>
     <pre id="log"></pre>
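
The compiled bundle in the next file diff inlines the new `demo/faceid/indexdb.ts` helpers (`open`, `load`, `save`, `remove`; the source appears near the end of this patch). A minimal round-trip sketch of that storage API — the `demoRoundTrip` name and record values are hypothetical, and the `ImageData` is constructed directly rather than captured from a canvas as the demo does:

```ts
import * as indexDb from './indexdb';

async function demoRoundTrip() { // hypothetical helper, not part of the demo
  // id is ignored on save: the 'person' object store autoincrements its keyPath
  const rec: indexDb.FaceRecord = { id: 0, name: 'test', descriptor: [0.1, 0.2, 0.3], image: new ImageData(224, 224) };
  await indexDb.save(rec); // opens the database on first use, then put()
  const records = await indexDb.load(); // cursor walk over all stored records
  console.log('stored face records:', records.length);
  if (records.length > 0) await indexDb.remove(records[0]); // delete by stored id
}
```
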
diff --git a/demo/facerecognition/index.js b/demo/faceid/index.js similarity index 51% rename from demo/facerecognition/index.js rename to demo/faceid/index.js index 972ff073..76b80bc4 100644 --- a/demo/facerecognition/index.js +++ b/demo/faceid/index.js @@ -4,8 +4,67 @@ author: ' */ -// demo/facerecognition/index.ts +// demo/faceid/index.ts import { Human } from "../../dist/human.esm.js"; + +// demo/faceid/indexdb.ts +var db; +var database = "human"; +var table = "person"; +var log = (...msg) => console.log("indexdb", ...msg); +async function open() { + if (db) + return true; + return new Promise((resolve) => { + const request = indexedDB.open(database, 1); + request.onerror = (evt) => log("error:", evt); + request.onupgradeneeded = (evt) => { + log("create:", evt.target); + db = evt.target.result; + db.createObjectStore(table, { keyPath: "id", autoIncrement: true }); + }; + request.onsuccess = (evt) => { + db = evt.target.result; + log("open:", db); + resolve(true); + }; + }); +} +async function load() { + const faceDB = []; + if (!db) + await open(); + return new Promise((resolve) => { + const cursor = db.transaction([table], "readwrite").objectStore(table).openCursor(null, "next"); + cursor.onerror = (evt) => log("load error:", evt); + cursor.onsuccess = (evt) => { + if (evt.target.result) { + faceDB.push(evt.target.result.value); + evt.target.result.continue(); + } else { + resolve(faceDB); + } + }; + }); +} +async function save(faceRecord) { + if (!db) + await open(); + const newRecord = { name: faceRecord.name, descriptor: faceRecord.descriptor, image: faceRecord.image }; + db.transaction([table], "readwrite").objectStore(table).put(newRecord); + log("save:", newRecord); +} +async function remove(faceRecord) { + if (!db) + await open(); + db.transaction([table], "readwrite").objectStore(table).delete(faceRecord.id); + log("delete:", faceRecord); +} + +// demo/faceid/index.ts +var db2 = []; +var face; +var current; var humanConfig = { modelBasePath: "../../models", filter: { equalization: true }, @@ -24,12 +83,12 @@ var humanConfig = { gesture: { enabled: true } }; var options = { - faceDB: "../facematch/faces.json", minConfidence: 0.6, minSize: 224, maxTime: 1e4, blinkMin: 10, - blinkMax: 800 + blinkMax: 800, + threshold: 0.5 }; var ok = { faceCount: false, @@ -47,7 +106,6 @@ var blink = { end: 0, time: 0 }; -var db = []; var human = new Human(humanConfig); human.env["perfadd"] = false; human.draw.options.font = 'small-caps 18px "Lato"'; @@ -57,12 +115,18 @@ var dom = { canvas: document.getElementById("canvas"), log: document.getElementById("log"), fps: document.getElementById("fps"), - status: document.getElementById("status") + status: document.getElementById("status"), + match: document.getElementById("match"), + name: document.getElementById("name"), + save: document.getElementById("save"), + delete: document.getElementById("delete"), + retry: document.getElementById("retry"), + source: document.getElementById("source") }; var timestamp = { detect: 0, draw: 0 }; var fps = { detect: 0, draw: 0 }; var startTime = 0; -var log = (...msg) => { +var log2 = (...msg) => { dom.log.innerText += msg.join(" ") + "\n"; console.log(...msg); }; @@ -80,7 +144,8 @@ async function webCam() { await ready; dom.canvas.width = dom.video.videoWidth; dom.canvas.height = dom.video.videoHeight; - log("video:", dom.video.videoWidth, dom.video.videoHeight, stream.getVideoTracks()[0].label); + if (human.env.initial) + log2("video:", dom.video.videoWidth, dom.video.videoHeight, "|", 
stream.getVideoTracks()[0].label); dom.canvas.onclick = () => { if (dom.video.paused) dom.video.play(); @@ -90,6 +155,8 @@ async function webCam() { } async function detectionLoop() { if (!dom.video.paused) { + if (face && face.tensor) + human.tf.dispose(face.tensor); await human.detect(dom.video); const now = human.now(); fps.detect = 1e3 / (now - timestamp.detect); @@ -124,59 +191,109 @@ async function validationLoop() { printStatus(ok); if (allOk()) { dom.video.pause(); - return human.result.face; - } else { - human.tf.dispose(human.result.face[0].tensor); + return human.result.face[0]; } if (ok.elapsedMs > options.maxTime) { dom.video.pause(); - return human.result.face; + return human.result.face[0]; } else { ok.elapsedMs = Math.trunc(human.now() - startTime); return new Promise((resolve) => { setTimeout(async () => { const res = await validationLoop(); if (res) - resolve(human.result.face); + resolve(human.result.face[0]); }, 30); }); } } -async function detectFace(face) { - dom.canvas.width = face.tensor.shape[2]; - dom.canvas.height = face.tensor.shape[1]; +async function saveRecords() { + var _a; + if (dom.name.value.length > 0) { + const image = (_a = dom.canvas.getContext("2d")) == null ? void 0 : _a.getImageData(0, 0, dom.canvas.width, dom.canvas.height); + const rec = { id: 0, name: dom.name.value, descriptor: face.embedding, image }; + await save(rec); + log2("saved face record:", rec.name); + db2.push(rec); + } else { + log2("invalid name"); + } +} +async function deleteRecord() { + if (current.id > 0) { + await remove(current); + } +} +async function detectFace() { + var _a; + if (!face || !face.tensor || !face.embedding) + return 0; + dom.canvas.width = face.tensor.shape[1] || 0; + dom.canvas.height = face.tensor.shape[0] || 0; + dom.source.width = dom.canvas.width; + dom.source.height = dom.canvas.height; dom.canvas.style.width = ""; human.tf.browser.toPixels(face.tensor, dom.canvas); - human.tf.dispose(face.tensor); - const arr = db.map((rec) => rec.embedding); - const res = await human.match(face.embedding, arr); - log(`found best match: ${db[res.index].name} similarity: ${Math.round(1e3 * res.similarity) / 10}% source: ${db[res.index].source}`); -} -async function loadFaceDB() { - const res = await fetch(options.faceDB); - db = res && res.ok ? await res.json() : []; - log("loaded face db:", options.faceDB, "records:", db.length); + const descriptors = db2.map((rec) => rec.descriptor); + const res = await human.match(face.embedding, descriptors); + dom.match.style.display = "flex"; + dom.retry.style.display = "block"; + if (res.index === -1) { + log2("no matches"); + dom.delete.style.display = "none"; + dom.source.style.display = "none"; + } else { + current = db2[res.index]; + log2(`best match: ${current.name} | id: ${current.id} | similarity: ${Math.round(1e3 * res.similarity) / 10}%`); + dom.delete.style.display = ""; + dom.name.value = current.name; + dom.source.style.display = ""; + (_a = dom.source.getContext("2d")) == null ? 
void 0 : _a.putImageData(current.image, 0, 0); + } + return res.similarity > options.threshold; } async function main() { - log("human version:", human.version, "| tfjs version:", human.tf.version_core); - printFPS("loading..."); - await loadFaceDB(); - await human.load(); - printFPS("initializing..."); - await human.warmup(); + ok.faceCount = false; + ok.faceConfidence = false; + ok.facingCenter = false; + ok.blinkDetected = false; + ok.faceSize = false; + ok.antispoofCheck = false; + ok.livenessCheck = false; + ok.elapsedMs = 0; + dom.match.style.display = "none"; + dom.retry.style.display = "none"; + document.body.style.background = "black"; await webCam(); await detectionLoop(); startTime = human.now(); - const face = await validationLoop(); - if (!allOk()) - log("did not find valid input", face); - else { - log("found valid face", face); - await detectFace(face[0]); - } + face = await validationLoop(); dom.fps.style.display = "none"; + if (!allOk()) { + log2("did not find valid input", face); + return 0; + } else { + const res = await detectFace(); + document.body.style.background = res ? "darkgreen" : "maroon"; + return res; + } } -window.onload = main; +async function init() { + log2("human version:", human.version, "| tfjs version:", human.tf.version_core); + log2("options:", JSON.stringify(options).replace(/{|}|"|\[|\]/g, "").replace(/,/g, " ")); + printFPS("loading..."); + db2 = await load(); + log2("loaded face records:", db2.length); + await webCam(); + await human.load(); + printFPS("initializing..."); + dom.retry.addEventListener("click", main); + dom.save.addEventListener("click", saveRecords); + dom.delete.addEventListener("click", deleteRecord); + await human.warmup(); + await main(); +} +window.onload = init; /** * Human demo for browsers * @default Human Library diff --git a/demo/facerecognition/index.ts b/demo/faceid/index.ts similarity index 64% rename from demo/facerecognition/index.ts rename to demo/faceid/index.ts index 785c7805..5f706d64 100644 --- a/demo/facerecognition/index.ts +++ b/demo/faceid/index.ts @@ -7,14 +7,19 @@ * @license MIT */ -import { Human } from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human +import { Human, TensorLike, FaceResult } from '../../dist/human.esm.js'; // equivalent of @vladmandic/Human +import * as indexDb from './indexdb'; // methods to deal with indexdb + +let db: Array = []; // face descriptor database stored in indexdb +let face: FaceResult; // face result from human.detect +let current: indexDb.FaceRecord; // currently matched db record const humanConfig = { // user configuration for human, used to fine-tune behavior modelBasePath: '../../models', filter: { equalization: true }, // lets run with histogram equilizer face: { enabled: true, - detector: { rotation: true, return: true }, // return tensor is not really needed except to draw detected face + detector: { rotation: true, return: true }, // return tensor is used to get detected face image description: { enabled: true }, iris: { enabled: true }, // needed to determine gaze direction emotion: { enabled: false }, // not needed @@ -24,16 +29,16 @@ const humanConfig = { // user configuration for human, used to fine-tune behavio body: { enabled: false }, hand: { enabled: false }, object: { enabled: false }, - gesture: { enabled: true }, + gesture: { enabled: true }, // parses face and iris gestures }; const options = { - faceDB: '../facematch/faces.json', - minConfidence: 0.6, // overal face confidence for box, face, gender, real + minConfidence: 0.6, // overal 
face confidence for box, face, gender, real, live minSize: 224, // min input to face descriptor model before degradation maxTime: 10000, // max time before giving up blinkMin: 10, // minimum duration of a valid blink blinkMax: 800, // maximum duration of a valid blink + threshold: 0.5, // minimum similarity }; const ok = { // must meet all rules @@ -54,7 +59,7 @@ const blink = { // internal timers for blink start/end/duration time: 0, }; -let db: Array<{ name: string, source: string, embedding: number[] }> = []; // holds loaded face descriptor database +// let db: Array<{ name: string, source: string, embedding: number[] }> = []; // holds loaded face descriptor database const human = new Human(humanConfig); // create instance of human with overrides from user configuration human.env['perfadd'] = false; // is performance data showing instant or total values @@ -67,6 +72,12 @@ const dom = { // grab instances of dom objects so we dont have to look them up l log: document.getElementById('log') as HTMLPreElement, fps: document.getElementById('fps') as HTMLPreElement, status: document.getElementById('status') as HTMLPreElement, + match: document.getElementById('match') as HTMLDivElement, + name: document.getElementById('name') as HTMLInputElement, + save: document.getElementById('save') as HTMLSpanElement, + delete: document.getElementById('delete') as HTMLSpanElement, + retry: document.getElementById('retry') as HTMLDivElement, + source: document.getElementById('source') as HTMLCanvasElement, }; const timestamp = { detect: 0, draw: 0 }; // holds information used to calculate performance and possible memory leaks const fps = { detect: 0, draw: 0 }; // holds calculated fps information for both detect and screen refresh @@ -91,7 +102,7 @@ async function webCam() { // initialize webcam await ready; dom.canvas.width = dom.video.videoWidth; dom.canvas.height = dom.video.videoHeight; - log('video:', dom.video.videoWidth, dom.video.videoHeight, stream.getVideoTracks()[0].label); + if (human.env.initial) log('video:', dom.video.videoWidth, dom.video.videoHeight, '|', stream.getVideoTracks()[0].label); dom.canvas.onclick = () => { // pause when clicked on screen and resume on next click if (dom.video.paused) dom.video.play(); else dom.video.pause(); @@ -100,6 +111,7 @@ async function webCam() { // initialize webcam async function detectionLoop() { // main detection loop if (!dom.video.paused) { + if (face && face.tensor) human.tf.dispose(face.tensor); // dispose previous tensor await human.detect(dom.video); // actual detection; were not capturing output in a local variable as it can also be reached via human.result const now = human.now(); fps.detect = 1000 / (now - timestamp.detect); @@ -108,7 +120,7 @@ async function detectionLoop() { // main detection loop } } -async function validationLoop(): Promise { // main screen refresh loop +async function validationLoop(): Promise { // main screen refresh loop const interpolated = await human.next(human.result); // smoothen result using last-known results await human.draw.canvas(dom.video, dom.canvas); // draw canvas to screen await human.draw.all(dom.canvas, interpolated); // draw labels, boxes, lines, etc. 
@@ -116,7 +128,6 @@ async function validationLoop(): Promise { // main scr fps.draw = 1000 / (now - timestamp.draw); timestamp.draw = now; printFPS(`fps: ${fps.detect.toFixed(1).padStart(5, ' ')} detect | ${fps.draw.toFixed(1).padStart(5, ' ')} draw`); // write status - ok.faceCount = human.result.face.length === 1; // must be exactly detected face if (ok.faceCount) { // skip the rest if no face const gestures: string[] = Object.values(human.result.gesture).map((gesture) => gesture.gesture); // flatten all gestures @@ -130,65 +141,113 @@ async function validationLoop(): Promise { // main scr ok.livenessCheck = (human.result.face[0].live || 0) > options.minConfidence; ok.faceSize = human.result.face[0].box[2] >= options.minSize && human.result.face[0].box[3] >= options.minSize; } - printStatus(ok); - if (allOk()) { // all criteria met dom.video.pause(); - return human.result.face; - } else { - human.tf.dispose(human.result.face[0].tensor); // results are not ok, so lets dispose tensor + return human.result.face[0]; } if (ok.elapsedMs > options.maxTime) { // give up dom.video.pause(); - return human.result.face; + return human.result.face[0]; } else { // run again ok.elapsedMs = Math.trunc(human.now() - startTime); return new Promise((resolve) => { setTimeout(async () => { const res = await validationLoop(); // run validation loop until conditions are met - if (res) resolve(human.result.face); // recursive promise resolve + if (res) resolve(human.result.face[0]); // recursive promise resolve }, 30); // use to slow down refresh from max refresh rate to target of 30 fps }); } } -async function detectFace(face) { - // draw face and dispose face tensor immediatey afterwards - dom.canvas.width = face.tensor.shape[2]; - dom.canvas.height = face.tensor.shape[1]; - dom.canvas.style.width = ''; - human.tf.browser.toPixels(face.tensor, dom.canvas); - human.tf.dispose(face.tensor); - - const arr = db.map((rec) => rec.embedding); - const res = await human.match(face.embedding, arr); - log(`found best match: ${db[res.index].name} similarity: ${Math.round(1000 * res.similarity) / 10}% source: ${db[res.index].source}`); +async function saveRecords() { + if (dom.name.value.length > 0) { + const image = dom.canvas.getContext('2d')?.getImageData(0, 0, dom.canvas.width, dom.canvas.height) as ImageData; + const rec = { id: 0, name: dom.name.value, descriptor: face.embedding as number[], image }; + await indexDb.save(rec); + log('saved face record:', rec.name); + db.push(rec); + } else { + log('invalid name'); + } } -async function loadFaceDB() { - const res = await fetch(options.faceDB); - db = (res && res.ok) ? 
await res.json() : []; - log('loaded face db:', options.faceDB, 'records:', db.length); +async function deleteRecord() { + if (current.id > 0) { + await indexDb.remove(current); + } +} + +async function detectFace() { + // draw face and dispose face tensor immediatey afterwards + if (!face || !face.tensor || !face.embedding) return 0; + dom.canvas.width = face.tensor.shape[1] || 0; + dom.canvas.height = face.tensor.shape[0] || 0; + dom.source.width = dom.canvas.width; + dom.source.height = dom.canvas.height; + dom.canvas.style.width = ''; + human.tf.browser.toPixels(face.tensor as unknown as TensorLike, dom.canvas); + const descriptors = db.map((rec) => rec.descriptor); + const res = await human.match(face.embedding, descriptors); + dom.match.style.display = 'flex'; + dom.retry.style.display = 'block'; + if (res.index === -1) { + log('no matches'); + dom.delete.style.display = 'none'; + dom.source.style.display = 'none'; + } else { + current = db[res.index]; + log(`best match: ${current.name} | id: ${current.id} | similarity: ${Math.round(1000 * res.similarity) / 10}%`); + dom.delete.style.display = ''; + dom.name.value = current.name; + dom.source.style.display = ''; + dom.source.getContext('2d')?.putImageData(current.image, 0, 0); + } + return res.similarity > options.threshold; } async function main() { // main entry point - log('human version:', human.version, '| tfjs version:', human.tf.version_core); - printFPS('loading...'); - await loadFaceDB(); - await human.load(); // preload all models - printFPS('initializing...'); - await human.warmup(); // warmup function to initialize backend for future faster detection - await webCam(); // start webcam + ok.faceCount = false; + ok.faceConfidence = false; + ok.facingCenter = false; + ok.blinkDetected = false; + ok.faceSize = false; + ok.antispoofCheck = false; + ok.livenessCheck = false; + ok.elapsedMs = 0; + dom.match.style.display = 'none'; + dom.retry.style.display = 'none'; + document.body.style.background = 'black'; + await webCam(); await detectionLoop(); // start detection loop startTime = human.now(); - const face = await validationLoop(); // start validation loop - if (!allOk()) log('did not find valid input', face); - else { - log('found valid face', face); - await detectFace(face[0]); - } + face = await validationLoop(); // start validation loop dom.fps.style.display = 'none'; + if (!allOk()) { + log('did not find valid input', face); + return 0; + } else { + // log('found valid face'); + const res = await detectFace(); + document.body.style.background = res ? 
'darkgreen' : 'maroon'; + return res; + } } -window.onload = main; +async function init() { + log('human version:', human.version, '| tfjs version:', human.tf.version_core); + log('options:', JSON.stringify(options).replace(/{|}|"|\[|\]/g, '').replace(/,/g, ' ')); + printFPS('loading...'); + db = await indexDb.load(); // load face database from indexdb + log('loaded face records:', db.length); + await webCam(); // start webcam + await human.load(); // preload all models + printFPS('initializing...'); + dom.retry.addEventListener('click', main); + dom.save.addEventListener('click', saveRecords); + dom.delete.addEventListener('click', deleteRecord); + await human.warmup(); // warmup function to initialize backend for future faster detection + await main(); +} + +window.onload = init; diff --git a/demo/faceid/indexdb.ts b/demo/faceid/indexdb.ts new file mode 100644 index 00000000..6757366a --- /dev/null +++ b/demo/faceid/indexdb.ts @@ -0,0 +1,57 @@ +let db: IDBDatabase; // instance of indexdb + +const database = 'human'; +const table = 'person'; + +export type FaceRecord = { id: number, name: string, descriptor: number[], image: ImageData }; + +// eslint-disable-next-line no-console +const log = (...msg) => console.log('indexdb', ...msg); + +export async function open() { + if (db) return true; + return new Promise((resolve) => { + const request: IDBOpenDBRequest = indexedDB.open(database, 1); + request.onerror = (evt) => log('error:', evt); + request.onupgradeneeded = (evt: IDBVersionChangeEvent) => { // create if doesnt exist + log('create:', evt.target); + db = (evt.target as IDBOpenDBRequest).result; + db.createObjectStore(table, { keyPath: 'id', autoIncrement: true }); + }; + request.onsuccess = (evt) => { // open + db = (evt.target as IDBOpenDBRequest).result as IDBDatabase; + log('open:', db); + resolve(true); + }; + }); +} + +export async function load(): Promise { + const faceDB: Array = []; + if (!db) await open(); // open or create if not already done + return new Promise((resolve) => { + const cursor: IDBRequest = db.transaction([table], 'readwrite').objectStore(table).openCursor(null, 'next'); + cursor.onerror = (evt) => log('load error:', evt); + cursor.onsuccess = (evt) => { + if ((evt.target as IDBRequest).result) { + faceDB.push((evt.target as IDBRequest).result.value); + (evt.target as IDBRequest).result.continue(); + } else { + resolve(faceDB); + } + }; + }); +} + +export async function save(faceRecord: FaceRecord) { + if (!db) await open(); // open or create if not already done + const newRecord = { name: faceRecord.name, descriptor: faceRecord.descriptor, image: faceRecord.image }; // omit id as its autoincrement + db.transaction([table], 'readwrite').objectStore(table).put(newRecord); + log('save:', newRecord); +} + +export async function remove(faceRecord: FaceRecord) { + if (!db) await open(); // open or create if not already done + db.transaction([table], 'readwrite').objectStore(table).delete(faceRecord.id); // delete based on id + log('delete:', faceRecord); +} diff --git a/package.json b/package.json index 9aa5b41c..8bc1ca49 100644 --- a/package.json +++ b/package.json @@ -32,13 +32,12 @@ "human", "human-library", "face-detection", + "faceid", "face-geometry", "face-embedding", "face-recognition", "face-description", "face-matching", - "face-api", - "faceapi", "body-tracking", "body-segmentation", "hand-tracking", @@ -49,7 +48,6 @@ "gesture-recognition", "gaze-tracking", "age-gender", - "person", "tensorflowjs", "tfjs", "tensorflow" diff --git a/src/exports.ts 
b/src/exports.ts index 417e13ca..480a0b44 100644 --- a/src/exports.ts +++ b/src/exports.ts @@ -4,7 +4,7 @@ import type { env } from './util/env'; export * from './config'; export * from './result'; -export type { Tensor } from './tfjs/types'; +export type { Tensor, TensorLike } from './tfjs/types'; export type { DrawOptions } from './util/draw'; export type { Descriptor } from './face/match'; export type { Box, Point } from './result'; diff --git a/src/tfjs/types.ts b/src/tfjs/types.ts index 3e5f19b8..46ecee55 100644 --- a/src/tfjs/types.ts +++ b/src/tfjs/types.ts @@ -4,7 +4,7 @@ * TensorFlow Tensor type * @external */ -export { Tensor } from '@tensorflow/tfjs-core/dist/index'; +export { Tensor, TensorLike } from '@tensorflow/tfjs-core/dist/index'; /** * TensorFlow GraphModel type diff --git a/wiki b/wiki index 2a937c42..e26b1555 160000 --- a/wiki +++ b/wiki @@ -1 +1 @@ -Subproject commit 2a937c42e7539b7aa077a9f41085ca573bba7578 +Subproject commit e26b155506e7981fa8187be228b5651de77ee8c6
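
Taken together, the demo's recognition gate reduces to a single nearest-descriptor lookup against the stored records. A condensed sketch of the decision implemented by `detectFace()` above — the `isKnownFace` helper name is hypothetical, and the 0.5 default mirrors `options.threshold`:

```ts
import { Human } from '../../dist/human.esm.js';
import * as indexDb from './indexdb';

const human = new Human({ modelBasePath: '../../models' });

// hypothetical helper condensing the demo's detectFace() match logic
async function isKnownFace(embedding: number[], threshold = 0.5): Promise<string | null> {
  const records = await indexDb.load(); // all enrolled FaceRecords from IndexedDB
  const res = await human.match(embedding, records.map((rec) => rec.descriptor));
  if (res.index === -1 || res.similarity <= threshold) return null; // no acceptable match
  return records[res.index].name; // best match above the similarity threshold
}
```
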