diff --git a/.build.json b/.build.json
index 344ce9b0..cffdaffb 100644
--- a/.build.json
+++ b/.build.json
@@ -143,13 +143,21 @@
       "typedoc": "typedoc"
     },
     {
-      "name": "demo/browser",
+      "name": "demo/typescript",
       "platform": "browser",
       "format": "esm",
       "input": "demo/typescript/index.ts",
       "output": "demo/typescript/index.js",
       "sourcemap": true,
-      "minify": false,
+      "external": ["*/human.esm.js"]
+    },
+    {
+      "name": "demo/facerecognition",
+      "platform": "browser",
+      "format": "esm",
+      "input": "demo/facerecognition/index.ts",
+      "output": "demo/facerecognition/index.js",
+      "sourcemap": true,
       "external": ["*/human.esm.js"]
     }
   ]
diff --git a/.eslintrc.json b/.eslintrc.json
index 3f753a3b..e125a983 100644
--- a/.eslintrc.json
+++ b/.eslintrc.json
@@ -29,6 +29,7 @@
     "assets",
     "demo/helpers/*.js",
     "demo/typescript/*.js",
+    "demo/facerecognition/*.js",
     "dist",
     "media",
     "models",
@@ -60,6 +61,7 @@
     "no-bitwise": "off",
     "no-case-declarations":"off",
     "no-continue": "off",
+    "no-else-return": "off",
     "no-lonely-if": "off",
     "no-loop-func": "off",
     "no-mixed-operators": "off",
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 90fc5bf1..273e6816 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,9 +11,8 @@

 ### **HEAD -> main** 2021/11/08 mandic00@live.com

-
-### **origin/main** 2021/11/08 mandic00@live.com
-
+- add type defs when working with relative path imports
+- disable humangl backend if webgl 1.0 is detected

 ### **release: 2.5.1** 2021/11/08 mandic00@live.com
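The new `demo/facerecognition` build target mirrors the existing `demo/typescript` entry: browser platform, ESM output, sourcemaps enabled, and `human.esm.js` marked external so the demo does not re-bundle the library itself. For readers who want to reproduce the build outside the repo's own harness, a rough standalone equivalent using esbuild's JS API might look like this (a sketch only; the repo actually drives esbuild through `.build.json`):

```ts
// rough standalone equivalent of the new "demo/facerecognition" build target;
// illustration only -- the repo drives esbuild through its .build.json harness
import { build } from 'esbuild';

await build({
  entryPoints: ['demo/facerecognition/index.ts'],
  outfile: 'demo/facerecognition/index.js',
  platform: 'browser',
  format: 'esm',
  bundle: true,                 // bundle demo code into a single module
  sourcemap: true,
  external: ['*/human.esm.js'], // keep the prebuilt library out of the demo bundle
});
```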
diff --git a/demo/facerecognition/index.html b/demo/facerecognition/index.html
new file mode 100644
index 00000000..907fa811
--- /dev/null
+++ b/demo/facerecognition/index.html
@@ -0,0 +1,30 @@
+[30 lines of HTML markup stripped during extraction: a page titled "Human: Face Recognition" that loads index.js as an ES module and declares the video, canvas, log, fps, and status elements used by the script]
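The page markup itself is small; what matters to the script is the element contract. A minimal stand-in that satisfies the lookups in `index.ts` could be built programmatically (a sketch assuming only the element ids and tags implied by the code, not the original styling or layout):

```ts
// minimal stand-in for the demo page: create the elements index.ts looks up by id
// (ids and tags are inferred from the dom lookups in the script; styling omitted)
const elements = [['video', 'video'], ['canvas', 'canvas'], ['pre', 'log'], ['pre', 'fps'], ['pre', 'status']] as const;
for (const [tag, id] of elements) {
  const el = document.createElement(tag);
  el.id = id;
  document.body.appendChild(el);
}
```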
diff --git a/demo/facerecognition/index.js b/demo/facerecognition/index.js
new file mode 100644
index 00000000..844f58c0
--- /dev/null
+++ b/demo/facerecognition/index.js
@@ -0,0 +1,166 @@
+/*
+  Human
+  homepage: <https://github.com/vladmandic/human>
+  author: <https://github.com/vladmandic>'
+*/
+
+// demo/facerecognition/index.ts
+import { Human } from "../../dist/human.esm.js";
+var humanConfig = {
+  modelBasePath: "../../models",
+  filter: { equalization: true },
+  face: {
+    enabled: true,
+    detector: { rotation: true, return: true },
+    description: { enabled: true },
+    iris: { enabled: true },
+    emotion: { enabled: false },
+    antispoof: { enabled: true }
+  },
+  body: { enabled: false },
+  hand: { enabled: false },
+  object: { enabled: false },
+  gesture: { enabled: true }
+};
+var options = {
+  minConfidence: 0.6,
+  minSize: 224,
+  maxTime: 1e4
+};
+var human = new Human(humanConfig);
+human.env["perfadd"] = false;
+human.draw.options.font = 'small-caps 18px "Lato"';
+human.draw.options.lineHeight = 20;
+var dom = {
+  video: document.getElementById("video"),
+  canvas: document.getElementById("canvas"),
+  log: document.getElementById("log"),
+  fps: document.getElementById("fps"),
+  status: document.getElementById("status")
+};
+var timestamp = { detect: 0, draw: 0 };
+var fps = { detect: 0, draw: 0 };
+var startTime = 0;
+var log = (...msg) => {
+  dom.log.innerText += msg.join(" ") + "\n";
+  console.log(...msg);
+};
+var printFPS = (msg) => dom.fps.innerText = msg;
+var printStatus = (msg) => dom.status.innerText = "status: " + JSON.stringify(msg).replace(/"|{|}/g, "").replace(/,/g, " | ");
+async function webCam() {
+  printFPS("starting webcam...");
+  const cameraOptions = { audio: false, video: { facingMode: "user", resizeMode: "none", width: { ideal: document.body.clientWidth } } };
+  const stream = await navigator.mediaDevices.getUserMedia(cameraOptions);
+  const ready = new Promise((resolve) => {
+    dom.video.onloadeddata = () => resolve(true);
+  });
+  dom.video.srcObject = stream;
+  dom.video.play();
+  await ready;
+  dom.canvas.width = dom.video.videoWidth;
+  dom.canvas.height = dom.video.videoHeight;
+  const track = stream.getVideoTracks()[0];
+  const capabilities = track.getCapabilities ? track.getCapabilities() : "";
+  const settings = track.getSettings ? track.getSettings() : "";
+  const constraints = track.getConstraints ? track.getConstraints() : "";
+  log("video:", dom.video.videoWidth, dom.video.videoHeight, track.label, { stream, track, settings, constraints, capabilities });
+  dom.canvas.onclick = () => {
+    if (dom.video.paused)
+      dom.video.play();
+    else
+      dom.video.pause();
+  };
+}
+async function detectionLoop() {
+  if (!dom.video.paused) {
+    await human.detect(dom.video);
+    const now = human.now();
+    fps.detect = 1e3 / (now - timestamp.detect);
+    timestamp.detect = now;
+    requestAnimationFrame(detectionLoop);
+  }
+}
+var ok = {
+  faceCount: false,
+  faceConfidence: false,
+  facingCenter: false,
+  eyesOpen: false,
+  blinkDetected: false,
+  faceSize: false,
+  antispoofCheck: false,
+  livenessCheck: false,
+  elapsedMs: 0
+};
+var allOk = () => ok.faceCount && ok.faceSize && ok.blinkDetected && ok.facingCenter && ok.faceConfidence && ok.antispoofCheck;
+async function validationLoop() {
+  const interpolated = await human.next(human.result);
+  await human.draw.canvas(dom.video, dom.canvas);
+  await human.draw.all(dom.canvas, interpolated);
+  const now = human.now();
+  fps.draw = 1e3 / (now - timestamp.draw);
+  timestamp.draw = now;
+  printFPS(`fps: ${fps.detect.toFixed(1).padStart(5, " ")} detect | ${fps.draw.toFixed(1).padStart(5, " ")} draw`);
+  const gestures = Object.values(human.result.gesture).map((gesture) => gesture.gesture);
+  ok.faceCount = human.result.face.length === 1;
+  ok.eyesOpen = ok.eyesOpen || !(gestures.includes("blink left eye") || gestures.includes("blink right eye"));
+  ok.blinkDetected = ok.eyesOpen && ok.blinkDetected || gestures.includes("blink left eye") || gestures.includes("blink right eye");
+  ok.facingCenter = gestures.includes("facing center") && gestures.includes("looking center");
+  ok.faceConfidence = (human.result.face[0].boxScore || 0) > options.minConfidence && (human.result.face[0].faceScore || 0) > options.minConfidence && (human.result.face[0].genderScore || 0) > options.minConfidence;
+  ok.antispoofCheck = (human.result.face[0].real || 0) > options.minConfidence;
+  ok.faceSize = human.result.face[0].box[2] >= options.minSize && human.result.face[0].box[3] >= options.minSize;
+  printStatus(ok);
+  if (allOk()) {
+    dom.video.pause();
+    return human.result.face;
+  } else {
+    human.tf.dispose(human.result.face[0].tensor);
+  }
+  if (ok.elapsedMs > options.maxTime) {
+    dom.video.pause();
+    return human.result.face;
+  } else {
+    ok.elapsedMs = Math.trunc(human.now() - startTime);
+    return new Promise((resolve) => {
+      setTimeout(async () => {
+        const res = await validationLoop();
+        if (res)
+          resolve(human.result.face);
+      }, 30);
+    });
+  }
+}
+async function detectFace(face) {
+  dom.canvas.width = face.tensor.shape[2];
+  dom.canvas.height = face.tensor.shape[1];
+  dom.canvas.style.width = "";
+  human.tf.browser.toPixels(face.tensor, dom.canvas);
+  human.tf.dispose(face.tensor);
+}
+async function main() {
+  log("human version:", human.version, "| tfjs version:", human.tf.version_core);
+  printFPS("loading...");
+  await human.load();
+  printFPS("initializing...");
+  await human.warmup();
+  await webCam();
+  await detectionLoop();
+  startTime = human.now();
+  const face = await validationLoop();
+  if (!allOk())
+    log("did not find valid input", face);
+  else {
+    log("found valid face", face);
+    await detectFace(face[0]);
+  }
+  dom.fps.style.display = "none";
+}
+window.onload = main;
+/**
+ * Human demo for browsers
+ * @default Human Library
+ * @summary <https://github.com/vladmandic/human>
+ * @author <https://github.com/vladmandic>
+ * @copyright <https://github.com/vladmandic>
+ * @license MIT
+ */
+//# sourceMappingURL=index.js.map
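The bundled output above is generated from `index.ts` below, but it already shows the demo's core pattern: detection runs as fast as the backend allows and only updates `human.result`, while drawing interpolates from the last known result on its own schedule via `human.next()`. Distilled to a sketch (the method names are the Human APIs used above; the reduction itself is illustrative, not repo code):

```ts
// decoupled detect/draw loops as used by the demo: detect at model speed,
// draw interpolated results at a fixed target rate
import { Human } from '../../dist/human.esm.js';

async function detectLoop(human: Human, video: HTMLVideoElement) {
  if (video.paused) return;
  await human.detect(video);                             // updates human.result as a side effect
  requestAnimationFrame(() => detectLoop(human, video)); // schedule next frame immediately
}

async function drawLoop(human: Human, video: HTMLVideoElement, canvas: HTMLCanvasElement) {
  const interpolated = await human.next(human.result);   // smooth from last-known result
  await human.draw.canvas(video, canvas);
  await human.draw.all(canvas, interpolated);
  setTimeout(() => drawLoop(human, video, canvas), 30);  // ~30 fps draw target
}
```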
diff --git a/demo/facerecognition/index.ts b/demo/facerecognition/index.ts
new file mode 100644
index 00000000..e8865c47
--- /dev/null
+++ b/demo/facerecognition/index.ts
@@ -0,0 +1,175 @@
+/**
+ * Human demo for browsers
+ * @default Human Library
+ * @summary <https://github.com/vladmandic/human>
+ * @author <https://github.com/vladmandic>
+ * @copyright <https://github.com/vladmandic>
+ * @license MIT
+ */
+
+import { Human } from '../../dist/human.esm.js'; // equivalent of @vladmandic/human
+
+const humanConfig = { // user configuration for human, used to fine-tune behavior
+  modelBasePath: '../../models',
+  filter: { equalization: true }, // run input through histogram equalization
+  face: {
+    enabled: true,
+    detector: { rotation: true, return: true }, // return tensor is not really needed except to draw detected face
+    description: { enabled: true },
+    iris: { enabled: true }, // needed to determine gaze direction
+    emotion: { enabled: false }, // not needed
+    antispoof: { enabled: true }, // enable optional antispoof check as well
+  },
+  body: { enabled: false },
+  hand: { enabled: false },
+  object: { enabled: false },
+  gesture: { enabled: true },
+};
+
+const options = {
+  minConfidence: 0.6, // overall face confidence for box, face, gender, real
+  minSize: 224, // min input to face descriptor model before degradation
+  maxTime: 10000, // max time before giving up
+};
+
+const human = new Human(humanConfig); // create instance of human with overrides from user configuration
+
+human.env['perfadd'] = false; // whether performance data shows instant or total values
+human.draw.options.font = 'small-caps 18px "Lato"'; // set font used to draw labels when using draw methods
+human.draw.options.lineHeight = 20;
+
+const dom = { // grab instances of dom objects so we don't have to look them up later
+  video: document.getElementById('video') as HTMLVideoElement,
+  canvas: document.getElementById('canvas') as HTMLCanvasElement,
+  log: document.getElementById('log') as HTMLPreElement,
+  fps: document.getElementById('fps') as HTMLPreElement,
+  status: document.getElementById('status') as HTMLPreElement,
+};
+const timestamp = { detect: 0, draw: 0 }; // holds information used to calculate performance and possible memory leaks
+const fps = { detect: 0, draw: 0 }; // holds calculated fps information for both detect and screen refresh
+let startTime = 0;
+
+const log = (...msg) => { // helper method to output messages
+  dom.log.innerText += msg.join(' ') + '\n';
+  // eslint-disable-next-line no-console
+  console.log(...msg);
+};
+const printFPS = (msg) => dom.fps.innerText = msg; // print fps element
+const printStatus = (msg) => dom.status.innerText = 'status: ' + JSON.stringify(msg).replace(/"|{|}/g, '').replace(/,/g, ' | '); // print status element
+
+async function webCam() { // initialize webcam
+  printFPS('starting webcam...');
+  // @ts-ignore resizeMode is not yet defined in tslib
+  const cameraOptions: MediaStreamConstraints = { audio: false, video: { facingMode: 'user', resizeMode: 'none', width: { ideal: document.body.clientWidth } } };
+  const stream: MediaStream = await navigator.mediaDevices.getUserMedia(cameraOptions);
+  const ready = new Promise((resolve) => { dom.video.onloadeddata = () => resolve(true); });
+  dom.video.srcObject = stream;
+  dom.video.play();
+  await ready;
+  dom.canvas.width = dom.video.videoWidth;
+  dom.canvas.height = dom.video.videoHeight;
+  const track: MediaStreamTrack = stream.getVideoTracks()[0];
+  const capabilities: MediaTrackCapabilities | string = track.getCapabilities ? track.getCapabilities() : '';
+  const settings: MediaTrackSettings | string = track.getSettings ? track.getSettings() : '';
+  const constraints: MediaTrackConstraints | string = track.getConstraints ? track.getConstraints() : '';
+  log('video:', dom.video.videoWidth, dom.video.videoHeight, track.label, { stream, track, settings, constraints, capabilities });
+  dom.canvas.onclick = () => { // pause when clicked on screen and resume on next click
+    if (dom.video.paused) dom.video.play();
+    else dom.video.pause();
+  };
+}
+
+async function detectionLoop() { // main detection loop
+  if (!dom.video.paused) {
+    await human.detect(dom.video); // actual detection; we're not capturing output in a local variable as it can also be reached via human.result
+    const now = human.now();
+    fps.detect = 1000 / (now - timestamp.detect);
+    timestamp.detect = now;
+    requestAnimationFrame(detectionLoop); // start new frame immediately
+  }
+}
+
+const ok = { // must meet all rules
+  faceCount: false,
+  faceConfidence: false,
+  facingCenter: false,
+  eyesOpen: false,
+  blinkDetected: false,
+  faceSize: false,
+  antispoofCheck: false,
+  livenessCheck: false,
+  elapsedMs: 0,
+};
+const allOk = () => ok.faceCount && ok.faceSize && ok.blinkDetected && ok.facingCenter && ok.faceConfidence && ok.antispoofCheck;
+
+async function validationLoop(): Promise<typeof human.result.face> { // main screen refresh loop
+  const interpolated = await human.next(human.result); // smooth result using last-known results
+  await human.draw.canvas(dom.video, dom.canvas); // draw canvas to screen
+  await human.draw.all(dom.canvas, interpolated); // draw labels, boxes, lines, etc.
+  const now = human.now();
+  fps.draw = 1000 / (now - timestamp.draw);
+  timestamp.draw = now;
+  printFPS(`fps: ${fps.detect.toFixed(1).padStart(5, ' ')} detect | ${fps.draw.toFixed(1).padStart(5, ' ')} draw`); // write status
+
+  const gestures: string[] = Object.values(human.result.gesture).map((gesture) => gesture.gesture); // flatten all gestures
+  ok.faceCount = human.result.face.length === 1; // must be exactly one detected face
+  ok.eyesOpen = ok.eyesOpen || !(gestures.includes('blink left eye') || gestures.includes('blink right eye')); // blink validation is only ok once both eyes are open
+  ok.blinkDetected = ok.eyesOpen && ok.blinkDetected || gestures.includes('blink left eye') || gestures.includes('blink right eye'); // need to detect blink only once
+  ok.facingCenter = gestures.includes('facing center') && gestures.includes('looking center'); // must face camera and look at camera
+  ok.faceConfidence = (human.result.face[0].boxScore || 0) > options.minConfidence && (human.result.face[0].faceScore || 0) > options.minConfidence && (human.result.face[0].genderScore || 0) > options.minConfidence;
+  ok.antispoofCheck = (human.result.face[0].real || 0) > options.minConfidence;
+  ok.faceSize = human.result.face[0].box[2] >= options.minSize && human.result.face[0].box[3] >= options.minSize;
+
+  printStatus(ok);
+
+  if (allOk()) { // all criteria met
+    dom.video.pause();
+    return human.result.face;
+  } else {
+    human.tf.dispose(human.result.face[0].tensor); // results are not ok, so dispose the tensor
+  }
+  if (ok.elapsedMs > options.maxTime) { // give up
+    dom.video.pause();
+    return human.result.face;
+  } else { // run again
+    ok.elapsedMs = Math.trunc(human.now() - startTime);
+    return new Promise((resolve) => {
+      setTimeout(async () => {
+        const res = await validationLoop(); // run validation loop until conditions are met
+        if (res) resolve(human.result.face); // recursive promise resolve
+      }, 30); // used to slow down refresh from max refresh rate to a target of 30 fps
+    });
+  }
+}
+
+async function detectFace(face) {
+  // draw face and dispose face tensor immediately afterwards
+  dom.canvas.width = face.tensor.shape[2];
+  dom.canvas.height = face.tensor.shape[1];
+  dom.canvas.style.width = '';
+  human.tf.browser.toPixels(face.tensor, dom.canvas);
+  human.tf.dispose(face.tensor);
+
+  // run matching using human.match and use face.embedding as input descriptor
+  // tbd
+}
+
+async function main() { // main entry point
+  log('human version:', human.version, '| tfjs version:', human.tf.version_core);
+  printFPS('loading...');
+  await human.load(); // preload all models
+  printFPS('initializing...');
+  await human.warmup(); // warmup function to initialize backend for future faster detection
+  await webCam(); // start webcam
+  await detectionLoop(); // start detection loop
+  startTime = human.now();
+  const face = await validationLoop(); // start validation loop
+  if (!allOk()) log('did not find valid input', face);
+  else {
+    log('found valid face', face);
+    await detectFace(face[0]);
+  }
+  dom.fps.style.display = 'none';
+}
+
+window.onload = main;
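`detectFace` deliberately leaves the matching step as `tbd`. One plausible completion uses the library's `human.match` helper with `face.embedding` as the input descriptor, as the comment suggests; the enrolled-faces database, its shape, and the similarity threshold below are hypothetical, not part of the demo:

```ts
// hypothetical completion of the "tbd" step: compare the validated face
// descriptor against enrolled descriptors; `db` and the 0.5 threshold are
// assumptions, not repo code
import { Human } from '../../dist/human.esm.js';

type Enrolled = { name: string, embedding: number[] };
const db: Enrolled[] = []; // enrolled faces, loaded elsewhere

function findMatch(human: Human, embedding: number[] | undefined) {
  if (!embedding || db.length === 0) return null;
  const best = human.match(embedding, db.map((rec) => rec.embedding)); // returns { index, similarity, distance }
  return best.similarity > 0.5 ? { name: db[best.index].name, similarity: best.similarity } : null;
}
```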
diff --git a/demo/typescript/index.html b/demo/typescript/index.html
index 3b7d0ba4..a74a14bc 100644
--- a/demo/typescript/index.html
+++ b/demo/typescript/index.html
@@ -26,6 +26,5 @@
 [hunk body stripped during extraction: five HTML context lines plus one removed line, which survives only as the stray "-" below]
- diff --git a/models/liveness.bin b/models/liveness.bin new file mode 100644 index 00000000..1418d0d9 Binary files /dev/null and b/models/liveness.bin differ diff --git a/models/liveness.json b/models/liveness.json new file mode 100644 index 00000000..dc443294 --- /dev/null +++ b/models/liveness.json @@ -0,0 +1,79 @@ +{ + "format": "graph-model", + "generatedBy": "https://github.com/leokwu/livenessnet", + "convertedBy": "https://github.com/vladmandic", + "signature": + { + "inputs": + { + "conv2d_1_input": {"name":"conv2d_1_input:0","dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"-1"},{"size":"32"},{"size":"32"},{"size":"3"}]}} + }, + "outputs": + { + "activation_6": {"name":"Identity:0","dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"-1"},{"size":"2"}]}} + } + }, + "modelTopology": + { + "node": + [ + {"name":"StatefulPartitionedCall/sequential_1/conv2d_1/Conv2D/ReadVariableOp","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"3"},{"size":"16"}]}}},"dtype":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/conv2d_1/BiasAdd/ReadVariableOp","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"16"}]}}},"dtype":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/conv2d_2/Conv2D/ReadVariableOp","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"16"},{"size":"16"}]}}}}}, + {"name":"StatefulPartitionedCall/sequential_1/conv2d_2/BiasAdd/ReadVariableOp","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"16"}]}}},"dtype":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/conv2d_3/Conv2D/ReadVariableOp","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"16"},{"size":"32"}]}}}}}, + {"name":"StatefulPartitionedCall/sequential_1/conv2d_3/BiasAdd/ReadVariableOp","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"32"}]}}},"dtype":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/conv2d_4/Conv2D/ReadVariableOp","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"3"},{"size":"3"},{"size":"32"},{"size":"32"}]}}}}}, + {"name":"StatefulPartitionedCall/sequential_1/conv2d_4/BiasAdd/ReadVariableOp","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"32"}]}}},"dtype":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/flatten_1/Const","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_INT32","tensorShape":{"dim":[{"size":"2"}]}}},"dtype":{"type":"DT_INT32"}}}, + {"name":"StatefulPartitionedCall/sequential_1/dense_1/MatMul/ReadVariableOp","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"2048"},{"size":"64"}]}}},"dtype":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/dense_1/BiasAdd/ReadVariableOp","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/mul","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"64"}]}}},"dtype":{"type":"DT_FLOAT"}}}, + 
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/sub","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"64"}]}}}}}, + {"name":"StatefulPartitionedCall/sequential_1/dense_2/MatMul/ReadVariableOp","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"64"},{"size":"2"}]}}},"dtype":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/dense_2/BiasAdd/ReadVariableOp","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"2"}]}}}}}, + {"name":"conv2d_1_input","op":"Placeholder","attr":{"shape":{"shape":{"dim":[{"size":"-1"},{"size":"32"},{"size":"32"},{"size":"3"}]}},"dtype":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3/Scaled","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"16"}]}}},"dtype":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3/Offset","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"16"}]}}}}}, + {"name":"StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3/Scaled","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"16"}]}}}}}, + {"name":"StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3/Offset","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"16"}]}}},"dtype":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3/Scaled","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"32"}]}}}}}, + {"name":"StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3/Offset","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"32"}]}}},"dtype":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3/Scaled","op":"Const","attr":{"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"32"}]}}},"dtype":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3/Offset","op":"Const","attr":{"dtype":{"type":"DT_FLOAT"},"value":{"tensor":{"dtype":"DT_FLOAT","tensorShape":{"dim":[{"size":"32"}]}}}}}, + {"name":"StatefulPartitionedCall/sequential_1/activation_1/Relu","op":"_FusedConv2D","input":["conv2d_1_input","StatefulPartitionedCall/sequential_1/conv2d_1/Conv2D/ReadVariableOp","StatefulPartitionedCall/sequential_1/conv2d_1/BiasAdd/ReadVariableOp"],"device":"/device:CPU:0","attr":{"dilations":{"list":{"i":["1","1","1","1"]}},"T":{"type":"DT_FLOAT"},"explicit_paddings":{"list":{}},"use_cudnn_on_gpu":{"b":true},"data_format":{"s":"TkhXQw=="},"num_args":{"i":"1"},"fused_ops":{"list":{"s":["Qmlhc0FkZA==","UmVsdQ=="]}},"strides":{"list":{"i":["1","1","1","1"]}},"epsilon":{"f":0},"padding":{"s":"U0FNRQ=="}}}, + {"name":"StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3/Mul","op":"Mul","input":["StatefulPartitionedCall/sequential_1/activation_1/Relu","StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3/Scaled"],"attr":{"T":{"type":"DT_FLOAT"}}}, + 
{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3","op":"Add","input":["StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3/Mul","StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3/Offset"],"attr":{"T":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/activation_2/Relu","op":"_FusedConv2D","input":["StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3","StatefulPartitionedCall/sequential_1/conv2d_2/Conv2D/ReadVariableOp","StatefulPartitionedCall/sequential_1/conv2d_2/BiasAdd/ReadVariableOp"],"device":"/device:CPU:0","attr":{"epsilon":{"f":0},"explicit_paddings":{"list":{}},"T":{"type":"DT_FLOAT"},"dilations":{"list":{"i":["1","1","1","1"]}},"strides":{"list":{"i":["1","1","1","1"]}},"fused_ops":{"list":{"s":["Qmlhc0FkZA==","UmVsdQ=="]}},"data_format":{"s":"TkhXQw=="},"num_args":{"i":"1"},"padding":{"s":"U0FNRQ=="},"use_cudnn_on_gpu":{"b":true}}}, + {"name":"StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3/Mul","op":"Mul","input":["StatefulPartitionedCall/sequential_1/activation_2/Relu","StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3/Scaled"],"attr":{"T":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3","op":"Add","input":["StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3/Mul","StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3/Offset"],"attr":{"T":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/max_pooling2d_1/MaxPool","op":"MaxPool","input":["StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3"],"attr":{"T":{"type":"DT_FLOAT"},"data_format":{"s":"TkhXQw=="},"padding":{"s":"VkFMSUQ="},"ksize":{"list":{"i":["1","2","2","1"]}},"explicit_paddings":{"list":{}},"strides":{"list":{"i":["1","2","2","1"]}}}}, + {"name":"StatefulPartitionedCall/sequential_1/activation_3/Relu","op":"_FusedConv2D","input":["StatefulPartitionedCall/sequential_1/max_pooling2d_1/MaxPool","StatefulPartitionedCall/sequential_1/conv2d_3/Conv2D/ReadVariableOp","StatefulPartitionedCall/sequential_1/conv2d_3/BiasAdd/ReadVariableOp"],"device":"/device:CPU:0","attr":{"use_cudnn_on_gpu":{"b":true},"padding":{"s":"U0FNRQ=="},"T":{"type":"DT_FLOAT"},"explicit_paddings":{"list":{}},"strides":{"list":{"i":["1","1","1","1"]}},"data_format":{"s":"TkhXQw=="},"num_args":{"i":"1"},"fused_ops":{"list":{"s":["Qmlhc0FkZA==","UmVsdQ=="]}},"dilations":{"list":{"i":["1","1","1","1"]}},"epsilon":{"f":0}}}, + {"name":"StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3/Mul","op":"Mul","input":["StatefulPartitionedCall/sequential_1/activation_3/Relu","StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3/Scaled"],"attr":{"T":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3","op":"Add","input":["StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3/Mul","StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3/Offset"],"attr":{"T":{"type":"DT_FLOAT"}}}, + 
{"name":"StatefulPartitionedCall/sequential_1/activation_4/Relu","op":"_FusedConv2D","input":["StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3","StatefulPartitionedCall/sequential_1/conv2d_4/Conv2D/ReadVariableOp","StatefulPartitionedCall/sequential_1/conv2d_4/BiasAdd/ReadVariableOp"],"device":"/device:CPU:0","attr":{"num_args":{"i":"1"},"padding":{"s":"U0FNRQ=="},"use_cudnn_on_gpu":{"b":true},"data_format":{"s":"TkhXQw=="},"T":{"type":"DT_FLOAT"},"strides":{"list":{"i":["1","1","1","1"]}},"epsilon":{"f":0},"explicit_paddings":{"list":{}},"fused_ops":{"list":{"s":["Qmlhc0FkZA==","UmVsdQ=="]}},"dilations":{"list":{"i":["1","1","1","1"]}}}}, + {"name":"StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3/Mul","op":"Mul","input":["StatefulPartitionedCall/sequential_1/activation_4/Relu","StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3/Scaled"],"attr":{"T":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3","op":"Add","input":["StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3/Mul","StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3/Offset"],"attr":{"T":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/max_pooling2d_2/MaxPool","op":"MaxPool","input":["StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3"],"attr":{"explicit_paddings":{"list":{}},"T":{"type":"DT_FLOAT"},"data_format":{"s":"TkhXQw=="},"padding":{"s":"VkFMSUQ="},"ksize":{"list":{"i":["1","2","2","1"]}},"strides":{"list":{"i":["1","2","2","1"]}}}}, + {"name":"StatefulPartitionedCall/sequential_1/flatten_1/Reshape","op":"Reshape","input":["StatefulPartitionedCall/sequential_1/max_pooling2d_2/MaxPool","StatefulPartitionedCall/sequential_1/flatten_1/Const"],"attr":{"T":{"type":"DT_FLOAT"},"Tshape":{"type":"DT_INT32"}}}, + {"name":"StatefulPartitionedCall/sequential_1/activation_5/Relu","op":"_FusedMatMul","input":["StatefulPartitionedCall/sequential_1/flatten_1/Reshape","StatefulPartitionedCall/sequential_1/dense_1/MatMul/ReadVariableOp","StatefulPartitionedCall/sequential_1/dense_1/BiasAdd/ReadVariableOp"],"device":"/device:CPU:0","attr":{"fused_ops":{"list":{"s":["Qmlhc0FkZA==","UmVsdQ=="]}},"T":{"type":"DT_FLOAT"},"epsilon":{"f":0},"transpose_b":{"b":false},"num_args":{"i":"1"},"transpose_a":{"b":false}}}, + {"name":"StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/mul_1","op":"Mul","input":["StatefulPartitionedCall/sequential_1/activation_5/Relu","StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/mul"],"attr":{"T":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/add_1","op":"AddV2","input":["StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/mul_1","StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/sub"],"attr":{"T":{"type":"DT_FLOAT"}}}, + {"name":"StatefulPartitionedCall/sequential_1/dense_2/BiasAdd","op":"_FusedMatMul","input":["StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/add_1","StatefulPartitionedCall/sequential_1/dense_2/MatMul/ReadVariableOp","StatefulPartitionedCall/sequential_1/dense_2/BiasAdd/ReadVariableOp"],"device":"/device:CPU:0","attr":{"fused_ops":{"list":{"s":["Qmlhc0FkZA=="]}},"transpose_a":{"b":false},"epsilon":{"f":0},"transpose_b":{"b":false},"num_args":{"i":"1"},"T":{"type":"DT_FLOAT"}}}, + 
{"name":"StatefulPartitionedCall/sequential_1/activation_6/Softmax","op":"Softmax","input":["StatefulPartitionedCall/sequential_1/dense_2/BiasAdd"],"attr":{"T":{"type":"DT_FLOAT"}}}, + {"name":"Identity","op":"Identity","input":["StatefulPartitionedCall/sequential_1/activation_6/Softmax"],"attr":{"T":{"type":"DT_FLOAT"}}} + ], + "library": {}, + "versions": + { + "producer": 808 + } + }, + "weightsManifest": + [ + { + "paths": ["liveness.bin"], + "weights": [{"name":"StatefulPartitionedCall/sequential_1/conv2d_1/Conv2D/ReadVariableOp","shape":[3,3,3,16],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/conv2d_1/BiasAdd/ReadVariableOp","shape":[16],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/conv2d_2/Conv2D/ReadVariableOp","shape":[3,3,16,16],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/conv2d_2/BiasAdd/ReadVariableOp","shape":[16],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/conv2d_3/Conv2D/ReadVariableOp","shape":[3,3,16,32],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/conv2d_3/BiasAdd/ReadVariableOp","shape":[32],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/conv2d_4/Conv2D/ReadVariableOp","shape":[3,3,32,32],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/conv2d_4/BiasAdd/ReadVariableOp","shape":[32],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/flatten_1/Const","shape":[2],"dtype":"int32"},{"name":"StatefulPartitionedCall/sequential_1/dense_1/MatMul/ReadVariableOp","shape":[2048,64],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/dense_1/BiasAdd/ReadVariableOp","shape":[64],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/mul","shape":[64],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_5/batchnorm/sub","shape":[64],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/dense_2/MatMul/ReadVariableOp","shape":[64,2],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/dense_2/BiasAdd/ReadVariableOp","shape":[2],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3/Scaled","shape":[16],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_2/FusedBatchNormV3/Offset","shape":[16],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3/Scaled","shape":[16],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_1/FusedBatchNormV3/Offset","shape":[16],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3/Scaled","shape":[32],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_4/FusedBatchNormV3/Offset","shape":[32],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3/Scaled","shape":[32],"dtype":"float32"},{"name":"StatefulPartitionedCall/sequential_1/batch_normalization_3/FusedBatchNormV3/Offset","shape":[32],"dtype":"float32"}] + } + ] +} \ No newline at end of file diff --git a/src/face/facemesh.ts b/src/face/facemesh.ts index bf3f96d7..3efe0d5b 100644 --- a/src/face/facemesh.ts +++ b/src/face/facemesh.ts @@ -78,7 +78,6 @@ export async function predict(input: Tensor, config: Config): Promise [ ((box.startPoint[0] + box.endPoint[0])) / 2 + ((box.endPoint[0] + box.startPoint[0]) * pt[0] / blazeface.size()), @@ -102,7 +101,7 @@ export 
async function predict(input: Tensor, config: Config): Promise<FaceResult[]> {
   face.meshRaw = face.mesh.map((pt) => [pt[0] / (input.shape[2] || 0), pt[1] / (input.shape[1] || 0), (pt[2] || 0) / inputSize]);
   for (const key of Object.keys(coords.meshAnnotations)) face.annotations[key] = coords.meshAnnotations[key].map((index) => face.mesh[index]); // add annotations
-  box = util.squarifyBox(util.enlargeBox(util.calculateLandmarksBoundingBox(face.mesh), enlargeFact)); // redefine box with mesh calculated one
+  box = util.squarifyBox({ ...util.enlargeBox(util.calculateLandmarksBoundingBox(face.mesh), enlargeFact), confidence: box.confidence }); // redefine box with mesh calculated one
   face.box = util.getClampedBox(box, input); // update detected box with box around the face mesh
   face.boxRaw = util.getRawBox(box, input);
   face.score = face.faceScore;
diff --git a/src/image/image.ts b/src/image/image.ts
index 94a00184..ba149600 100644
--- a/src/image/image.ts
+++ b/src/image/image.ts
@@ -82,7 +82,7 @@ export async function process(input: Input, config: Config, getTensor: boolean =
   if ((input)['isDisposedInternal']) {
     throw new Error('input tensor is disposed');
   } else if (!(input as Tensor).shape || (input as Tensor).shape.length !== 4 || (input as Tensor).shape[0] !== 1 || (input as Tensor).shape[3] !== 3) {
-    throw new Error(`input tensor shape must be [1, height, width, 3] and instead was ${input['shape']}`);
+    throw new Error('input tensor shape must be [1, height, width, 3] and instead was ' + (input['shape'] ? input['shape'].toString() : 'unknown'));
   } else {
     return { tensor: tf.clone(input), canvas: (config.filter.return ? outCanvas : null) };
   }
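The reworked error message fires when a caller passes a raw tensor that is not shaped `[1, height, width, 3]`. When feeding tensors directly, the common pitfall is an unbatched `[height, width, 3]` input; a small normalization sketch using the tfjs namespace bundled as `human.tf` (the helper name and error text are illustrative, not library API):

```ts
// normalize a caller-supplied rgb tensor to the [1, height, width, 3] shape
// that image.ts validates above; helper is illustrative, not part of the library
import { Human } from '../../dist/human.esm.js';

function toBatchedRgb(human: Human, t: { shape: number[] }) {
  if (t.shape.length === 3 && t.shape[2] === 3) return human.tf.expandDims(t, 0); // [h, w, 3] -> [1, h, w, 3]
  if (t.shape.length === 4 && t.shape[0] === 1 && t.shape[3] === 3) return t;     // already valid
  throw new Error(`cannot normalize tensor of shape [${t.shape}]`);
}
```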