mirror of https://github.com/vladmandic/human

add softwareKernels config option

parent 79775267bc
commit 22755355f7

TODO.md (25 lines changed)

@@ -23,8 +23,11 @@ N/A

 ### Face with Attention

-`FaceMesh-Attention` is not supported in `Node` or in browser using `WASM` backend due to missing kernel op in **TFJS**
-Model is supported using `WebGL` backend in browser
+`FaceMesh-Attention` is not supported in browser using `WASM` backend due to missing kernel op in **TFJS**
+
+### Object Detection
+
+`NanoDet` model is not supported in browser using `WASM` backend due to missing kernel op in **TFJS**

 ### WebGPU
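
The attention variant still runs under the `WebGL` backend, so the supported path is to select that backend explicitly. A minimal sketch, assuming the published `@vladmandic/human` package and default model paths:

```ts
import { Human } from '@vladmandic/human';

// webgl provides the kernel ops that FaceMesh-Attention needs; wasm currently does not
const human = new Human({
  backend: 'webgl',
  face: { enabled: true, mesh: { enabled: true }, attention: { enabled: true } },
});
await human.load();
```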

@@ -36,21 +39,12 @@ Enable via <chrome://flags/#enable-unsafe-webgpu>
 Running in **web workers** requires `OffscreenCanvas` which is still disabled by default in **Firefox**
 Enable via `about:config` -> `gfx.offscreencanvas.enabled`

-### Face Detection & Hand Detection
-
-Enhanced rotation correction for face detection and hand detection is not working in **NodeJS** due to missing kernel op in **TFJS**
-Feature is automatically disabled in **NodeJS** without user impact
-
-### Object Detection
-
-`NanoDet` model is not supported in `Node` or in browser using `WASM` backend due to missing kernel op in **TFJS**
-Model is supported using `WebGL` backend in browser
-
 <hr><br>

 ## Pending Release Changes

-- Update TFJS to **3.20.0**
+- Update **TFJS** to **3.20.0**
+- Update **TypeScript** to **4.8**
 - Add **InsightFace** model as alternative for face embedding/descriptor detection
 Compatible with multiple variations of **InsightFace** models
 Configurable using `config.face.insightface` config section

@@ -58,9 +52,14 @@ Model is supported using `WebGL` backend in browser
 Models can be downloaded from <https://github.com/vladmandic/insightface>
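
A configuration sketch for the new `config.face.insightface` section; the `modelPath` value is illustrative rather than a confirmed file name, see the repository above for actual model variants:

```ts
import { Human } from '@vladmandic/human';

const human = new Human({
  face: {
    enabled: true,
    description: { enabled: false }, // optionally disable the default descriptor model
    insightface: { enabled: true, modelPath: 'https://vladmandic.github.io/insightface/models/insightface-mobilenet-swish.json' }, // hypothetical file name
  },
});
```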
 - Add `human.check()` which validates all kernel ops for currently loaded models with currently selected backend
 Example: `console.error(human.check());`
+- Add `config.softwareKernels` config option which uses **CPU** implementation for missing ops
+Disabled by default
+If enabled, it is used by face and hand rotation correction (`config.face.rotation` and `config.hand.rotation`)
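
A minimal sketch of enabling the new option together with rotation correction; note that in the default configuration the face rotation flag lives under `face.detector.rotation`:

```ts
import { Human } from '@vladmandic/human';

// softwareKernels registers cpu fallback ops such as RotateWithOffset,
// so rotation correction can stay enabled on wasm or node backends
const human = new Human({
  softwareKernels: true,
  face: { detector: { rotation: true } },
  hand: { rotation: true },
});
```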
 - Add underlying **tensorflow** library version detection when running in NodeJS to
 `human.env` and check if **GPU** is used for acceleration
 Example: `console.log(human.env.tensorflow)`
+- Treat models that cannot be found & loaded as non-critical error
+Instead of creating runtime exception, `human` will now report that model could not be loaded
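
Since load failures no longer throw, callers can inspect what actually loaded; a sketch using the same pattern as the updated demo:

```ts
await human.load();
// models that could not be found or loaded stay null instead of raising an exception
const loaded = Object.values(human.models).filter((model) => model !== null).length;
console.log('models loaded:', loaded, '| stats:', human.getModelStats());
```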
 - Host models in <human-models>
 Models can be directly used without downloading to local storage
 Example: `modelPath: 'https://vladmandic.github.io/human-models/models/facemesh.json'`

File diff suppressed because one or more lines are too long
@@ -4,6 +4,109 @@
 author: <https://github.com/vladmandic>'
 */

-import*as c from"../../dist/human.esm.js";var w={async:!1,modelBasePath:"../../models",filter:{enabled:!0,equalization:!1,flip:!1},face:{enabled:!0,detector:{rotation:!1},mesh:{enabled:!0},attention:{enabled:!1},iris:{enabled:!0},description:{enabled:!0},emotion:{enabled:!0}},body:{enabled:!0},hand:{enabled:!0},object:{enabled:!1},gesture:{enabled:!0}},e=new c.Human(w);e.env.perfadd=!1;e.draw.options.font='small-caps 18px "Lato"';e.draw.options.lineHeight=20;var t={video:document.getElementById("video"),canvas:document.getElementById("canvas"),log:document.getElementById("log"),fps:document.getElementById("status"),perf:document.getElementById("performance")},n={detect:0,draw:0,tensors:0,start:0},o={detectFPS:0,drawFPS:0,frames:0,averageMs:0},i=(...a)=>{t.log.innerText+=a.join(" ")+`
-`,console.log(...a)},r=a=>t.fps.innerText=a,b=a=>t.perf.innerText="tensors:"+e.tf.memory().numTensors.toString()+" | performance: "+JSON.stringify(a).replace(/"|{|}/g,"").replace(/,/g," | ");async function h(){r("starting webcam...");let a={audio:!1,video:{facingMode:"user",resizeMode:"none",width:{ideal:document.body.clientWidth},height:{ideal:document.body.clientHeight}}},d=await navigator.mediaDevices.getUserMedia(a),f=new Promise(p=>{t.video.onloadeddata=()=>p(!0)});t.video.srcObject=d,t.video.play(),await f,t.canvas.width=t.video.videoWidth,t.canvas.height=t.video.videoHeight;let s=d.getVideoTracks()[0],v=s.getCapabilities?s.getCapabilities():"",g=s.getSettings?s.getSettings():"",u=s.getConstraints?s.getConstraints():"";i("video:",t.video.videoWidth,t.video.videoHeight,s.label,{stream:d,track:s,settings:g,constraints:u,capabilities:v}),t.canvas.onclick=()=>{t.video.paused?t.video.play():t.video.pause()}}async function l(){if(!t.video.paused){n.start===0&&(n.start=e.now()),await e.detect(t.video);let a=e.tf.memory().numTensors;a-n.tensors!==0&&i("allocated tensors:",a-n.tensors),n.tensors=a,o.detectFPS=Math.round(1e3*1e3/(e.now()-n.detect))/1e3,o.frames++,o.averageMs=Math.round(1e3*(e.now()-n.start)/o.frames)/1e3,o.frames%100===0&&!t.video.paused&&i("performance",{...o,tensors:n.tensors})}n.detect=e.now(),requestAnimationFrame(l)}async function m(){if(!t.video.paused){let d=e.next(e.result);e.config.filter.flip?e.draw.canvas(d.canvas,t.canvas):e.draw.canvas(t.video,t.canvas),await e.draw.all(t.canvas,d),b(d.performance)}let a=e.now();o.drawFPS=Math.round(1e3*1e3/(a-n.draw))/1e3,n.draw=a,r(t.video.paused?"paused":`fps: ${o.detectFPS.toFixed(1).padStart(5," ")} detect | ${o.drawFPS.toFixed(1).padStart(5," ")} draw`),setTimeout(m,30)}async function M(){i("human version:",e.version,"| tfjs version:",e.tf.version["tfjs-core"]),i("platform:",e.env.platform,"| agent:",e.env.agent),r("loading..."),await e.load(),i("backend:",e.tf.getBackend(),"| available:",e.env.backends),i("models stats:",e.getModelStats()),i("models loaded:",Object.values(e.models).filter(a=>a!==null).length),r("initializing..."),await e.warmup(),await h(),await l(),await m()}window.onload=M;
|
// demo/typescript/index.ts
|
||||||
|
import * as H from "../../dist/human.esm.js";
|
||||||
|
var humanConfig = {
|
||||||
|
async: false,
|
||||||
|
modelBasePath: "../../models",
|
||||||
|
filter: { enabled: true, equalization: false, flip: false },
|
||||||
|
face: { enabled: true, detector: { rotation: false }, mesh: { enabled: true }, attention: { enabled: false }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true } },
|
||||||
|
body: { enabled: true },
|
||||||
|
hand: { enabled: true },
|
||||||
|
object: { enabled: false },
|
||||||
|
gesture: { enabled: true }
|
||||||
|
};
|
||||||
|
var human = new H.Human(humanConfig);
|
||||||
|
human.env.perfadd = false;
|
||||||
|
human.draw.options.font = 'small-caps 18px "Lato"';
|
||||||
|
human.draw.options.lineHeight = 20;
|
||||||
|
var dom = {
|
||||||
|
video: document.getElementById("video"),
|
||||||
|
canvas: document.getElementById("canvas"),
|
||||||
|
log: document.getElementById("log"),
|
||||||
|
fps: document.getElementById("status"),
|
||||||
|
perf: document.getElementById("performance")
|
||||||
|
};
|
||||||
|
var timestamp = { detect: 0, draw: 0, tensors: 0, start: 0 };
|
||||||
|
var fps = { detectFPS: 0, drawFPS: 0, frames: 0, averageMs: 0 };
|
||||||
|
var log = (...msg) => {
|
||||||
|
dom.log.innerText += msg.join(" ") + "\n";
|
||||||
|
console.log(...msg);
|
||||||
|
};
|
||||||
|
var status = (msg) => dom.fps.innerText = msg;
|
||||||
|
var perf = (msg) => dom.perf.innerText = "tensors:" + human.tf.memory().numTensors.toString() + " | performance: " + JSON.stringify(msg).replace(/"|{|}/g, "").replace(/,/g, " | ");
|
||||||
|
async function webCam() {
|
||||||
|
status("starting webcam...");
|
||||||
|
const options = { audio: false, video: { facingMode: "user", resizeMode: "none", width: { ideal: document.body.clientWidth }, height: { ideal: document.body.clientHeight } } };
|
||||||
|
const stream = await navigator.mediaDevices.getUserMedia(options);
|
||||||
|
const ready = new Promise((resolve) => {
|
||||||
|
dom.video.onloadeddata = () => resolve(true);
|
||||||
|
});
|
||||||
|
dom.video.srcObject = stream;
|
||||||
|
void dom.video.play();
|
||||||
|
await ready;
|
||||||
|
dom.canvas.width = dom.video.videoWidth;
|
||||||
|
dom.canvas.height = dom.video.videoHeight;
|
||||||
|
const track = stream.getVideoTracks()[0];
|
||||||
|
const capabilities = track.getCapabilities ? track.getCapabilities() : "";
|
||||||
|
const settings = track.getSettings ? track.getSettings() : "";
|
||||||
|
const constraints = track.getConstraints ? track.getConstraints() : "";
|
||||||
|
log("video:", dom.video.videoWidth, dom.video.videoHeight, track.label, { stream, track, settings, constraints, capabilities });
|
||||||
|
dom.canvas.onclick = () => {
|
||||||
|
if (dom.video.paused)
|
||||||
|
void dom.video.play();
|
||||||
|
else
|
||||||
|
dom.video.pause();
|
||||||
|
};
|
||||||
|
}
|
||||||
|
async function detectionLoop() {
|
||||||
|
if (!dom.video.paused) {
|
||||||
|
if (timestamp.start === 0)
|
||||||
|
timestamp.start = human.now();
|
||||||
|
await human.detect(dom.video);
|
||||||
|
const tensors = human.tf.memory().numTensors;
|
||||||
|
if (tensors - timestamp.tensors !== 0)
|
||||||
|
log("allocated tensors:", tensors - timestamp.tensors);
|
||||||
|
timestamp.tensors = tensors;
|
||||||
|
fps.detectFPS = Math.round(1e3 * 1e3 / (human.now() - timestamp.detect)) / 1e3;
|
||||||
|
fps.frames++;
|
||||||
|
fps.averageMs = Math.round(1e3 * (human.now() - timestamp.start) / fps.frames) / 1e3;
|
||||||
|
if (fps.frames % 100 === 0 && !dom.video.paused)
|
||||||
|
log("performance", { ...fps, tensors: timestamp.tensors });
|
||||||
|
}
|
||||||
|
timestamp.detect = human.now();
|
||||||
|
requestAnimationFrame(detectionLoop);
|
||||||
|
}
|
||||||
|
async function drawLoop() {
|
||||||
|
if (!dom.video.paused) {
|
||||||
|
const interpolated = human.next(human.result);
|
||||||
|
if (human.config.filter.flip)
|
||||||
|
human.draw.canvas(interpolated.canvas, dom.canvas);
|
||||||
|
else
|
||||||
|
human.draw.canvas(dom.video, dom.canvas);
|
||||||
|
await human.draw.all(dom.canvas, interpolated);
|
||||||
|
perf(interpolated.performance);
|
||||||
|
}
|
||||||
|
const now = human.now();
|
||||||
|
fps.drawFPS = Math.round(1e3 * 1e3 / (now - timestamp.draw)) / 1e3;
|
||||||
|
timestamp.draw = now;
|
||||||
|
status(dom.video.paused ? "paused" : `fps: ${fps.detectFPS.toFixed(1).padStart(5, " ")} detect | ${fps.drawFPS.toFixed(1).padStart(5, " ")} draw`);
|
||||||
|
setTimeout(drawLoop, 30);
|
||||||
|
}
|
||||||
|
async function main() {
|
||||||
|
log("human version:", human.version, "| tfjs version:", human.tf.version["tfjs-core"]);
|
||||||
|
log("platform:", human.env.platform, "| agent:", human.env.agent);
|
||||||
|
status("loading...");
|
||||||
|
await human.load();
|
||||||
|
log("backend:", human.tf.getBackend(), "| available:", human.env.backends);
|
||||||
|
log("models stats:", human.getModelStats());
|
||||||
|
log("models loaded:", Object.values(human.models).filter((model) => model !== null).length);
|
||||||
|
status("initializing...");
|
||||||
|
await human.warmup();
|
||||||
|
await webCam();
|
||||||
|
await detectionLoop();
|
||||||
|
await drawLoop();
|
||||||
|
}
|
||||||
|
window.onload = main;
|
||||||
//# sourceMappingURL=index.js.map
|
//# sourceMappingURL=index.js.map
|
||||||

File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large

@@ -4,4 +4,38 @@
 author: <https://github.com/vladmandic>'
 */

var e="3.20.0";var s="3.20.0";var t="3.20.0";var i="3.20.0";var n="3.20.0";var r="3.20.0";var l="3.20.0";var V={tfjs:e,"tfjs-core":s,"tfjs-data":t,"tfjs-layers":i,"tfjs-converter":n,"tfjs-backend-webgl":r,"tfjs-backend-wasm":l};export{V as version};
|
|
||||||
|
+// node_modules/.pnpm/@tensorflow+tfjs@3.20.0_seedrandom@3.0.5/node_modules/@tensorflow/tfjs/package.json
+var version = "3.20.0";
+
+// node_modules/.pnpm/@tensorflow+tfjs-core@3.20.0/node_modules/@tensorflow/tfjs-core/package.json
+var version2 = "3.20.0";
+
+// node_modules/.pnpm/@tensorflow+tfjs-data@3.20.0_k7dauiu3y265wd6lcplf62oi7i/node_modules/@tensorflow/tfjs-data/package.json
+var version3 = "3.20.0";
+
+// node_modules/.pnpm/@tensorflow+tfjs-layers@3.20.0_au2niqrxqvhsnv4oetlud656gy/node_modules/@tensorflow/tfjs-layers/package.json
+var version4 = "3.20.0";
+
+// node_modules/.pnpm/@tensorflow+tfjs-converter@3.20.0_au2niqrxqvhsnv4oetlud656gy/node_modules/@tensorflow/tfjs-converter/package.json
+var version5 = "3.20.0";
+
+// node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.20.0_au2niqrxqvhsnv4oetlud656gy/node_modules/@tensorflow/tfjs-backend-webgl/package.json
+var version6 = "3.20.0";
+
+// node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.20.0_au2niqrxqvhsnv4oetlud656gy/node_modules/@tensorflow/tfjs-backend-wasm/package.json
+var version7 = "3.20.0";
+
+// tfjs/tf-version.ts
+var version8 = {
+  tfjs: version,
+  "tfjs-core": version2,
+  "tfjs-data": version3,
+  "tfjs-layers": version4,
+  "tfjs-converter": version5,
+  "tfjs-backend-webgl": version6,
+  "tfjs-backend-wasm": version7
+};
+export {
+  version8 as version
+};

@@ -76,16 +76,16 @@
     "@tensorflow/tfjs-node": "^3.20.0",
     "@tensorflow/tfjs-node-gpu": "^3.20.0",
     "@tensorflow/tfjs-tflite": "0.0.1-alpha.8",
-    "@types/node": "^18.7.13",
+    "@types/node": "^18.7.14",
     "@types/offscreencanvas": "^2019.7.0",
-    "@typescript-eslint/eslint-plugin": "^5.35.1",
-    "@typescript-eslint/parser": "^5.35.1",
+    "@typescript-eslint/eslint-plugin": "^5.36.0",
+    "@typescript-eslint/parser": "^5.36.0",
    "@vladmandic/build": "^0.7.11",
     "@vladmandic/pilogger": "^0.4.6",
     "@vladmandic/tfjs": "github:vladmandic/tfjs",
     "@webgpu/types": "^0.1.21",
     "canvas": "^2.9.3",
-    "esbuild": "^0.15.5",
+    "esbuild": "^0.15.6",
     "eslint": "8.23.0",
     "eslint-config-airbnb-base": "^15.0.0",
     "eslint-plugin-html": "^7.1.0",

@@ -34,7 +34,7 @@ export async function loadDetect(config: Config): Promise<GraphModel> {
   if (env.initial) models.detector = null;
   if (!models.detector && config.body['detector'] && config.body['detector'].modelPath || '') {
     models.detector = await loadModel(config.body['detector'].modelPath);
-    const inputs = Object.values(models.detector.modelSignature['inputs']);
+    const inputs = models.detector?.['executor'] ? Object.values(models.detector.modelSignature['inputs']) : undefined;
     inputSize.detector[0] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
     inputSize.detector[1] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 0;
   } else if (config.debug && models.detector) log('cached model:', models.detector['modelUrl']);

@@ -46,7 +46,7 @@ export async function loadPose(config: Config): Promise<GraphModel> {
   if (env.initial) models.landmarks = null;
   if (!models.landmarks) {
     models.landmarks = await loadModel(config.body.modelPath);
-    const inputs = Object.values(models.landmarks.modelSignature['inputs']);
+    const inputs = models.landmarks?.['executor'] ? Object.values(models.landmarks.modelSignature['inputs']) : undefined;
     inputSize.landmarks[0] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
     inputSize.landmarks[1] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 0;
   } else if (config.debug) log('cached model:', models.landmarks['modelUrl']);

@@ -140,6 +140,7 @@ async function detectLandmarks(input: Tensor, config: Config, outputSize: [number, number]) {
   * t.world: 39 keypoints [x,y,z] normalized to -1..1
   * t.poseflag: body score
   */
+  if (!models.landmarks?.['executor']) return null;
   const t: Record<string, Tensor> = {};
   [t.ld/* 1,195(39*5) */, t.segmentation/* 1,256,256,1 */, t.heatmap/* 1,64,64,39 */, t.world/* 1,117(39*3) */, t.poseflag/* 1,1 */] = models.landmarks?.execute(input, outputNodes.landmarks) as Tensor[]; // run model
   const poseScore = (await t.poseflag.data())[0];

@@ -51,6 +51,7 @@ async function max2d(inputs, minScore): Promise<[number, number, number]> {
 }

 export async function predict(image: Tensor, config: Config): Promise<BodyResult[]> {
+  if (!model?.['executor']) return [];
   const skipTime = (config.body.skipTime || 0) > (now() - lastTime);
   const skipFrame = skipped < (config.body.skipFrames || 0);
   if (config.skipAllowed && skipTime && skipFrame && Object.keys(cache.keypoints).length > 0) {

@@ -37,7 +37,7 @@ export async function load(config: Config): Promise<GraphModel> {
     fakeOps(['size'], config);
     model = await loadModel(config.body.modelPath);
   } else if (config.debug) log('cached model:', model['modelUrl']);
-  inputSize = model.inputs[0].shape ? model.inputs[0].shape[2] : 0;
+  inputSize = (model?.['executor'] && model?.inputs?.[0].shape) ? model.inputs[0].shape[2] : 0;
   if (inputSize < 64) inputSize = 256;
   return model;
 }

@@ -124,7 +124,7 @@ function parseMultiPose(res, config, image) {
 }

 export async function predict(input: Tensor, config: Config): Promise<BodyResult[]> {
-  if (!model?.inputs?.[0].shape) return []; // something is wrong with the model
+  if (!model?.['executor'] || !model?.inputs?.[0].shape) return []; // something is wrong with the model
   if (!config.skipAllowed) cache.boxes.length = 0; // allowed to use cache or not
   skipped++; // increment skip frames
   const skipTime = (config.body.skipTime || 0) > (now() - cache.last);

@@ -159,6 +159,7 @@ export async function predict(input: Tensor, config: Config): Promise<BodyResult[]> {
 /** posenet is mostly obsolete
  * caching is not implemented
  */
+  if (!model?.['executor']) return [];
   const res = tf.tidy(() => {
     if (!model.inputs[0].shape) return [];
     const resized = tf.image.resizeBilinear(input, [model.inputs[0].shape[2], model.inputs[0].shape[1]]);

@@ -286,6 +286,11 @@ export interface Config {
   */
   cacheSensitivity: number;

+  /** Software Kernels
+  * Registers software kernel ops running on CPU when accelerated version of kernel is not found in the current backend
+  */
+  softwareKernels: boolean,
+
   /** Perform immediate garbage collection on deallocated tensors instead of caching them */
   deallocate: boolean;

@@ -328,6 +333,7 @@ const config: Config = {
   cacheSensitivity: 0.70,
   skipAllowed: false,
   deallocate: false,
+  softwareKernels: false,
   filter: {
     enabled: true,
     equalization: false,

@@ -23,7 +23,7 @@ export async function load(config: Config): Promise<GraphModel> {
 }

 export async function predict(image: Tensor, config: Config, idx: number, count: number): Promise<number> {
-  if (!model) return 0;
+  if (!model || !model?.['executor']) return 0;
   const skipTime = (config.face.antispoof?.skipTime || 0) > (now() - lastTime);
   const skipFrame = skipped < (config.face.antispoof?.skipFrames || 0);
   if (config.skipAllowed && skipTime && skipFrame && (lastCount === count) && cached[idx]) {

@@ -28,7 +28,7 @@ export async function load(config: Config): Promise<GraphModel> {
   if (env.initial) model = null;
   if (!model) model = await loadModel(config.face.detector?.modelPath);
   else if (config.debug) log('cached model:', model['modelUrl']);
-  inputSize = model.inputs[0].shape ? model.inputs[0].shape[2] : 0;
+  inputSize = (model['executor'] && model.inputs[0].shape) ? model.inputs[0].shape[2] : 256;
   inputSizeT = tf.scalar(inputSize, 'int32') as Tensor;
   anchors = tf.tensor2d(util.generateAnchors(inputSize)) as Tensor;
   return model;

@@ -33,6 +33,7 @@ let model: GraphModel | null = null;
 let inputSize = 0;

 export async function predict(input: Tensor, config: Config): Promise<FaceResult[]> {
+  if (!model?.['executor']) return [];
   // reset cached boxes
   const skipTime = (config.face.detector?.skipTime || 0) > (now() - cache.timestamp);
   const skipFrame = cache.skipped < (config.face.detector?.skipFrames || 0);

@@ -120,7 +121,7 @@ export async function predict(input: Tensor, config: Config): Promise<FaceResult[]> {
     if (config.face.attention?.enabled) {
       rawCoords = await attention.augment(rawCoords, results); // augment iris results using attention model results
     } else if (config.face.iris?.enabled) {
-      rawCoords = await iris.augmentIris(rawCoords, face.tensor, config, inputSize); // run iris model and augment results
+      rawCoords = await iris.augmentIris(rawCoords, face.tensor, inputSize); // run iris model and augment results
     }
     face.mesh = util.transformRawCoords(rawCoords, box, angle, rotationMatrix, inputSize); // get processed mesh
     face.meshRaw = face.mesh.map((pt) => [pt[0] / (input.shape[2] || 0), pt[1] / (input.shape[1] || 0), (pt[2] || 0) / size]);

@@ -158,7 +159,7 @@ export async function load(config: Config): Promise<GraphModel> {
   } else if (config.debug) {
     log('cached model:', model['modelUrl']);
   }
-  inputSize = model.inputs[0].shape ? model.inputs[0].shape[2] : 0;
+  inputSize = (model['executor'] && model?.inputs?.[0].shape) ? model?.inputs?.[0].shape[2] : 256;
   return model;
 }


@@ -169,7 +169,7 @@ export function correctFaceRotation(rotate, box, input, inputSize) {
   let rotationMatrix = fixedRotationMatrix; // default
   let face; // default

-  if (rotate && env.kernels.includes('rotatewithoffset')) { // rotateWithOffset is not defined for tfjs-node
+  if (rotate && env.kernels.includes('rotatewithoffset')) {
     angle = computeRotation(box.landmarks[symmetryLine[0]], box.landmarks[symmetryLine[1]]);
     const largeAngle = angle && (angle !== 0) && (Math.abs(angle) > 0.2);
     if (largeAngle) { // perform rotation only if angle is sufficiently high

@@ -64,7 +64,7 @@ export function enhance(input): Tensor {
 }

 export async function predict(image: Tensor, config: Config, idx: number, count: number): Promise<FaceRes> {
-  if (!model) return { age: 0, gender: 'unknown', genderScore: 0, descriptor: [] };
+  if (!model?.['executor']) return { age: 0, gender: 'unknown', genderScore: 0, descriptor: [] };
   const skipFrame = skipped < (config.face.description?.skipFrames || 0);
   const skipTime = (config.face.description?.skipTime || 0) > (now() - lastTime);
   if (config.skipAllowed && skipFrame && skipTime && (lastCount === count) && last[idx]?.age && (last[idx]?.age > 0)) {

@@ -27,7 +27,7 @@ export async function load(config: Config): Promise<GraphModel> {
 }

 export async function predict(input: Tensor, config: Config, idx, count): Promise<number[]> {
-  if (!model) return [];
+  if (!model?.['executor']) return [];
   const skipFrame = skipped < (config.face['insightface']?.skipFrames || 0);
   const skipTime = (config.face['insightface']?.skipTime || 0) > (now() - lastTime);
   if (config.skipAllowed && skipTime && skipFrame && (lastCount === count) && last[idx]) {

@@ -32,7 +32,7 @@ export async function load(config: Config): Promise<GraphModel> {
   if (env.initial) model = null;
   if (!model) model = await loadModel(config.face.iris?.modelPath);
   else if (config.debug) log('cached model:', model['modelUrl']);
-  inputSize = model.inputs[0].shape ? model.inputs[0].shape[2] : 0;
+  inputSize = (model?.['executor'] && model.inputs?.[0].shape) ? model.inputs[0].shape[2] : 0;
   if (inputSize === -1) inputSize = 64;
   return model;
 }

@@ -110,11 +110,8 @@ export const getAdjustedIrisCoords = (rawCoords, irisCoords, direction) => {
   });
 };

-export async function augmentIris(rawCoords, face, config, meshSize) {
-  if (!model) {
-    if (config.debug) log('face mesh iris detection requested, but model is not loaded');
-    return rawCoords;
-  }
+export async function augmentIris(rawCoords, face, meshSize) {
+  if (!model?.['executor']) return rawCoords;
   const { box: leftEyeBox, boxSize: leftEyeBoxSize, crop: leftEyeCrop } = getEyeBox(rawCoords, face, eyeLandmarks.leftBounds[0], eyeLandmarks.leftBounds[1], meshSize, true);
   const { box: rightEyeBox, boxSize: rightEyeBoxSize, crop: rightEyeCrop } = getEyeBox(rawCoords, face, eyeLandmarks.rightBounds[0], eyeLandmarks.rightBounds[1], meshSize, true);
   const combined = tf.concat([leftEyeCrop, rightEyeCrop]);

@@ -23,7 +23,7 @@ export async function load(config: Config): Promise<GraphModel> {
 }

 export async function predict(image: Tensor, config: Config, idx: number, count: number): Promise<number> {
-  if (!model) return 0;
+  if (!model?.['executor']) return 0;
   const skipTime = (config.face.liveness?.skipTime || 0) > (now() - lastTime);
   const skipFrame = skipped < (config.face.liveness?.skipFrames || 0);
   if (config.skipAllowed && skipTime && skipFrame && (lastCount === count) && cached[idx]) {

@@ -45,7 +45,7 @@ const contrast = merge.sub(mean).mul(factor).add(mean);
 */

 export async function predict(input: Tensor, config: Config, idx, count): Promise<number[]> {
-  if (!model) return [];
+  if (!model?.['executor']) return [];
   const skipFrame = skipped < (config.face['mobilefacenet']?.skipFrames || 0);
   const skipTime = (config.face['mobilefacenet']?.skipTime || 0) > (now() - lastTime);
   if (config.skipAllowed && skipTime && skipFrame && (lastCount === count) && last[idx]) {

@@ -76,7 +76,7 @@ export async function loadDetect(config: Config): Promise<GraphModel> {
     // ideally need to prune the model itself
     fakeOps(['tensorlistreserve', 'enter', 'tensorlistfromtensor', 'merge', 'loopcond', 'switch', 'exit', 'tensorliststack', 'nextiteration', 'tensorlistsetitem', 'tensorlistgetitem', 'reciprocal', 'shape', 'split', 'where'], config);
     models[0] = await loadModel(config.hand.detector?.modelPath);
-    const inputs = Object.values(models[0].modelSignature['inputs']);
+    const inputs = models[0]['executor'] ? Object.values(models[0].modelSignature['inputs']) : undefined;
     inputSize[0][0] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
     inputSize[0][1] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 0;
   } else if (config.debug) log('cached model:', models[0]['modelUrl']);

@@ -87,7 +87,7 @@ export async function loadSkeleton(config: Config): Promise<GraphModel> {
   if (env.initial) models[1] = null;
   if (!models[1]) {
     models[1] = await loadModel(config.hand.skeleton?.modelPath);
-    const inputs = Object.values(models[1].modelSignature['inputs']);
+    const inputs = models[1]['executor'] ? Object.values(models[1].modelSignature['inputs']) : undefined;
     inputSize[1][0] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
     inputSize[1][1] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 0;
   } else if (config.debug) log('cached model:', models[1]['modelUrl']);

@@ -182,7 +182,7 @@ async function detectFingers(input: Tensor, h: HandDetectResult, config: Config) {
 }

 export async function predict(input: Tensor, config: Config): Promise<HandResult[]> {
-  if (!models[0] || !models[1] || !models[0].inputs[0].shape || !models[1].inputs[0].shape) return []; // something is wrong with the model
+  if (!models[0]?.['executor'] || !models[1]?.['executor'] || !models[0].inputs[0].shape || !models[1].inputs[0].shape) return []; // something is wrong with the model
   outputSize = [input.shape[2] || 0, input.shape[1] || 0];
   skipped++; // increment skip frames
   const skipTime = (config.hand.skipTime || 0) > (now() - lastTime);

@@ -164,7 +164,9 @@ export function validateModel(newInstance: Human | null, model: GraphModel | null
       if (!ops.includes(op)) ops.push(op);
     }
   } else {
-    if (!executor && instance.config.debug) log('model signature not determined:', name);
+    if (!executor && instance.config.debug) {
+      log('model not loaded', name);
+    }
   }
   for (const op of ops) {
     if (!simpleOps.includes(op) // exclude simple ops
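
This validation path is what the new `human.check()` surfaces; a usage sketch, assuming it returns a list of detected problems (the exact return shape is not documented in this changeset):

```ts
const issues = human.check(); // validate kernel ops of loaded models against the active backend
if (issues.length > 0) console.error('missing ops for loaded models:', issues);
```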

@@ -24,7 +24,7 @@ export async function load(config: Config): Promise<GraphModel> {
   if (!model) {
     // fakeOps(['floormod'], config);
     model = await loadModel(config.object.modelPath);
-    const inputs = Object.values(model.modelSignature['inputs']);
+    const inputs = model?.['executor'] ? Object.values(model.modelSignature['inputs']) : undefined;
     inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 0;
   } else if (config.debug) log('cached model:', model['modelUrl']);
   return model;

@@ -72,6 +72,7 @@ async function process(res: Tensor | null, outputShape: [number, number], config: Config) {
 }

 export async function predict(input: Tensor, config: Config): Promise<ObjectResult[]> {
+  if (!model?.['executor']) return [];
   const skipTime = (config.object.skipTime || 0) > (now() - lastTime);
   const skipFrame = skipped < (config.object.skipFrames || 0);
   if (config.skipAllowed && skipTime && skipFrame && (last.length > 0)) {

@@ -25,8 +25,8 @@ const scaleBox = 2.5; // increase box size
 export async function load(config: Config): Promise<GraphModel> {
   if (!model || env.initial) {
     model = await loadModel(config.object.modelPath);
-    const inputs = Object.values(model.modelSignature['inputs']);
-    inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 0;
+    const inputs = model?.['executor'] ? Object.values(model.modelSignature['inputs']) : undefined;
+    inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 416;
   } else if (config.debug) log('cached model:', model['modelUrl']);
   return model;
 }

@@ -106,6 +106,7 @@ async function process(res: Tensor[], outputShape: [number, number], config: Config) {
 }

 export async function predict(image: Tensor, config: Config): Promise<ObjectResult[]> {
+  if (!model?.['executor']) return [];
   const skipTime = (config.object.skipTime || 0) > (now() - lastTime);
   const skipFrame = skipped < (config.object.skipFrames || 0);
   if (config.skipAllowed && skipTime && skipFrame && (last.length > 0)) {

@@ -1,31 +1,68 @@
 /** TFJS backend initialization and customization */

-import type { Human } from '../human';
+import type { Human, Config } from '../human';
 import { log, now } from '../util/util';
 import { env } from '../util/env';
 import * as humangl from './humangl';
 import * as tf from '../../dist/tfjs.esm.js';
 import * as constants from './constants';

-function registerCustomOps() {
+function registerCustomOps(config: Config) {
   if (!env.kernels.includes('mod')) {
     const kernelMod = {
       kernelName: 'Mod',
       backendName: tf.getBackend(),
       kernelFunc: (op) => tf.tidy(() => tf.sub(op.inputs.a, tf.mul(tf.div(op.inputs.a, op.inputs.b), op.inputs.b))),
     };
+    if (config.debug) log('registered kernel:', 'Mod');
     tf.registerKernel(kernelMod);
     env.kernels.push('mod');
   }
   if (!env.kernels.includes('floormod')) {
-    const kernelMod = {
+    const kernelFloorMod = {
       kernelName: 'FloorMod',
       backendName: tf.getBackend(),
       kernelFunc: (op) => tf.tidy(() => tf.add(tf.mul(tf.floorDiv(op.inputs.a / op.inputs.b), op.inputs.b), tf.mod(op.inputs.a, op.inputs.b))),
     };
-    tf.registerKernel(kernelMod);
+    if (config.debug) log('registered kernel:', 'FloorMod');
+    tf.registerKernel(kernelFloorMod);
     env.kernels.push('floormod');
   }
+  /*
+  if (!env.kernels.includes('atan2') && config.softwareKernels) {
+    const kernelAtan2 = {
+      kernelName: 'Atan2',
+      backendName: tf.getBackend(),
+      kernelFunc: (op) => tf.tidy(() => {
+        const backend = tf.getBackend();
+        tf.setBackend('cpu');
+        const t = tf.atan2(op.inputs.a, op.inputs.b);
+        tf.setBackend(backend);
+        return t;
+      }),
+    };
+    if (config.debug) log('registered kernel:', 'atan2');
+    log('registered kernel:', 'atan2');
+    tf.registerKernel(kernelAtan2);
+    env.kernels.push('atan2');
+  }
+  */
+  if (!env.kernels.includes('rotatewithoffset') && config.softwareKernels) {
+    const kernelRotateWithOffset = {
+      kernelName: 'RotateWithOffset',
+      backendName: tf.getBackend(),
+      kernelFunc: (op) => tf.tidy(() => {
+        const backend = tf.getBackend();
+        tf.setBackend('cpu');
+        const t = tf.image.rotateWithOffset(op.inputs.image, op.attrs.radians, op.attrs.fillValue, op.attrs.center);
+        tf.setBackend(backend);
+        return t;
+      }),
+    };
+    if (config.debug) log('registered kernel:', 'RotateWithOffset');
+    tf.registerKernel(kernelRotateWithOffset);
+    env.kernels.push('rotatewithoffset');
+  }
 }

 export async function check(instance: Human, force = false) {

@@ -146,7 +183,7 @@ export async function check(instance: Human, force = false) {
     instance.config.backend = tf.getBackend();

     await env.updateBackend(); // update env on backend init
-    registerCustomOps();
+    registerCustomOps(instance.config);
     // await env.updateBackend(); // update env on backend init
   }
   return true;
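
With `softwareKernels` enabled, the fallback kernels above execute on the `cpu` backend and hand their result back to the active backend. Downstream code keys off the `env.kernels` registry, as the face mesh utilities do; a short sketch (the `human` instance name is illustrative):

```ts
// env.kernels holds lowercase names of kernel ops known to be available
if (human.env.kernels.includes('rotatewithoffset')) {
  // a native or software RotateWithOffset is present, so rotation correction is safe to enable
  human.config.face.detector.rotation = true;
}
```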

@@ -7,9 +7,6 @@ Not required for normal functioning of library

 ### NodeJS using TensorFlow library

 - Image filters are disabled due to lack of Canvas and WebGL access
-- Face rotation is disabled for `NodeJS` platform:
-`Kernel 'RotateWithOffset' not registered for backend 'tensorflow'`
-<https://github.com/tensorflow/tfjs/issues/4606>

 ### NodeJS using WASM

test/build.log (1802 lines changed)
File diff suppressed because it is too large

test/node.js (10 lines changed)

@@ -8,10 +8,12 @@ let logFile = 'test.log';
 log.configure({ inspect: { breakLength: 350 } });

 const tests = [
-  'test-node.js',
-  'test-node-gpu.js',
-  'test-node-wasm.js',
-  // 'test-node-cpu.js',
+  'test-node-load.js',
+  'test-node-gear.js',
+  'test-backend-node.js',
+  'test-backend-node-gpu.js',
+  'test-backend-node-wasm.js',
+  // 'test-backend-node-cpu.js',
 ];

 const demos = [

@@ -1,6 +1,6 @@
 process.env.TF_CPP_MIN_LOG_LEVEL = '2';
-const Human = require('../dist/human.node.js').default;
-const test = require('./test-main.js').test;
+const H = require('../dist/human.node.js');
+const test = require('./test-node-main.js').test;

 const config = {
   cacheSensitivity: 0,

@@ -10,7 +10,7 @@ const config = {
   async: true,
   face: {
     enabled: true,
-    detector: { rotation: true },
+    detector: { rotation: false },
     mesh: { enabled: true },
     iris: { enabled: true },
     description: { enabled: true },

@@ -25,4 +25,8 @@ const config = {
   filter: { enabled: false },
 };

-test(Human, config);
+async function main() {
+  test(H.Human, config);
+}
+
+if (require.main === module) main();

@@ -1,6 +1,6 @@
 process.env.TF_CPP_MIN_LOG_LEVEL = '2';
 const H = require('../dist/human.node-gpu.js');
-const test = require('./test-main.js').test;
+const test = require('./test-node-main.js').test;

 const config = {
   cacheSensitivity: 0,

@@ -10,7 +10,7 @@ const config = {
   async: true,
   face: {
     enabled: true,
-    detector: { rotation: true },
+    detector: { rotation: false },
     mesh: { enabled: true },
     iris: { enabled: true },
     description: { enabled: true },

@@ -29,4 +29,4 @@ async function main() {
   test(H.Human, config);
 }

-main();
+if (require.main === module) main();

@@ -3,7 +3,7 @@ const tf = require('@tensorflow/tfjs'); // wasm backend requires tfjs to be load
 const wasm = require('@tensorflow/tfjs-backend-wasm'); // wasm backend does not get auto-loaded in nodejs
 const { Canvas, Image } = require('canvas'); // eslint-disable-line node/no-extraneous-require, node/no-missing-require
 const H = require('../dist/human.node-wasm.js');
-const test = require('./test-main.js').test;
+const test = require('./test-node-main.js').test;

 H.env.Canvas = Canvas; // requires monkey-patch as wasm does not have tf.browser namespace
 H.env.Image = Image; // requires monkey-patch as wasm does not have tf.browser namespace

@@ -16,6 +16,7 @@ const config = {
   wasmPath: `https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@${tf.version_core}/dist/`,
   debug: false,
   async: false,
+  softwareKernels: true,
   face: {
     enabled: true,
     detector: { rotation: false },

@@ -42,4 +43,4 @@ async function main() {
   test(H.Human, config);
 }

-main();
+if (require.main === module) main();

@@ -1,7 +1,7 @@
 process.env.TF_CPP_MIN_LOG_LEVEL = '2';

 const H = require('../dist/human.node.js');
-const test = require('./test-main.js').test;
+const test = require('./test-node-main.js').test;

 const config = {
   cacheSensitivity: 0,

@@ -11,7 +11,7 @@ const config = {
   async: true,
   face: {
     enabled: true,
-    detector: { rotation: true },
+    detector: { rotation: false },
     mesh: { enabled: true },
     iris: { enabled: true },
     description: { enabled: true },

@@ -30,4 +30,4 @@ async function main() {
   test(H.Human, config);
 }

-main();
+if (require.main === module) main();

@@ -1,19 +0,0 @@
-const tf = require('@tensorflow/tfjs-node'); // in nodejs environments tfjs-node is required to be loaded before human
-const Human = require('../dist/human.node.js'); // use this when using human in dev mode
-
-async function main() {
-  const log = (...msg) => console.log(...msg); // eslint-disable-line no-console
-  const human = new Human.Human(); // create instance of human using default configuration
-  const startTime = new Date();
-  log('start', { human: human.version, tf: tf.version_core, progress: human.getModelStats().percentageLoaded });
-  setInterval(() => log('interval', { elapsed: new Date() - startTime, progress: human.getModelStats().percentageLoaded }));
-  const loadPromise = human.load();
-  loadPromise
-    .then(() => log('resolved', { progress: human.getModelStats().percentageLoaded }))
-    .catch(() => log('error'));
-  await loadPromise;
-  log('final', { progress: human.getModelStats().percentageLoaded });
-  await human.warmup(); // optional as model warmup is performed on-demand first time its executed
-}
-
-main();
@@ -1,15 +1,19 @@
 require('@tensorflow/tfjs-node');
 const fs = require('fs');
 const path = require('path');
-const log = require('@vladmandic/pilogger');
 const Human = require('../dist/human.node.js').default;
 
+const log = (status, ...data) => {
+  if (typeof process.send !== 'undefined') process.send([status, data]); // send to parent process over ipc
+  else console.log(status, ...data); // eslint-disable-line no-console
+};
+
 process.env.TF_CPP_MIN_LOG_LEVEL = '2';
 const humanConfig = {
   backend: 'tensorflow',
   face: {
     detector: { enabled: true, modelPath: 'file://../human-models/models/blazeface-back.json', cropFactor: 1.6 },
-    mesh: { enabled: false },
+    mesh: { enabled: true },
     iris: { enabled: false },
     description: { enabled: true, modelPath: 'file://../human-models/models/faceres.json' },
     gear: { enabled: true, modelPath: 'file://../human-models/models/gear.json' },
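The new log() helper reports over IPC when the file is forked by a parent runner, and falls back to the console when run standalone. A minimal sketch of the parent side, assuming only core child_process; the forked path is illustrative, not a file in this commit:

const { fork } = require('child_process');

// fork a test file that uses the log() helper; each message arrives as [status, data]
const child = fork('./test-node-example.js'); // illustrative path
child.on('message', ([status, data]) => {
  if (status === 'error') console.error('child error:', ...data);
  else console.log('child', status, ...data); // eslint-disable-line no-console
});
child.on('exit', (code) => console.log('child exited:', code)); // eslint-disable-line no-console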
@@ -29,47 +33,63 @@ function getImageTensor(imageFile) {
     const buffer = fs.readFileSync(imageFile);
     tensor = human.tf.node.decodeImage(buffer, 3);
   } catch (e) {
-    log.warn(`error loading image: ${imageFile}: ${e.message}`);
+    log('error', `failed: loading image ${imageFile}: ${e.message}`);
   }
   return tensor;
 }
 
 function printResult(obj) {
-  if (!obj || !obj.res || !obj.res.face || obj.res.face.length === 0) log.warn('no faces detected');
-  else obj.res.face.forEach((face, i) => log.data({ face: i, model: obj.model, image: obj.image, age: face.age, gender: face.gender, genderScore: face.genderScore, race: face.race }));
+  if (!obj || !obj.res || !obj.res.face || obj.res.face.length === 0) log('warn', 'failed: no faces detected');
+  else obj.res.face.forEach((face, i) => log('data', 'results', { face: i, model: obj.model, image: obj.image, age: face.age, gender: face.gender, genderScore: face.genderScore, race: face.race }));
 }
 
 async function main() {
-  log.header();
-  if (process.argv.length !== 3) throw new Error('parameters: <input-image> or <input-folder> missing');
-  if (!fs.existsSync(process.argv[2])) throw new Error(`file not found: ${process.argv[2]}`);
-  const stat = fs.statSync(process.argv[2]);
+  const inputs = process.argv.length === 3 ? process.argv[2] : 'samples/in/ai-face.jpg';
+  if (!fs.existsSync(inputs)) throw new Error(`file not found: ${inputs}`);
+  const stat = fs.statSync(inputs);
   const files = [];
-  if (stat.isFile()) files.push(process.argv[2]);
-  else if (stat.isDirectory()) fs.readdirSync(process.argv[2]).forEach((f) => files.push(path.join(process.argv[2], f)));
-  log.data('input:', files);
+  if (stat.isFile()) files.push(inputs);
+  else if (stat.isDirectory()) fs.readdirSync(inputs).forEach((f) => files.push(path.join(inputs, f)));
+  log('data', 'input:', files);
   await human.load();
   let res;
   for (const f of files) {
     const tensor = getImageTensor(f);
     if (!tensor) continue;
+    let msg = {};
 
     human.config.face.description.enabled = true;
     human.config.face.gear.enabled = false;
     human.config.face.ssrnet.enabled = false;
     res = await human.detect(tensor);
-    printResult({ model: 'faceres', image: f, res });
+    msg = { model: 'faceres', image: f, res };
+    if (res?.face?.[0].age > 20 && res?.face?.[0].age < 30) log('state', 'passed: gear', msg.model, msg.image);
+    else log('error', 'failed: gear', msg);
+    printResult(msg);
 
     human.config.face.description.enabled = false;
     human.config.face.gear.enabled = true;
     human.config.face.ssrnet.enabled = false;
     res = await human.detect(tensor);
-    printResult({ model: 'gear', image: f, res });
+    msg = { model: 'gear', image: f, res };
+    if (res?.face?.[0].age > 20 && res?.face?.[0].age < 30) log('state', 'passed: gear', msg.model, msg.image);
+    else log('error', 'failed: gear', msg);
+    printResult(msg);
 
     human.config.face.description.enabled = false;
     human.config.face.gear.enabled = false;
     human.config.face.ssrnet.enabled = true;
     res = await human.detect(tensor);
-    printResult({ model: 'ssrnet', image: f, res });
+    msg = { model: 'ssrnet', image: f, res };
+    if (res?.face?.[0].age > 20 && res?.face?.[0].age < 30) log('state', 'passed: gear', msg.model, msg.image);
+    else log('error', 'failed: gear', msg);
+    printResult(msg);
 
     human.tf.dispose(tensor);
   }
 }
 
-main();
+exports.test = main;
+
+if (require.main === module) main();
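The three near-identical blocks above toggle one model at a time (faceres, gear, ssrnet) and check the detected age against the same expected 20 to 30 range; note the log label 'gear' is reused verbatim in all three branches. The same flow could be expressed as a helper; a sketch only, assuming the human and printResult identifiers from the file above, not part of the commit:

// sketch: the repeated detect blocks expressed as a loop over model toggles
async function detectWith(human, tensor, image) {
  for (const model of ['faceres', 'gear', 'ssrnet']) {
    human.config.face.description.enabled = model === 'faceres';
    human.config.face.gear.enabled = model === 'gear';
    human.config.face.ssrnet.enabled = model === 'ssrnet';
    const res = await human.detect(tensor); // eslint-disable-line no-await-in-loop
    printResult({ model, image, res });
  }
}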
@@ -0,0 +1,33 @@
+const tf = require('@tensorflow/tfjs-node'); // in nodejs environments tfjs-node is required to be loaded before human
+const Human = require('../dist/human.node.js'); // use this when using human in dev mode
+
+const log = (status, ...data) => {
+  if (typeof process.send !== 'undefined') process.send([status, data]); // send to parent process over ipc
+  else console.log(status, ...data); // eslint-disable-line no-console
+};
+
+async function main() {
+  const human = new Human.Human(); // create instance of human using default configuration
+  const startTime = new Date();
+  log('info', 'load start', { human: human.version, tf: tf.version_core, progress: human.getModelStats().percentageLoaded });
+
+  async function monitor() {
+    const progress = human.getModelStats().percentageLoaded;
+    log('data', 'load interval', { elapsed: new Date() - startTime, progress });
+    if (progress < 1) setTimeout(monitor, 10);
+  }
+
+  monitor();
+  // setInterval(() => log('interval', { elapsed: new Date() - startTime, progress: human.getModelStats().percentageLoaded }));
+  const loadPromise = human.load();
+  loadPromise
+    .then(() => log('state', 'passed', { progress: human.getModelStats().percentageLoaded }))
+    .catch(() => log('error', 'load promise'));
+  await loadPromise;
+  log('info', 'load final', { progress: human.getModelStats().percentageLoaded });
+  await human.warmup(); // optional as model warmup is performed on-demand first time its executed
+}
+
+exports.test = main;
+
+if (require.main === module) main();
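The new load test polls human.getModelStats().percentageLoaded with a self-rescheduling setTimeout instead of the old uncleared setInterval, so polling stops once progress reaches 1 and the process can exit. The same idea as a reusable helper; a minimal sketch with illustrative names:

// generic poll-until helper built on the same setTimeout pattern (illustrative)
function waitFor(predicate, intervalMs = 10) {
  return new Promise((resolve) => {
    const check = () => (predicate() ? resolve() : setTimeout(check, intervalMs));
    check();
  });
}

// usage: resolves once all models report fully loaded
// await waitFor(() => human.getModelStats().percentageLoaded >= 1);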
@@ -487,6 +487,7 @@ async function test(Human, inputConfig) {
   // test face attention
   log('info', 'test face attention');
   human.models.facemesh = null;
+  config.softwareKernels = true;
   config.face.attention = { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/facemesh-attention.json' };
   res = await testDetect(human, 'samples/in/ai-face.jpg', 'face attention');
   if (!res || !res.face[0] || res.face[0].mesh.length !== 478 || Object.keys(res.face[0].annotations).length !== 36) log('error', 'failed: face attention', { mesh: res.face?.[0]?.mesh?.length, annotations: Object.keys(res.face?.[0]?.annotations | {}).length });
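The added config.softwareKernels = true; line precedes enabling the attention model in this test. A standalone sketch of the same configuration; the model path comes from the diff above, while passing a partial config to the constructor is an assumption based on the other test files:

// minimal sketch: same settings as the face attention test, configured up front
const H = require('../dist/human.node.js');
const human = new H.Human({
  softwareKernels: true,
  face: { attention: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/facemesh-attention.json' } },
});
human.load().then(() => console.log('loaded')); // eslint-disable-line no-console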
1976 test/test.log
File diff suppressed because it is too large