mirror of https://github.com/vladmandic/human

support dynamic loads

parent daec8d4ba1
commit b47e6251c8

@@ -93,6 +93,7 @@
- **Similarity** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Compares two input images for similarity of detected faces
- **Face Match** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Parallel processing of face **match** in multiple child worker threads
- **Multiple Workers** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Runs multiple `human` instances in parallel by dispatching them to a pool of pre-created worker processes
- **Dynamic Load** [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/nodejs): Loads Human dynamically with multiple different desired backends
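
The Dynamic Load demo boils down to requiring a dist bundle chosen at runtime; a minimal sketch under assumed paths (bundle and backend names follow the node-bench demo added later in this commit):

```js
// hedged sketch: dynamically require a Human build and initialize a chosen backend
async function loadHuman(moduleName, backend) {
  const Human = require(`../../dist/${moduleName}`); // e.g. 'human.node.js'
  const human = new Human.Human({ backend });        // e.g. 'tensorflow' or 'wasm'
  await human.init(); // initialize the requested backend
  return human;
}
```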

## Project pages

TODO.md

@@ -68,6 +68,7 @@ Optimizations:

Features:
- Add [draw label templates](https://github.com/vladmandic/human/wiki/Draw)
  Allows easy customization of result labels drawn on canvas
- Add `config.filter.autoBrightness` (*enabled by default*)
  Per-frame on-the-fly brightness adjustment of video input,
  which significantly increases performance and precision in poorly lit scenes
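
A minimal sketch of toggling the new flag (constructor usage as in the demos elsewhere in this commit; everything besides `autoBrightness` is illustrative):

```js
// hedged sketch: enable/disable per-frame auto-brightness
const Human = require('@vladmandic/human'); // package main resolves to the NodeJS bundle
const human = new Human.Human({
  filter: {
    enabled: true,
    autoBrightness: true, // enabled by default; set to false to opt out
  },
});
```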

@@ -94,6 +95,8 @@ Architecture:
- Cleanup `@vladmandic/human-models`
- Support for **NodeJS v19**
- Upgrade to **TypeScript 4.9**
- Support for dynamic module load in **NodeJS**
  See <https://vladmandic.github.io/human/demo/nodejs/node-bench>

Breaking changes:
- Replaced `result.face[n].iris` with `result.face[n].distance`
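
For consumers the rename is mechanical; a hedged migration sketch (assumes `result` is a regular detection result and that `distance` carries the value previously published under `iris`):

```js
for (const face of result.face) {
  // const d = face.iris;  // before this release
  const d = face.distance; // after this release
}
```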

File diff suppressed because one or more lines are too long

@@ -61,11 +61,11 @@ threads.parentPort?.on('message', (msg) => {
  }
  if (typeof msg.debug !== 'undefined') { // set verbose logging
    debug = msg.debug;
-   if (debug) threads.parentPort?.postMessage(`debug: ${debug}`);
+   // if (debug) threads.parentPort?.postMessage(`debug: ${debug}`);
  }
  if (typeof msg.threshold !== 'undefined') { // set minimum similarity threshold
    threshold = msg.threshold;
-   if (debug) threads.parentPort?.postMessage(`threshold: ${threshold}`);
+   // if (debug) threads.parentPort?.postMessage(`threshold: ${threshold}`);
  }
  if (typeof msg.shutdown !== 'undefined') { // got message to close worker
    if (debug) threads.parentPort?.postMessage('shutting down');
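
For context, the parent side of this message protocol is roughly the sketch below (the worker path is illustrative; the handler above reacts to `debug`, `threshold`, and `shutdown` keys):

```js
const threads = require('worker_threads');
const worker = new threads.Worker('./node-match-worker.js'); // illustrative path
worker.postMessage({ debug: true });    // toggle verbose logging in the worker
worker.postMessage({ threshold: 0.5 }); // set minimum similarity threshold
worker.postMessage({ shutdown: true }); // ask the worker to close
```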

@@ -176,7 +176,7 @@ async function main() {
    data.requestID++; // increase request id
    if (testOptions.fuzDescriptors) match(fuzDescriptor(descriptor)); // fuz descriptor for harder match
    else match(descriptor);
-   if (options.debug) log.info('submitted job', data.requestID); // we already know what we're searching for so we can compare results
+   if (options.debug) log.debug('submitted job', data.requestID); // we already know what we're searching for so we can compare results
  }
  log.state('submitted:', { matchJobs: testOptions.maxJobs, poolSize: data.workers.length, activeWorkers: data.workers.filter((worker) => !!worker).length });
}

@@ -0,0 +1,66 @@
/**
 * Human simple demo for NodeJS
 */

const childProcess = require('child_process'); // eslint-disable-line camelcase
const log = require('@vladmandic/pilogger'); // eslint-disable-line node/no-unpublished-require
const canvas = require('canvas'); // eslint-disable-line node/no-unpublished-require

const config = {
  cacheSensitivity: 0,
  wasmPlatformFetch: true,
  modelBasePath: 'https://vladmandic.github.io/human-models/models/',
};
const count = 10;

async function loadImage(input) {
  const inputImage = await canvas.loadImage(input);
  const inputCanvas = new canvas.Canvas(inputImage.width, inputImage.height);
  const inputCtx = inputCanvas.getContext('2d');
  inputCtx.drawImage(inputImage, 0, 0);
  const imageData = inputCtx.getImageData(0, 0, inputCanvas.width, inputCanvas.height);
  process.send({ input, resolution: [inputImage.width, inputImage.height] });
  return imageData;
}

async function runHuman(module, backend) {
  if (backend === 'wasm') require('@tensorflow/tfjs-backend-wasm'); // eslint-disable-line node/no-unpublished-require, global-require
  const Human = require('../../dist/' + module); // eslint-disable-line global-require, import/no-dynamic-require
  config.backend = backend;
  const human = new Human.Human(config);
  human.env.Canvas = canvas.Canvas;
  human.env.Image = canvas.Image;
  human.env.ImageData = canvas.ImageData;
  process.send({ human: human.version, module });
  await human.init();
  process.send({ desired: human.config.backend, wasm: human.env.wasm, tfjs: human.tf.version.tfjs, tensorflow: human.env.tensorflow });
  const imageData = await loadImage('samples/in/ai-body.jpg');
  const t0 = human.now();
  await human.load();
  const t1 = human.now();
  await human.warmup();
  const t2 = human.now();
  for (let i = 0; i < count; i++) await human.detect(imageData);
  const t3 = human.now();
  process.send({ backend: human.tf.getBackend(), load: Math.round(t1 - t0), warmup: Math.round(t2 - t1), detect: Math.round(t3 - t2), count, memory: human.tf.memory().numBytes });
}

async function executeWorker(args) {
  return new Promise((resolve) => {
    const worker = childProcess.fork(process.argv[1], args);
    worker.on('message', (msg) => log.data(msg));
    worker.on('exit', () => resolve(true));
  });
}

async function main() {
  if (process.argv[2]) {
    await runHuman(process.argv[2], process.argv[3]);
  } else {
    await executeWorker(['human.node.js', 'tensorflow']);
    await executeWorker(['human.node-gpu.js', 'tensorflow']);
    await executeWorker(['human.node-wasm.js', 'wasm']);
  }
}

main();
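
Run without arguments to fork and benchmark all three bundles in sequence (this is what `main()` above does); pass a dist module name and backend explicitly, e.g. `node node-bench.js human.node-wasm.js wasm`, to benchmark a single combination in the current process.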

@@ -40,7 +40,8 @@ async function main() {

  // parse cmdline
  const input = process.argv[2];
-  const output = process.argv[3];
+  let output = process.argv[3];
+  if (!output.toLowerCase().endsWith('.jpg')) output += '.jpg';
  if (process.argv.length !== 4) log.error('Parameters: <input-image> <output-image> missing');
  else if (!fs.existsSync(input) && !input.startsWith('http')) log.error(`File not found: ${process.argv[2]}`);
  else {

@@ -48,7 +48,8 @@ async function main() {
  log.configure({ inspect: { breakLength: 265 } });
  log.header();
  if (process.argv.length !== 4) {
-   throw new Error('Parameters: <first image> <second image> missing');
+   log.error('Parameters: <first image> <second image> missing');
+   return;
  }
  await init();
  const res1 = await detect(process.argv[2]);

@@ -46,9 +46,9 @@
<main>
  <div id="main" class="main">
    <video id="webcam" style="position: fixed; top: 0; left: 0; width: 50vw; height: 50vh"></video>
-   <video id="video" style="position: fixed; top: 0; right: 0; width: 50vw; height: 50vh" controls></video>
-   <canvas id="output" style="position: fixed; bottom: 0; left: 0; width: 50vw; height: 50vh"></canvas>
-   <canvas id="merge" style="position: fixed; bottom: 0; right: 0; width: 50vw; height: 50vh"></canvas>
+   <img id="background" alt="background" style="position: fixed; top: 0; right: 0; width: 50vw; height: 50vh" controls></img>
+   <canvas id="output" style="position: fixed; bottom: 0; left: 0; height: 50vh"></canvas>
+   <canvas id="merge" style="position: fixed; bottom: 0; right: 0; height: 50vh"></canvas>
  </div>
</main>
<footer>

@@ -25,6 +25,8 @@ const humanConfig = { // user configuration for human, used to fine-tune behavior
  },
};

+ const backgroundImage = '../../samples/in/background.jpg';
+
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration

const log = (...msg) => console.log(...msg); // eslint-disable-line no-console

@@ -32,7 +34,7 @@ const log = (...msg) => console.log(...msg); // eslint-disable-line no-console
async function main() {
  // gather dom elements
  const dom = {
    video: document.getElementById('video'),
    background: document.getElementById('background'),
    webcam: document.getElementById('webcam'),
    output: document.getElementById('output'),
    merge: document.getElementById('merge'),

@@ -44,7 +46,7 @@ async function main() {
  // set defaults
  dom.fps.innerText = 'initializing';
  dom.ratio.valueAsNumber = human.config.segmentation.ratio;
-  dom.video.src = 'https://vladmandic.github.io/segmentation/assets/rijeka.mp4';
+  dom.background.src = backgroundImage;
  dom.composite.innerHTML = ['source-atop', 'color', 'color-burn', 'color-dodge', 'copy', 'darken', 'destination-atop', 'destination-in', 'destination-out', 'destination-over', 'difference', 'exclusion', 'hard-light', 'hue', 'lighten', 'lighter', 'luminosity', 'multiply', 'overlay', 'saturation', 'screen', 'soft-light', 'source-in', 'source-out', 'source-over', 'xor'].map((gco) => `<option value="${gco}">${gco}</option>`).join(''); // eslint-disable-line max-len
  const ctxMerge = dom.merge.getContext('2d');

@@ -66,8 +68,10 @@ async function main() {
    dom.merge.height = human.webcam.height;
    loop(); // eslint-disable-line no-use-before-define
  };
-  await human.webcam.start({ element: dom.webcam, crop: true, width: 960, height: 720 }); // use human webcam helper methods and associate webcam stream with a dom element
+
+  await human.webcam.start({ element: dom.webcam, crop: true, width: window.innerWidth / 2, height: window.innerHeight / 2 }); // use human webcam helper methods and associate webcam stream with a dom element
  if (!human.webcam.track) dom.fps.innerText = 'webcam error';
+  console.log(human.webcam);

  // processing loop
  async function loop() {

@@ -85,7 +89,7 @@ async function main() {
  human.tf.browser.toPixels(rgba, dom.output); // draw raw output
  human.tf.dispose(rgba); // dispose tensors
  ctxMerge.globalCompositeOperation = 'source-over';
-  ctxMerge.drawImage(dom.video, 0, 0); // draw original video to first stacked canvas
+  ctxMerge.drawImage(dom.background, 0, 0); // draw background image to first stacked canvas
  ctxMerge.globalCompositeOperation = dom.composite.value;
  ctxMerge.drawImage(dom.output, 0, 0); // draw processed output to second stacked canvas
  if (numTensors !== human.tf.engine().state.numTensors) log({ leak: human.tf.engine().state.numTensors - numTensors }); // check for memory leaks

@@ -21,7 +21,7 @@
  </style>
</head>
<body>
-  <canvas id="canvas" style="margin: 0 auto; width: 100%"></canvas>
+  <canvas id="canvas" style="margin: 0 auto; width: 100vw"></canvas>
  <video id="video" playsinline style="display: none"></video>
  <pre id="status" style="position: absolute; top: 12px; right: 20px; background-color: grey; padding: 8px; box-shadow: 2px 2px black"></pre>
  <pre id="log" style="padding: 8px"></pre>

@@ -4,6 +4,100 @@
  author: <https://github.com/vladmandic>'
*/

- import*as m from"../../dist/human.esm.js";var f=1920,b={modelBasePath:"../../models",filter:{enabled:!0,equalization:!1,flip:!1,width:f},face:{enabled:!0,detector:{rotation:!0},mesh:{enabled:!0},attention:{enabled:!1},iris:{enabled:!0},description:{enabled:!0},emotion:{enabled:!0},antispoof:{enabled:!0},liveness:{enabled:!0}},body:{enabled:!0},hand:{enabled:!1},object:{enabled:!1},segmentation:{enabled:!1},gesture:{enabled:!0}},e=new m.Human(b);e.env.perfadd=!1;e.draw.options.font='small-caps 18px "Lato"';e.draw.options.lineHeight=20;var a={video:document.getElementById("video"),canvas:document.getElementById("canvas"),log:document.getElementById("log"),fps:document.getElementById("status"),perf:document.getElementById("performance")},n={detect:0,draw:0,tensors:0,start:0},s={detectFPS:0,drawFPS:0,frames:0,averageMs:0},o=(...t)=>{a.log.innerText+=t.join(" ")+`
- `,console.log(...t)},r=t=>a.fps.innerText=t,g=t=>a.perf.innerText="tensors:"+e.tf.memory().numTensors.toString()+" | performance: "+JSON.stringify(t).replace(/"|{|}/g,"").replace(/,/g," | ");async function u(){if(!a.video.paused){n.start===0&&(n.start=e.now()),await e.detect(a.video);let t=e.tf.memory().numTensors;t-n.tensors!==0&&o("allocated tensors:",t-n.tensors),n.tensors=t,s.detectFPS=Math.round(1e3*1e3/(e.now()-n.detect))/1e3,s.frames++,s.averageMs=Math.round(1e3*(e.now()-n.start)/s.frames)/1e3,s.frames%100===0&&!a.video.paused&&o("performance",{...s,tensors:n.tensors})}n.detect=e.now(),requestAnimationFrame(u)}async function p(){var d,i,c;if(!a.video.paused){let l=e.next(e.result),w=await e.image(a.video);e.draw.canvas(w.canvas,a.canvas);let v={bodyLabels:`person confidence [score] and ${(c=(i=(d=e.result)==null?void 0:d.body)==null?void 0:i[0])==null?void 0:c.keypoints.length} keypoints`};await e.draw.all(a.canvas,l,v),g(l.performance)}let t=e.now();s.drawFPS=Math.round(1e3*1e3/(t-n.draw))/1e3,n.draw=t,r(a.video.paused?"paused":`fps: ${s.detectFPS.toFixed(1).padStart(5," ")} detect | ${s.drawFPS.toFixed(1).padStart(5," ")} draw`),setTimeout(p,30)}async function y(){let d=(await e.webcam.enumerate())[0].deviceId;await e.webcam.start({element:a.video,crop:!0,width:f,id:d}),a.canvas.width=e.webcam.width,a.canvas.height=e.webcam.height,a.canvas.onclick=async()=>{e.webcam.paused?await e.webcam.play():e.webcam.pause()}}async function h(){o("human version:",e.version,"| tfjs version:",e.tf.version["tfjs-core"]),o("platform:",e.env.platform,"| agent:",e.env.agent),r("loading..."),await e.load(),o("backend:",e.tf.getBackend(),"| available:",e.env.backends),o("models stats:",e.models.stats()),o("models loaded:",e.models.loaded()),o("environment",e.env),r("initializing..."),await e.warmup(),await y(),await u(),await p()}window.onload=h;

// demo/typescript/index.ts
import * as H from "../../dist/human.esm.js";
var width = 1920;
var humanConfig = {
  modelBasePath: "../../models",
  filter: { enabled: true, equalization: false, flip: false },
  face: { enabled: true, detector: { rotation: true }, mesh: { enabled: true }, attention: { enabled: false }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true }, antispoof: { enabled: true }, liveness: { enabled: true } },
  body: { enabled: true },
  hand: { enabled: false },
  object: { enabled: false },
  segmentation: { enabled: false },
  gesture: { enabled: true }
};
var human = new H.Human(humanConfig);
human.env.perfadd = false;
human.draw.options.font = 'small-caps 18px "Lato"';
human.draw.options.lineHeight = 20;
var dom = {
  video: document.getElementById("video"),
  canvas: document.getElementById("canvas"),
  log: document.getElementById("log"),
  fps: document.getElementById("status"),
  perf: document.getElementById("performance")
};
var timestamp = { detect: 0, draw: 0, tensors: 0, start: 0 };
var fps = { detectFPS: 0, drawFPS: 0, frames: 0, averageMs: 0 };
var log = (...msg) => {
  dom.log.innerText += msg.join(" ") + "\n";
  console.log(...msg);
};
var status = (msg) => dom.fps.innerText = msg;
var perf = (msg) => dom.perf.innerText = "tensors:" + human.tf.memory().numTensors.toString() + " | performance: " + JSON.stringify(msg).replace(/"|{|}/g, "").replace(/,/g, " | ");
async function detectionLoop() {
  if (!dom.video.paused) {
    if (timestamp.start === 0)
      timestamp.start = human.now();
    await human.detect(dom.video);
    const tensors = human.tf.memory().numTensors;
    if (tensors - timestamp.tensors !== 0)
      log("allocated tensors:", tensors - timestamp.tensors);
    timestamp.tensors = tensors;
    fps.detectFPS = Math.round(1e3 * 1e3 / (human.now() - timestamp.detect)) / 1e3;
    fps.frames++;
    fps.averageMs = Math.round(1e3 * (human.now() - timestamp.start) / fps.frames) / 1e3;
    if (fps.frames % 100 === 0 && !dom.video.paused)
      log("performance", { ...fps, tensors: timestamp.tensors });
  }
  timestamp.detect = human.now();
  requestAnimationFrame(detectionLoop);
}
async function drawLoop() {
  var _a, _b, _c;
  if (!dom.video.paused) {
    const interpolated = human.next(human.result);
    const processed = await human.image(dom.video);
    human.draw.canvas(processed.canvas, dom.canvas);
    const opt = { bodyLabels: `person confidence [score] and ${(_c = (_b = (_a = human.result) == null ? void 0 : _a.body) == null ? void 0 : _b[0]) == null ? void 0 : _c.keypoints.length} keypoints` };
    await human.draw.all(dom.canvas, interpolated, opt);
    perf(interpolated.performance);
  }
  const now = human.now();
  fps.drawFPS = Math.round(1e3 * 1e3 / (now - timestamp.draw)) / 1e3;
  timestamp.draw = now;
  status(dom.video.paused ? "paused" : `fps: ${fps.detectFPS.toFixed(1).padStart(5, " ")} detect | ${fps.drawFPS.toFixed(1).padStart(5, " ")} draw`);
  setTimeout(drawLoop, 30);
}
async function webCam() {
  const devices = await human.webcam.enumerate();
  const id = devices[0].deviceId;
  await human.webcam.start({ element: dom.video, crop: false, width, id });
  dom.canvas.width = human.webcam.width;
  dom.canvas.height = human.webcam.height;
  dom.canvas.onclick = async () => {
    if (human.webcam.paused)
      await human.webcam.play();
    else
      human.webcam.pause();
  };
}
async function main() {
  log("human version:", human.version, "| tfjs version:", human.tf.version["tfjs-core"]);
  log("platform:", human.env.platform, "| agent:", human.env.agent);
  status("loading...");
  await human.load();
  log("backend:", human.tf.getBackend(), "| available:", human.env.backends);
  log("models stats:", human.models.stats());
  log("models loaded:", human.models.loaded());
  log("environment", human.env);
  status("initializing...");
  await human.warmup();
  await webCam();
  await detectionLoop();
  await drawLoop();
}
window.onload = main;
//# sourceMappingURL=index.js.map

File diff suppressed because one or more lines are too long

@@ -14,7 +14,7 @@ const width = 1920; // used by webcam config as well as human maximum resolution
const humanConfig: Partial<H.Config> = { // user configuration for human, used to fine-tune behavior
  // backend: 'webgpu',
  modelBasePath: '../../models',
-  filter: { enabled: true, equalization: false, flip: false, width },
+  filter: { enabled: true, equalization: false, flip: false },
  face: { enabled: true, detector: { rotation: true }, mesh: { enabled: true }, attention: { enabled: false }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true }, antispoof: { enabled: true }, liveness: { enabled: true } },
  body: { enabled: true },
  // hand: { enabled: true },

@@ -85,7 +85,7 @@ async function drawLoop() { // main screen refresh loop
async function webCam() {
  const devices = await human.webcam.enumerate();
  const id = devices[0].deviceId; // use first available video source
-  await human.webcam.start({ element: dom.video, crop: true, width, id }); // use human webcam helper methods and associate webcam stream with a dom element
+  await human.webcam.start({ element: dom.video, crop: false, width, id }); // use human webcam helper methods and associate webcam stream with a dom element
  dom.canvas.width = human.webcam.width;
  dom.canvas.height = human.webcam.height;
  dom.canvas.onclick = async () => { // pause when clicked on screen and resume on next click

Multiple file diffs suppressed (too large, or one or more lines too long)

@@ -4,4 +4,35 @@
  author: <https://github.com/vladmandic>'
*/

- var e="4.1.0";var s="4.1.0";var t="4.1.0";var n="4.1.0";var r="4.1.0";var i="0.0.1-alpha.16";var h={tfjs:e,"tfjs-core":e,"tfjs-converter":s,"tfjs-backend-cpu":t,"tfjs-backend-webgl":n,"tfjs-backend-wasm":r,"tfjs-backend-webgpu":i};export{h as version};

// node_modules/.pnpm/@tensorflow+tfjs-core@4.1.0/node_modules/@tensorflow/tfjs-core/package.json
var version = "4.1.0";

// node_modules/.pnpm/@tensorflow+tfjs-converter@4.1.0_npjwttp6o2hhjgfcmiedqvkgoa/node_modules/@tensorflow/tfjs-converter/package.json
var version2 = "4.1.0";

// node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@4.1.0_npjwttp6o2hhjgfcmiedqvkgoa/node_modules/@tensorflow/tfjs-backend-cpu/package.json
var version3 = "4.1.0";

// node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@4.1.0_npjwttp6o2hhjgfcmiedqvkgoa/node_modules/@tensorflow/tfjs-backend-webgl/package.json
var version4 = "4.1.0";

// node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@4.1.0_npjwttp6o2hhjgfcmiedqvkgoa/node_modules/@tensorflow/tfjs-backend-wasm/package.json
var version5 = "4.1.0";

// node_modules/.pnpm/@tensorflow+tfjs-backend-webgpu@0.0.1-alpha.16_npjwttp6o2hhjgfcmiedqvkgoa/node_modules/@tensorflow/tfjs-backend-webgpu/package.json
var version6 = "0.0.1-alpha.16";

// tfjs/tf-version.ts
var version7 = {
  tfjs: version,
  "tfjs-core": version,
  "tfjs-converter": version2,
  "tfjs-backend-cpu": version3,
  "tfjs-backend-webgl": version4,
  "tfjs-backend-wasm": version5,
  "tfjs-backend-webgpu": version6
};
export {
  version7 as version
};

Binary file not shown (image added; 98 KiB)

@@ -75,7 +75,6 @@ export async function process(input: Input, config: Config, getTensor: boolean =
  if (
    !(input instanceof tf.Tensor)
    && !(typeof Image !== 'undefined' && input instanceof Image)
    && !(typeof env.Canvas !== 'undefined' && input instanceof env.Canvas)
    && !(typeof globalThis.Canvas !== 'undefined' && input instanceof globalThis.Canvas)
    && !(typeof ImageData !== 'undefined' && input instanceof ImageData)
    && !(typeof ImageBitmap !== 'undefined' && input instanceof ImageBitmap)

@@ -85,7 +84,7 @@ export async function process(input: Input, config: Config, getTensor: boolean =
    && !(typeof HTMLCanvasElement !== 'undefined' && input instanceof HTMLCanvasElement)
    && !(typeof OffscreenCanvas !== 'undefined' && input instanceof OffscreenCanvas)
  ) {
-   throw new Error('input error: type is not recognized');
+   throw new Error('input error: type not recognized');
  }
  if (input instanceof tf.Tensor) { // if input is tensor use as-is without filters but correct shape as needed
    let tensor: Tensor | null = null;

@@ -10,7 +10,7 @@ import type { TensorInfo } from './types';

export async function getBestBackend(): Promise<BackendEnum> {
  await env.updateBackend(); // update env on backend init
-  if (!env.browser) return 'tensorflow';
+  if (env.tensorflow?.version) return 'tensorflow';
  if (env.webgpu.supported && env.webgpu.backend) return 'webgpu';
  if (env.webgl.supported && env.webgl.backend) return 'webgl';
  if (env.wasm.supported && env.wasm.backend) return 'wasm';

@@ -83,12 +83,20 @@ export class Env {
  };
  /** List of supported kernels for current backend */
  kernels: string[] = [];
-  /** MonkeyPatch for Canvas */
-  Canvas: undefined;
-  /** MonkeyPatch for Image */
-  Image: undefined;
-  /** MonkeyPatch for ImageData */
-  ImageData: undefined;
+  /** MonkeyPatch for Canvas/Image/ImageData */
+  #canvas: undefined;
+  #image: undefined;
+  #imageData: undefined;
+
+  get Canvas() { return this.#canvas; }
+  set Canvas(val) { this.#canvas = val; globalThis.Canvas = val; }
+  get Image() { return this.#image; }
+  // @ts-ignore monkey-patch;
+  set Image(val) { this.#image = val; globalThis.Image = val; }
+  get ImageData() { return this.#imageData; }
+  // @ts-ignore monkey-patch;
+  set ImageData(val) { this.#imageData = val; globalThis.ImageData = val; }

  constructor() {
    this.browser = typeof navigator !== 'undefined';
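
With these setters, assigning a NodeJS canvas implementation also patches the corresponding globals, so downstream `instanceof` checks keep working; usage as in the node-bench demo earlier in this commit (given a constructed `human` instance and the `canvas` npm package):

```js
const canvas = require('canvas');       // node-canvas, as used by the node demos
human.env.Canvas = canvas.Canvas;       // also sets globalThis.Canvas
human.env.Image = canvas.Image;         // also sets globalThis.Image
human.env.ImageData = canvas.ImageData; // also sets globalThis.ImageData
```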

@@ -139,10 +139,10 @@ export class WebCam { // eslint-disable-line @typescript-eslint/no-extraneous-class
    facingMode: this.config.mode === 'front' ? 'user' : 'environment',
    // @ts-ignore // resizeMode is still not defined in tslib
    resizeMode: this.config.crop ? 'crop-and-scale' : 'none',
-   width: { ideal: this.config.width > 0 ? this.config.width : window.innerWidth },
-   height: { ideal: this.config.height > 0 ? this.config.height : window.innerHeight },
  },
};
+ if (this.config?.width > 0) (requestedConstraints.video as MediaTrackConstraints).width = { ideal: this.config.width };
+ if (this.config?.height > 0) (requestedConstraints.video as MediaTrackConstraints).height = { ideal: this.config.height };
  if (this.config.id) (requestedConstraints.video as MediaTrackConstraintSet).deviceId = this.config.id;

  // set default event listeners
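
Net effect of the hunk above: width and height are only constrained when explicitly configured; a rough sketch of the constraints object produced for `width: 1920` with a selected device (the `audio` key and concrete values are assumptions for illustration):

```js
const requestedConstraints = {
  audio: false, // assumption: the webcam helper requests video only
  video: {
    facingMode: 'user',             // config.mode === 'front'
    resizeMode: 'crop-and-scale',   // config.crop === true
    width: { ideal: 1920 },         // only added because config.width > 0
    deviceId: 'selected-device-id', // only added because config.id is set
  },
};
```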

@@ -53,7 +53,10 @@ async function warmupCanvas(instance: Human): Promise<Result | undefined> {
  if (typeof Image !== 'undefined') img = new Image();
  // @ts-ignore env.image is an external monkey-patch
  else if (env.Image) img = new env.Image();
-  else return;
+  else {
+    resolve(undefined);
+    return;
+  }
  img.onload = async () => {
    const canvas = image.canvas(img.naturalWidth, img.naturalHeight);
    if (!canvas) {

@@ -104,7 +107,7 @@ async function warmupNode(instance: Human): Promise<Result | undefined> {
async function runInference(instance: Human) {
  let res: Result | undefined;
  if (typeof createImageBitmap === 'function') res = await warmupBitmap(instance);
-  else if (typeof Image !== 'undefined' || env.Canvas !== undefined) res = await warmupCanvas(instance);
+  else if ((typeof Image !== 'undefined') || (env.Canvas !== undefined)) res = await warmupCanvas(instance);
  else res = await warmupNode(instance);
  return res;
}

test/build.log
File diff suppressed because it is too large

@@ -25,6 +25,7 @@ const demos = [
  { cmd: '../demo/nodejs/process-folder.js', args: ['samples'] },
  { cmd: '../demo/multithread/node-multiprocess.js', args: [] },
  { cmd: '../demo/facematch/node-match.js', args: [] },
+ { cmd: '../demo/nodejs/node-bench.js', args: [] },
  // { cmd: '../demo/nodejs/node-video.js', args: [] },
  // { cmd: '../demo/nodejs/node-webcam.js', args: [] },
];

@@ -95,10 +95,9 @@ async function testInstance(human) {
  if (loaded.length > 10) {
    log('state', 'passed: load models', loaded.length);
    return true;
- } else {
-   log('error', 'failed: load models', loaded.length);
-   return false;
  }
+ log('error', 'failed: load models', loaded.length);
+ return false;
}

async function testWarmup(human, title) {

@@ -116,22 +115,7 @@ async function testWarmup(human, title) {
  } else {
    log('error', 'failed: warmup:', config.warmup, title);
  }
- /*
- // now try with monkey-patch
- globalThis.Canvas = canvasJS.Canvas; // monkey-patch to use external canvas library
- globalThis.ImageData = canvasJS.ImageData; // monkey-patch to use external canvas library
- try {
-   warmup = await human.warmup(config);
- } catch (err) {
-   log('error', 'error warmup');
- }
- if (warmup) {
-   log('state', 'passed: warmup:', config.warmup, title);
-   printResults(warmup);
- } else {
-   log('error', 'failed: warmup:', config.warmup, title);
- }
- */
  return warmup;
}

test/test.log
File diff suppressed because it is too large

wiki

@@ -1 +1 @@
-Subproject commit 185d129a178776c150defdc125334bb1221bec14
+Subproject commit ba16d32c56485602e430c87c7c8cc6f5f461ed83