mirror of https://github.com/vladmandic/human
fixed memory leaks and added scoped runs
commit 5754e5e36e (parent 12de5a71b5)
config.js
@@ -4,6 +4,9 @@
 export default {
   backend: 'webgl', // select tfjs backend to use
   console: true, // enable debugging output to console
+  scoped: false, // enable scoped runs
+  // some models *may* have memory leaks, this wraps everything in a local scope at a cost of performance
+  // typically not needed
   face: {
     enabled: true, // controls if specified module is enabled
     // face.enabled is required for all face models: detector, mesh, iris, age, gender, emotion
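The new `scoped` option is consumed in `src/index.js` (visible in the bundled `dist/human.cjs` diff further down), where each `detect()` run can be wrapped in a TensorFlow.js engine scope. A minimal sketch of that pattern, assuming `tf` is `@tensorflow/tfjs` and with `runModels` as a placeholder rather than a library function:

```js
const tf = require('@tensorflow/tfjs');

// Sketch of a scoped run: tensors allocated between startScope() and endScope()
// that are not explicitly kept are released in bulk when the scope ends.
async function scopedDetect(input, config, runModels) {
  if (config.scoped) tf.engine().startScope();
  const result = await runModels(input, config); // placeholder for the posenet/handpose/facemesh calls
  if (config.scoped) tf.engine().endScope();
  return result;
}
```

In the library the per-frame results are plain JavaScript objects, so ending the scope does not destroy anything the caller needs; the config comment above flags the trade-off, since scoping adds overhead on every run.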
demo/browser.js (119 changed lines)
@@ -3,6 +3,7 @@
 import human from '../dist/human.esm.js';
 import draw from './draw.js';
 
+// ui options
 const ui = {
   baseColor: 'rgba(255, 200, 255, 0.3)',
   baseLabel: 'rgba(255, 200, 255, 0.9)',
@@ -20,11 +21,11 @@ const ui = {
   drawPolygons: true,
   fillPolygons: true,
   useDepth: true,
+  console: true,
 };
 
+// configuration overrides
 const config = {
-  backend: 'webgl',
-  console: true,
   face: {
     enabled: true,
     detector: { maxFaces: 10, skipFrames: 10, minConfidence: 0.5, iouThreshold: 0.3, scoreThreshold: 0.7 },
@@ -37,11 +38,14 @@ const config = {
   body: { enabled: true, maxDetections: 10, scoreThreshold: 0.7, nmsRadius: 20 },
   hand: { enabled: true, skipFrames: 10, minConfidence: 0.5, iouThreshold: 0.3, scoreThreshold: 0.7 },
 };
 
+// global variables
 let settings;
 let worker;
 let timeStamp;
 const fps = [];
+
+// helper function: translates json to human readable string
 function str(...msg) {
   if (!Array.isArray(msg)) return msg;
   let line = '';
@@ -52,11 +56,13 @@ function str(...msg) {
   return line;
 }
 
+// helper function: wrapper around console output
 const log = (...msg) => {
   // eslint-disable-next-line no-console
-  if (config.console) console.log(...msg);
+  if (ui.console) console.log(...msg);
 };
 
+// draws processed results and starts processing of a next frame
 async function drawResults(input, result, canvas) {
   // update fps
   settings.setValue('FPS', Math.round(1000 / (performance.now() - timeStamp)));
@@ -84,53 +90,7 @@ async function drawResults(input, result, canvas) {
   `;
 }
 
-// simple wrapper for worker.postmessage that creates worker if one does not exist
-function webWorker(input, image, canvas) {
-  if (!worker) {
-    // create new webworker and add event handler only once
-    log('Creating worker thread');
-    worker = new Worker(ui.worker, { type: 'module' });
-    // after receiving message from webworker, parse&draw results and send new frame for processing
-    worker.addEventListener('message', (msg) => drawResults(input, msg.data, canvas));
-  }
-  // pass image data as arraybuffer to worker by reference to avoid copy
-  worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
-}
-
-async function runHumanDetect(input, canvas) {
-  timeStamp = performance.now();
-  // perform detect if live video or not video at all
-  if (input.srcObject) {
-    // if video not ready, just redo
-    const live = (input.srcObject.getVideoTracks()[0].readyState === 'live') && (input.readyState > 2) && (!input.paused);
-    if (!live) {
-      if (!input.paused) log(`Video not ready: state: ${input.srcObject.getVideoTracks()[0].readyState} stream state: ${input.readyState}`);
-      setTimeout(() => runHumanDetect(input, canvas), 500);
-      return;
-    }
-    if (ui.useWorker) {
-      // get image data from video as we cannot send html objects to webworker
-      const offscreen = new OffscreenCanvas(canvas.width, canvas.height);
-      const ctx = offscreen.getContext('2d');
-      ctx.drawImage(input, 0, 0, input.width, input.height, 0, 0, canvas.width, canvas.height);
-      const data = ctx.getImageData(0, 0, canvas.width, canvas.height);
-      // perform detection in worker
-      webWorker(input, data, canvas);
-    } else {
-      let result = {};
-      try {
-        // perform detection
-        result = await human.detect(input, config);
-      } catch (err) {
-        log('Error during execution:', err.message);
-      }
-      if (result.error) log(result.error);
-      else drawResults(input, result, canvas);
-    }
-  }
-}
-
-// eslint-disable-next-line no-unused-vars
+// setup webcam
 async function setupCamera() {
   if (ui.busy) return null;
   ui.busy = true;
@@ -173,12 +133,55 @@ async function setupCamera() {
   });
 }
 
+// wrapper for worker.postmessage that creates worker if one does not exist
+function webWorker(input, image, canvas) {
+  if (!worker) {
+    // create new webworker and add event handler only once
+    log('Creating worker thread');
+    worker = new Worker(ui.worker, { type: 'module' });
+    // after receiving message from webworker, parse&draw results and send new frame for processing
+    worker.addEventListener('message', (msg) => drawResults(input, msg.data, canvas));
+  }
+  // pass image data as arraybuffer to worker by reference to avoid copy
+  worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
+}
+
+// main processing function when input is webcam, can use direct invocation or web worker
+async function runHumanDetect(input, canvas) {
+  timeStamp = performance.now();
+  // perform detect if live video or not video at all
+  if (input.srcObject) {
+    // if video not ready, just redo
+    const live = (input.srcObject.getVideoTracks()[0].readyState === 'live') && (input.readyState > 2) && (!input.paused);
+    if (!live) {
+      if (!input.paused) log(`Video not ready: state: ${input.srcObject.getVideoTracks()[0].readyState} stream state: ${input.readyState}`);
+      setTimeout(() => runHumanDetect(input, canvas), 500);
+      return;
+    }
+    if (ui.useWorker) {
+      // get image data from video as we cannot send html objects to webworker
+      const offscreen = new OffscreenCanvas(canvas.width, canvas.height);
+      const ctx = offscreen.getContext('2d');
+      ctx.drawImage(input, 0, 0, input.width, input.height, 0, 0, canvas.width, canvas.height);
+      const data = ctx.getImageData(0, 0, canvas.width, canvas.height);
+      // perform detection in worker
+      webWorker(input, data, canvas);
+    } else {
+      let result = {};
+      try {
+        // perform detection
+        result = await human.detect(input, config);
+      } catch (err) {
+        log('Error during execution:', err.message);
+      }
+      if (result.error) log(result.error);
+      else drawResults(input, result, canvas);
+    }
+  }
+}
+
+// main processing function when input is image, can use direct invocation or web worker
 async function processImage(input) {
-  ui.baseColor = 'rgba(200, 255, 255, 0.5)';
-  ui.baseLabel = 'rgba(200, 255, 255, 0.8)';
-  ui.baseFont = 'small-caps 3.5rem "Segoe UI"';
-  ui.baseLineWidth = 16;
-  ui.columns = 3;
   const cfg = {
     backend: 'webgl',
     console: true,
@@ -218,6 +221,7 @@ async function processImage(input) {
   });
 }
 
+// just initialize everything and call main function
 async function detectVideo() {
   document.getElementById('samples').style.display = 'none';
   document.getElementById('canvas').style.display = 'block';
@@ -236,7 +240,7 @@ async function detectVideo() {
   runHumanDetect(video, canvas);
 }
 
-// eslint-disable-next-line no-unused-vars
+// just initialize everything and call main function
 async function detectSampleImages() {
   ui.baseFont = ui.baseFontProto.replace(/{size}/, `${ui.columns}rem`);
   ui.baseLineHeight = ui.baseLineHeightProto * ui.columns;
@@ -246,8 +250,8 @@ async function detectSampleImages() {
   for (const sample of ui.samples) await processImage(sample);
 }
 
+// setup settings panel
 function setupUI() {
-  // add all variables to ui control panel
   settings = QuickSettings.create(10, 10, 'Settings', document.getElementById('main'));
   const style = document.createElement('style');
   style.innerHTML = `
@@ -314,7 +318,6 @@ function setupUI() {
 async function main() {
   log('Human demo starting ...');
   setupUI();
-
   const msg = `Human ready: version: ${human.version} TensorFlow/JS version: ${human.tf.version_core}`;
   document.getElementById('log').innerText += '\n' + msg;
   log(msg);
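The relocated `webWorker()` helper transfers the `ImageData` buffer to the worker instead of copying it (the second `postMessage` argument marks the buffer as transferable). The worker script referenced by `ui.worker` is not part of this diff; a hypothetical sketch of its message handler, with the import path and the assumption that `detect()` accepts an `ImageData` both unverified here:

```js
// hypothetical worker-side counterpart (not shown in this commit)
import human from '../dist/human.esm.js';

onmessage = async (msg) => {
  // rebuild ImageData from the transferred buffer
  const { image, width, height, config } = msg.data;
  const imageData = new ImageData(new Uint8ClampedArray(image), width, height);
  const result = await human.detect(imageData, config); // assumes ImageData is an accepted input
  postMessage(result); // main thread draws it via drawResults()
};
```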
dist/human.cjs
@@ -113,8 +113,8 @@ var require_blazeface = __commonJS((exports2) => {
       const boxIndicesTensor = await tf2.image.nonMaxSuppressionAsync(boxes, scores, this.maxFaces, this.iouThreshold, this.scoreThreshold);
       const boxIndices = await boxIndicesTensor.array();
       boxIndicesTensor.dispose();
-      let boundingBoxes = boxIndices.map((boxIndex) => tf2.slice(boxes, [boxIndex, 0], [1, -1]));
-      boundingBoxes = await Promise.all(boundingBoxes.map(async (boundingBox) => {
+      const boundingBoxesMap = boxIndices.map((boxIndex) => tf2.slice(boxes, [boxIndex, 0], [1, -1]));
+      const boundingBoxes = await Promise.all(boundingBoxesMap.map(async (boundingBox) => {
         const vals = await boundingBox.array();
         boundingBox.dispose();
         return vals;
@@ -122,16 +122,19 @@ var require_blazeface = __commonJS((exports2) => {
       const annotatedBoxes = [];
       for (let i = 0; i < boundingBoxes.length; i++) {
         const boundingBox = boundingBoxes[i];
-        const annotatedBox = tf2.tidy(() => {
         const box = createBox(boundingBox);
         const boxIndex = boxIndices[i];
         const anchor = this.anchorsData[boxIndex];
-        const landmarks = tf2.slice(detectedOutputs, [boxIndex, NUM_LANDMARKS - 1], [1, -1]).squeeze().reshape([NUM_LANDMARKS, -1]);
+        const sliced = tf2.slice(detectedOutputs, [boxIndex, NUM_LANDMARKS - 1], [1, -1]);
+        const squeezed = sliced.squeeze();
+        const landmarks = squeezed.reshape([NUM_LANDMARKS, -1]);
         const probability = tf2.slice(scores, [boxIndex], [1]);
-        return {box, landmarks, probability, anchor};
-        });
+        const annotatedBox = {box, landmarks, probability, anchor};
         annotatedBoxes.push(annotatedBox);
+        sliced.dispose();
+        squeezed.dispose();
       }
+      detectedOutputs.dispose();
       boxes.dispose();
       scores.dispose();
       detectedOutputs.dispose();
@@ -141,12 +144,11 @@ var require_blazeface = __commonJS((exports2) => {
     };
   }
   async estimateFaces(input) {
-    const image = tf2.tidy(() => {
-      if (!(input instanceof tf2.Tensor)) {
-        input = tf2.browser.fromPixels(input);
-      }
-      return input.toFloat().expandDims(0);
-    });
+    const imageRaw = !(input instanceof tf2.Tensor) ? tf2.browser.fromPixels(input) : input;
+    const imageCast = imageRaw.toFloat();
+    const image = imageCast.expandDims(0);
+    imageRaw.dispose();
+    imageCast.dispose();
     const {boxes, scaleFactor} = await this.getBoundingBoxes(image);
     image.dispose();
     return Promise.all(boxes.map(async (face) => {
@@ -172,12 +174,12 @@ var require_blazeface = __commonJS((exports2) => {
     }));
   }
 }
-async function load(config2) {
+async function load2(config2) {
   const blazeface = await tf2.loadGraphModel(config2.detector.modelPath, {fromTFHub: config2.detector.modelPath.includes("tfhub.dev")});
   const model = new BlazeFaceModel(blazeface, config2);
   return model;
 }
-exports2.load = load;
+exports2.load = load2;
 exports2.BlazeFaceModel = BlazeFaceModel;
 exports2.disposeBox = disposeBox;
 });
@@ -530,21 +532,25 @@ var require_pipeline = __commonJS((exports2) => {
     this.skipFrames = config2.detector.skipFrames;
     this.maxFaces = config2.detector.maxFaces;
     if (this.shouldUpdateRegionsOfInterest()) {
-      const {boxes, scaleFactor} = await this.boundingBoxDetector.getBoundingBoxes(input);
-      if (boxes.length === 0) {
+      const detector = await this.boundingBoxDetector.getBoundingBoxes(input);
+      if (detector.boxes.length === 0) {
        this.regionsOfInterest = [];
        return null;
      }
-      const scaledBoxes = boxes.map((prediction) => {
+      const scaledBoxes = detector.boxes.map((prediction) => {
+        const startPoint = prediction.box.startPoint.squeeze();
+        const endPoint = prediction.box.endPoint.squeeze();
        const predictionBox = {
-          startPoint: prediction.box.startPoint.squeeze().arraySync(),
-          endPoint: prediction.box.endPoint.squeeze().arraySync()
+          startPoint: startPoint.arraySync(),
+          endPoint: endPoint.arraySync()
        };
-        prediction.box.startPoint.dispose();
-        prediction.box.endPoint.dispose();
-        const scaledBox = bounding.scaleBoxCoordinates(predictionBox, scaleFactor);
+        startPoint.dispose();
+        endPoint.dispose();
+        const scaledBox = bounding.scaleBoxCoordinates(predictionBox, detector.scaleFactor);
        const enlargedBox = bounding.enlargeBox(scaledBox);
        const landmarks = prediction.landmarks.arraySync();
+        prediction.box.startPoint.dispose();
+        prediction.box.endPoint.dispose();
        prediction.landmarks.dispose();
        prediction.probability.dispose();
        return {...enlargedBox, landmarks};
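The `startPoint`/`endPoint` rewrite above is the recurring fix in this commit: a chained call such as `t.squeeze().arraySync()` creates a temporary tensor that nothing ever disposes. A self-contained sketch of the leak and the fix (variable names are illustrative, not from the library):

```js
const tf = require('@tensorflow/tfjs');

const box = tf.tensor2d([[1, 2, 3, 4]]);
// leaky: the tensor returned by squeeze() is orphaned after arraySync()
// const coords = box.squeeze().arraySync();

// fixed: keep a handle to every intermediate so it can be released explicitly
const squeezed = box.squeeze();
const coords = squeezed.arraySync();
squeezed.dispose();
box.dispose();
console.log(coords, tf.memory().numTensors); // [1, 2, 3, 4] and 0 live tensors
```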
@@ -601,13 +607,15 @@ var require_pipeline = __commonJS((exports2) => {
       const transformedCoordsData = this.transformRawCoords(rawCoords, box, angle, rotationMatrix);
       tf2.dispose(rawCoords);
       const landmarksBox = bounding.enlargeBox(this.calculateLandmarksBoundingBox(transformedCoordsData));
+      const confidence = flag.squeeze();
+      tf2.dispose(flag);
       if (config2.mesh.enabled) {
         const transformedCoords = tf2.tensor2d(transformedCoordsData);
         this.regionsOfInterest[i] = {...landmarksBox, landmarks: transformedCoords.arraySync()};
         const prediction2 = {
           coords: transformedCoords,
           box: landmarksBox,
-          confidence: flag.squeeze(),
+          confidence,
           image: face
         };
         return prediction2;
@@ -615,7 +623,7 @@ var require_pipeline = __commonJS((exports2) => {
       const prediction = {
         coords: null,
         box: landmarksBox,
-        confidence: flag.squeeze(),
+        confidence,
         image: face
       };
       return prediction;
@@ -668,7 +676,7 @@ var require_pipeline = __commonJS((exports2) => {
     const ys = landmarks.map((d) => d[1]);
     const startPoint = [Math.min(...xs), Math.min(...ys)];
     const endPoint = [Math.max(...xs), Math.max(...ys)];
-    return {startPoint, endPoint};
+    return {startPoint, endPoint, landmarks};
   }
 }
 exports2.Pipeline = Pipeline;
@@ -3814,11 +3822,11 @@ var require_facemesh = __commonJS((exports2) => {
   async estimateFaces(input, config2) {
     if (config2)
       this.config = config2;
-    const image = tf2.tidy(() => {
-      if (!(input instanceof tf2.Tensor))
-        input = tf2.browser.fromPixels(input);
-      return input.toFloat().expandDims(0);
-    });
+    const imageRaw = !(input instanceof tf2.Tensor) ? tf2.browser.fromPixels(input) : input;
+    const imageCast = imageRaw.toFloat();
+    const image = imageCast.expandDims(0);
+    imageRaw.dispose();
+    imageCast.dispose();
     const predictions = await this.pipeline.predict(image, config2);
     tf2.dispose(image);
     const results = [];
@@ -3844,13 +3852,17 @@ var require_facemesh = __commonJS((exports2) => {
           image: prediction.image ? tf2.clone(prediction.image) : null
         });
       }
+      if (prediction.confidence)
         prediction.confidence.dispose();
+      if (prediction.coords)
+        prediction.coords.dispose();
+      if (prediction.image)
         prediction.image.dispose();
     }
     return results;
   }
 }
-async function load(config2) {
+async function load2(config2) {
   const models2 = await Promise.all([
     blazeface.load(config2),
     tf2.loadGraphModel(config2.mesh.modelPath, {fromTFHub: config2.mesh.modelPath.includes("tfhub.dev")}),
@@ -3859,7 +3871,7 @@ var require_facemesh = __commonJS((exports2) => {
   const faceMesh = new MediaPipeFaceMesh(models2[0], models2[1], models2[2], config2);
   return faceMesh;
 }
-exports2.load = load;
+exports2.load = load2;
 exports2.MediaPipeFaceMesh = MediaPipeFaceMesh;
 exports2.uv_coords = uv_coords;
 exports2.triangulation = triangulation;
@@ -3946,7 +3958,7 @@ var require_emotion = __commonJS((exports2) => {
   });
   return tensor;
 }
-async function load(config2) {
+async function load2(config2) {
   if (!models2.emotion)
     models2.emotion = await tf2.loadGraphModel(config2.face.emotion.modelPath);
   return models2.emotion;
@@ -3988,7 +4000,7 @@ var require_emotion = __commonJS((exports2) => {
   return obj;
 }
 exports2.predict = predict;
-exports2.load = load;
+exports2.load = load2;
 });
 
 // src/posenet/modelBase.js
@@ -4542,10 +4554,10 @@ var require_modelPoseNet = __commonJS((exports2) => {
   const mobilenet = new modelMobileNet.MobileNet(graphModel, config2.outputStride);
   return new PoseNet(mobilenet);
 }
-async function load(config2) {
+async function load2(config2) {
   return loadMobileNet(config2);
 }
-exports2.load = load;
+exports2.load = load2;
 });
 
 // src/posenet/posenet.js
@@ -4681,19 +4693,7 @@ var require_handdetector = __commonJS((exports2) => {
     const boxes = this.normalizeBoxes(rawBoxes);
     const boxesWithHandsTensor = await tf2.image.nonMaxSuppressionAsync(boxes, scores, this.maxHands, this.iouThreshold, this.scoreThreshold);
     const boxesWithHands = await boxesWithHandsTensor.array();
-    const toDispose = [
-      normalizedInput,
-      batchedPrediction,
-      boxesWithHandsTensor,
-      prediction,
-      boxes,
-      rawBoxes,
-      scores
-    ];
-    if (boxesWithHands.length === 0) {
-      toDispose.forEach((tensor) => tensor.dispose());
-      return null;
-    }
+    const toDispose = [normalizedInput, batchedPrediction, boxesWithHandsTensor, prediction, boxes, rawBoxes, scores];
     const detectedHands = tf2.tidy(() => {
       const detectedBoxes = [];
       for (const i in boxesWithHands) {
@@ -4705,6 +4705,7 @@ var require_handdetector = __commonJS((exports2) => {
       }
       return detectedBoxes;
     });
+    toDispose.forEach((tensor) => tensor.dispose());
     return detectedHands;
   }
   async estimateHandBounds(input, config2) {
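Moving the `toDispose.forEach(...)` call after the `tf.tidy()` block keeps the detector tensors alive exactly as long as the tidy body needs them. As a side note, `tf.dispose()` also accepts an array, so the same cleanup could be a single call; a small sketch of that variant, which is not what the commit uses:

```js
const tf = require('@tensorflow/tfjs');

const a = tf.scalar(1);
const b = tf.scalar(2);
// equivalent to [a, b].forEach((t) => t.dispose());
tf.dispose([a, b]);
console.log(tf.memory().numTensors); // 0
```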
@@ -5033,7 +5034,7 @@ var require_handpose = __commonJS((exports2) => {
   }
   return tf2.util.fetch(url).then((d) => d.json());
 }
-async function load(config2) {
+async function load2(config2) {
   const [anchors, handDetectorModel, handPoseModel] = await Promise.all([
     loadAnchors(config2.detector.anchors),
     tf2.loadGraphModel(config2.detector.modelPath, {fromTFHub: config2.detector.modelPath.includes("tfhub.dev")}),
@@ -5044,7 +5045,7 @@ var require_handpose = __commonJS((exports2) => {
   const handpose2 = new HandPose(pipeline);
   return handpose2;
 }
-exports2.load = load;
+exports2.load = load2;
 });
 
 // config.js
@@ -5055,6 +5056,7 @@ var require_config = __commonJS((exports2) => {
 var config_default = {
   backend: "webgl",
   console: true,
+  scoped: false,
   face: {
     enabled: true,
     detector: {
@@ -5202,6 +5204,7 @@ const handpose = require_handpose();
 const defaults = require_config().default;
 const app = require_package();
 let config;
+let state = "idle";
 const models = {
   facemesh: null,
   posenet: null,
@@ -5217,9 +5220,21 @@ const now = () => {
   return parseInt(Number(process.hrtime.bigint()) / 1e3 / 1e3);
 };
 const log = (...msg) => {
-  if (config.console)
+  if (msg && config.console)
     console.log(...msg);
 };
+let numTensors = 0;
+const analyzeMemoryLeaks = false;
+const analyze = (...msg) => {
+  if (!analyzeMemoryLeaks)
+    return;
+  const current = tf.engine().state.numTensors;
+  const previous = numTensors;
+  numTensors = current;
+  const leaked = current - previous;
+  if (leaked !== 0)
+    log(...msg, leaked);
+};
 function mergeDeep(...objects) {
   const isObject = (obj) => obj && typeof obj === "object";
   return objects.reduce((prev, obj) => {
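The new `analyze()` helper turns the library into its own leak detector: when `analyzeMemoryLeaks` is flipped on it samples `tf.engine().state.numTensors` around each stage and logs the delta. The same check works from calling code through the public `tf.memory()` API; a minimal standalone sketch:

```js
const tf = require('@tensorflow/tfjs');

// wrap any async stage and report how many tensors it left behind
async function countLeaks(label, fn) {
  const before = tf.memory().numTensors;
  const result = await fn();
  const leaked = tf.memory().numTensors - before;
  if (leaked !== 0) console.log(label, 'leaked tensors:', leaked);
  return result;
}

// e.g. await countLeaks('facemesh', () => models.facemesh.estimateFaces(input, config.face));
```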
@@ -5252,22 +5267,9 @@ function sanity(input) {
   }
   return null;
 }
-async function detect(input, userConfig) {
+async function load(userConfig) {
+  if (userConfig)
   config = mergeDeep(defaults, userConfig);
-  const error = sanity(input);
-  if (error) {
-    log(error, input);
-    return {error};
-  }
-  return new Promise(async (resolve) => {
-    const loadedModels = Object.values(models).filter((a) => a).length;
-    if (loadedModels === 0)
-      log("Human library starting");
-    if (tf.getBackend() !== config.backend) {
-      log("Human library setting backend:", config.backend);
-      await tf.setBackend(config.backend);
-      await tf.ready();
-    }
   if (config.face.enabled && !models.facemesh)
     models.facemesh = await facemesh.load(config.face);
   if (config.body.enabled && !models.posenet)
@@ -5280,18 +5282,50 @@ async function detect(input, userConfig) {
     models.gender = await ssrnet.loadGender(config);
   if (config.face.enabled && config.face.emotion.enabled && !models.emotion)
     models.emotion = await emotion.load(config);
+}
+async function detect(input, userConfig = {}) {
+  state = "config";
+  config = mergeDeep(defaults, userConfig);
+  state = "check";
+  const error = sanity(input);
+  if (error) {
+    log(error, input);
+    return {error};
+  }
+  return new Promise(async (resolve) => {
+    const loadedModels = Object.values(models).filter((a) => a).length;
+    if (loadedModels === 0)
+      log("Human library starting");
+    if (tf.getBackend() !== config.backend) {
+      state = "backend";
+      log("Human library setting backend:", config.backend);
+      await tf.setBackend(config.backend);
+      await tf.ready();
+    }
+    state = "load";
+    await load();
     const perf = {};
     let timeStamp;
+    if (config.scoped)
     tf.engine().startScope();
+    analyze("Start Detect:");
+    state = "run:body";
     timeStamp = now();
+    analyze("Start PoseNet");
     const poseRes = config.body.enabled ? await models.posenet.estimatePoses(input, config.body) : [];
+    analyze("End PoseNet:");
     perf.body = Math.trunc(now() - timeStamp);
+    state = "run:hand";
     timeStamp = now();
+    analyze("Start HandPose:");
     const handRes = config.hand.enabled ? await models.handpose.estimateHands(input, config.hand) : [];
+    analyze("End HandPose:");
     perf.hand = Math.trunc(now() - timeStamp);
     const faceRes = [];
     if (config.face.enabled) {
+      state = "run:face";
       timeStamp = now();
+      analyze("Start FaceMesh:");
       const faces = await models.facemesh.estimateFaces(input, config.face);
       perf.face = Math.trunc(now() - timeStamp);
       for (const face of faces) {
@@ -5299,13 +5333,16 @@ async function detect(input, userConfig) {
           log("face object is disposed:", face.image);
           continue;
         }
+        state = "run:agegender";
         timeStamp = now();
         const ssrData = config.face.age.enabled || config.face.gender.enabled ? await ssrnet.predict(face.image, config) : {};
         perf.agegender = Math.trunc(now() - timeStamp);
+        state = "run:emotion";
         timeStamp = now();
         const emotionData = config.face.emotion.enabled ? await emotion.predict(face.image, config) : {};
         perf.emotion = Math.trunc(now() - timeStamp);
         face.image.dispose();
+        delete face.image;
         const iris = face.annotations.leftEyeIris && face.annotations.rightEyeIris ? Math.max(face.annotations.leftEyeIris[3][0] - face.annotations.leftEyeIris[1][0], face.annotations.rightEyeIris[3][0] - face.annotations.rightEyeIris[1][0]) : 0;
         faceRes.push({
           confidence: face.confidence,
@@ -5319,8 +5356,12 @@ async function detect(input, userConfig) {
           iris: iris !== 0 ? Math.trunc(100 * 11.7 / iris) / 100 : 0
         });
       }
+      analyze("End FaceMesh:");
     }
+    state = "idle";
+    if (config.scoped)
     tf.engine().endScope();
+    analyze("End Scope:");
     perf.total = Object.values(perf).reduce((a, b) => a + b);
     resolve({face: faceRes, body: poseRes, hand: handRes, performance: perf});
   });
@@ -5335,4 +5376,5 @@
 exports.handpose = handpose;
 exports.tf = tf;
 exports.version = app.version;
+exports.state = state;
 //# sourceMappingURL=human.cjs.map
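Taken together, `src/index.js` now splits model loading into `load()`, tracks a `state` string through each stage, and `detect(input, userConfig)` resolves with per-stage timings. A short usage sketch against the CommonJS bundle above; the require path and the input variable are assumptions:

```js
const human = require('./dist/human.cjs'); // path assumed; any build exposing detect() works the same way

async function run(input) { // input: a video/canvas/image element or tensor supported by the library
  const result = await human.detect(input, { scoped: true }); // scoped: true wraps the run in an engine scope
  if (result.error) throw new Error(result.error);
  console.log(result.face.length, 'faces,', result.body.length, 'bodies,', result.hand.length, 'hands');
  console.log('timings (ms):', result.performance);
}
```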
@@ -1,7 +1,7 @@
 {
   "inputs": {
     "config.js": {
-      "bytes": 4536,
+      "bytes": 4774,
       "imports": []
     },
     "package.json": {
@@ -13,7 +13,7 @@
       "imports": []
     },
     "src/facemesh/blazeface.js": {
-      "bytes": 7042,
+      "bytes": 7407,
       "imports": []
     },
     "src/facemesh/box.js": {
@@ -21,7 +21,7 @@
       "imports": []
     },
     "src/facemesh/facemesh.js": {
-      "bytes": 2649,
+      "bytes": 2816,
       "imports": [
         {
           "path": "src/facemesh/blazeface.js"
@@ -45,7 +45,7 @@
       "imports": []
     },
     "src/facemesh/pipeline.js": {
-      "bytes": 14108,
+      "bytes": 14393,
       "imports": [
         {
           "path": "src/facemesh/box.js"
@@ -75,7 +75,7 @@
       "imports": []
     },
     "src/handpose/handdetector.js": {
-      "bytes": 4253,
+      "bytes": 4296,
       "imports": [
         {
           "path": "src/handpose/box.js"
@@ -116,7 +116,7 @@
       "imports": []
     },
     "src/index.js": {
-      "bytes": 6474,
+      "bytes": 7175,
       "imports": [
         {
           "path": "src/facemesh/facemesh.js"
@@ -253,13 +253,13 @@
     "dist/human.cjs.map": {
       "imports": [],
       "inputs": {},
-      "bytes": 216628
+      "bytes": 219147
     },
     "dist/human.cjs": {
       "imports": [],
       "inputs": {
         "src/facemesh/blazeface.js": {
-          "bytesInOutput": 7246
+          "bytesInOutput": 7398
         },
         "src/facemesh/keypoints.js": {
           "bytesInOutput": 2771
@@ -271,7 +271,7 @@
           "bytesInOutput": 3027
         },
         "src/facemesh/pipeline.js": {
-          "bytesInOutput": 13162
+          "bytesInOutput": 13366
         },
         "src/facemesh/uvcoords.js": {
           "bytesInOutput": 20586
@@ -280,13 +280,13 @@
           "bytesInOutput": 23311
         },
         "src/facemesh/facemesh.js": {
-          "bytesInOutput": 2758
+          "bytesInOutput": 2950
         },
         "src/ssrnet/ssrnet.js": {
           "bytesInOutput": 2068
         },
         "src/emotion/emotion.js": {
-          "bytesInOutput": 2132
+          "bytesInOutput": 2134
         },
         "src/posenet/modelBase.js": {
           "bytesInOutput": 1120
@@ -316,7 +316,7 @@
           "bytesInOutput": 4383
         },
         "src/posenet/modelPoseNet.js": {
-          "bytesInOutput": 1974
+          "bytesInOutput": 1976
         },
         "src/posenet/posenet.js": {
           "bytesInOutput": 917
@@ -325,7 +325,7 @@
           "bytesInOutput": 2813
         },
         "src/handpose/handdetector.js": {
-          "bytesInOutput": 4271
+          "bytesInOutput": 4135
         },
         "src/handpose/keypoints.js": {
           "bytesInOutput": 265
@@ -337,19 +337,19 @@
           "bytesInOutput": 7651
         },
         "src/handpose/handpose.js": {
-          "bytesInOutput": 2516
+          "bytesInOutput": 2518
         },
         "config.js": {
-          "bytesInOutput": 1853
+          "bytesInOutput": 1872
         },
         "package.json": {
           "bytesInOutput": 2748
         },
         "src/index.js": {
-          "bytesInOutput": 5148
+          "bytesInOutput": 6171
         }
       },
-      "bytes": 132178
+      "bytes": 133638
     }
   }
 }
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,7 +1,7 @@
 {
   "inputs": {
     "config.js": {
-      "bytes": 4536,
+      "bytes": 4774,
       "imports": []
     },
     "package.json": {
@@ -13,7 +13,7 @@
       "imports": []
     },
     "src/facemesh/blazeface.js": {
-      "bytes": 7042,
+      "bytes": 7407,
       "imports": []
     },
     "src/facemesh/box.js": {
@@ -21,7 +21,7 @@
       "imports": []
     },
     "src/facemesh/facemesh.js": {
-      "bytes": 2649,
+      "bytes": 2816,
       "imports": [
         {
           "path": "src/facemesh/blazeface.js"
@@ -45,7 +45,7 @@
       "imports": []
     },
     "src/facemesh/pipeline.js": {
-      "bytes": 14108,
+      "bytes": 14393,
       "imports": [
         {
           "path": "src/facemesh/box.js"
@@ -75,7 +75,7 @@
       "imports": []
     },
     "src/handpose/handdetector.js": {
-      "bytes": 4253,
+      "bytes": 4296,
       "imports": [
         {
           "path": "src/handpose/box.js"
@@ -116,7 +116,7 @@
       "imports": []
     },
     "src/index.js": {
-      "bytes": 6474,
+      "bytes": 7175,
       "imports": [
         {
           "path": "src/facemesh/facemesh.js"
@@ -253,13 +253,13 @@
     "dist/human.esm-nobundle.js.map": {
       "imports": [],
       "inputs": {},
-      "bytes": 194920
+      "bytes": 197443
     },
     "dist/human.esm-nobundle.js": {
       "imports": [],
       "inputs": {
         "src/facemesh/blazeface.js": {
-          "bytesInOutput": 3223
+          "bytesInOutput": 3255
         },
         "src/facemesh/keypoints.js": {
           "bytesInOutput": 1950
@@ -271,7 +271,7 @@
           "bytesInOutput": 1176
         },
         "src/facemesh/pipeline.js": {
-          "bytesInOutput": 5541
+          "bytesInOutput": 5602
         },
         "src/facemesh/uvcoords.js": {
           "bytesInOutput": 16790
@@ -280,7 +280,7 @@
           "bytesInOutput": 9995
         },
         "src/facemesh/facemesh.js": {
-          "bytesInOutput": 1320
+          "bytesInOutput": 1391
         },
         "src/ssrnet/ssrnet.js": {
           "bytesInOutput": 1099
@@ -325,31 +325,31 @@
           "bytesInOutput": 1400
         },
         "src/handpose/handdetector.js": {
-          "bytesInOutput": 2074
+          "bytesInOutput": 2040
         },
         "src/handpose/keypoints.js": {
           "bytesInOutput": 160
         },
         "src/handpose/util.js": {
-          "bytesInOutput": 977
+          "bytesInOutput": 984
         },
         "src/handpose/pipeline.js": {
-          "bytesInOutput": 3230
+          "bytesInOutput": 3232
         },
         "src/handpose/handpose.js": {
           "bytesInOutput": 1326
         },
         "config.js": {
-          "bytesInOutput": 1136
+          "bytesInOutput": 1146
         },
         "package.json": {
           "bytesInOutput": 2275
         },
         "src/index.js": {
-          "bytesInOutput": 2904
+          "bytesInOutput": 3410
         }
       },
-      "bytes": 68538
+      "bytes": 69193
     }
   }
 }
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,7 +1,7 @@
 {
   "inputs": {
     "config.js": {
-      "bytes": 4536,
+      "bytes": 4774,
       "imports": []
     },
     "node_modules/@tensorflow/tfjs-backend-cpu/dist/tf-backend-cpu.node.js": {
@@ -161,7 +161,7 @@
       ]
     },
     "src/facemesh/blazeface.js": {
-      "bytes": 7042,
+      "bytes": 7407,
       "imports": [
         {
           "path": "node_modules/@tensorflow/tfjs/dist/tf.node.js"
@@ -177,7 +177,7 @@
       ]
     },
     "src/facemesh/facemesh.js": {
-      "bytes": 2649,
+      "bytes": 2816,
       "imports": [
         {
           "path": "node_modules/@tensorflow/tfjs/dist/tf.node.js"
@@ -204,7 +204,7 @@
       "imports": []
     },
     "src/facemesh/pipeline.js": {
-      "bytes": 14108,
+      "bytes": 14393,
       "imports": [
         {
           "path": "node_modules/@tensorflow/tfjs/dist/tf.node.js"
@@ -241,7 +241,7 @@
       ]
     },
     "src/handpose/handdetector.js": {
-      "bytes": 4253,
+      "bytes": 4296,
       "imports": [
         {
           "path": "node_modules/@tensorflow/tfjs/dist/tf.node.js"
@@ -291,7 +291,7 @@
       "imports": []
     },
     "src/index.js": {
-      "bytes": 6474,
+      "bytes": 7175,
       "imports": [
         {
           "path": "node_modules/@tensorflow/tfjs/dist/tf.node.js"
@@ -464,7 +464,7 @@
     "dist/human.esm.js.map": {
       "imports": [],
       "inputs": {},
-      "bytes": 4955971
+      "bytes": 4958494
     },
     "dist/human.esm.js": {
       "imports": [],
@@ -527,7 +527,7 @@
           "bytesInOutput": 765
         },
         "src/facemesh/blazeface.js": {
-          "bytesInOutput": 3238
+          "bytesInOutput": 3268
         },
         "src/facemesh/keypoints.js": {
           "bytesInOutput": 1951
@@ -539,7 +539,7 @@
           "bytesInOutput": 1195
         },
         "src/facemesh/pipeline.js": {
-          "bytesInOutput": 5520
+          "bytesInOutput": 5577
         },
         "src/facemesh/uvcoords.js": {
           "bytesInOutput": 16791
@@ -548,7 +548,7 @@
           "bytesInOutput": 9996
         },
         "src/facemesh/facemesh.js": {
-          "bytesInOutput": 1306
+          "bytesInOutput": 1376
         },
         "src/ssrnet/ssrnet.js": {
           "bytesInOutput": 1100
@@ -593,7 +593,7 @@
           "bytesInOutput": 1386
         },
         "src/handpose/handdetector.js": {
-          "bytesInOutput": 2084
+          "bytesInOutput": 2050
         },
         "src/handpose/keypoints.js": {
           "bytesInOutput": 161
@@ -608,16 +608,16 @@
           "bytesInOutput": 1312
         },
         "config.js": {
-          "bytesInOutput": 1137
+          "bytesInOutput": 1147
         },
         "package.json": {
           "bytesInOutput": 2276
         },
         "src/index.js": {
-          "bytesInOutput": 2963
+          "bytesInOutput": 3495
         }
       },
-      "bytes": 1105435
+      "bytes": 1106100
     }
   }
 }
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,7 +1,7 @@
 {
   "inputs": {
     "config.js": {
-      "bytes": 4536,
+      "bytes": 4774,
       "imports": []
     },
     "node_modules/@tensorflow/tfjs-backend-cpu/dist/tf-backend-cpu.node.js": {
@@ -161,7 +161,7 @@
       ]
     },
     "src/facemesh/blazeface.js": {
-      "bytes": 7042,
+      "bytes": 7407,
       "imports": [
         {
           "path": "node_modules/@tensorflow/tfjs/dist/tf.node.js"
@@ -177,7 +177,7 @@
       ]
     },
     "src/facemesh/facemesh.js": {
-      "bytes": 2649,
+      "bytes": 2816,
       "imports": [
         {
           "path": "node_modules/@tensorflow/tfjs/dist/tf.node.js"
@@ -204,7 +204,7 @@
       "imports": []
     },
     "src/facemesh/pipeline.js": {
-      "bytes": 14108,
+      "bytes": 14393,
       "imports": [
         {
           "path": "node_modules/@tensorflow/tfjs/dist/tf.node.js"
@@ -241,7 +241,7 @@
       ]
     },
     "src/handpose/handdetector.js": {
-      "bytes": 4253,
+      "bytes": 4296,
       "imports": [
         {
           "path": "node_modules/@tensorflow/tfjs/dist/tf.node.js"
@@ -291,7 +291,7 @@
       "imports": []
     },
     "src/index.js": {
-      "bytes": 6474,
+      "bytes": 7175,
       "imports": [
         {
           "path": "node_modules/@tensorflow/tfjs/dist/tf.node.js"
@@ -464,7 +464,7 @@
     "dist/human.js.map": {
       "imports": [],
       "inputs": {},
-      "bytes": 4955971
+      "bytes": 4958494
     },
     "dist/human.js": {
       "imports": [],
@@ -527,7 +527,7 @@
           "bytesInOutput": 765
         },
         "src/facemesh/blazeface.js": {
-          "bytesInOutput": 3238
+          "bytesInOutput": 3268
         },
         "src/facemesh/keypoints.js": {
           "bytesInOutput": 1951
@@ -539,7 +539,7 @@
           "bytesInOutput": 1195
         },
         "src/facemesh/pipeline.js": {
-          "bytesInOutput": 5520
+          "bytesInOutput": 5577
         },
         "src/facemesh/uvcoords.js": {
           "bytesInOutput": 16791
@@ -548,7 +548,7 @@
           "bytesInOutput": 9996
         },
         "src/facemesh/facemesh.js": {
-          "bytesInOutput": 1306
+          "bytesInOutput": 1376
         },
         "src/ssrnet/ssrnet.js": {
           "bytesInOutput": 1100
@@ -593,7 +593,7 @@
           "bytesInOutput": 1386
         },
         "src/handpose/handdetector.js": {
-          "bytesInOutput": 2084
+          "bytesInOutput": 2050
         },
         "src/handpose/keypoints.js": {
           "bytesInOutput": 161
@@ -608,16 +608,16 @@
           "bytesInOutput": 1312
         },
         "config.js": {
-          "bytesInOutput": 1137
+          "bytesInOutput": 1147
         },
         "package.json": {
           "bytesInOutput": 2276
         },
         "src/index.js": {
-          "bytesInOutput": 2963
+          "bytesInOutput": 3495
         }
       },
-      "bytes": 1105444
+      "bytes": 1106109
     }
   }
 }
src/facemesh/blazeface.js
@@ -78,6 +78,7 @@ class BlazeFaceModel {
     this.scoreThreshold = config.detector.scoreThreshold;
   }
 
+  // todo: blazeface leaks two tensors per run
   async getBoundingBoxes(inputImage) {
     // sanity check on input
     if ((!inputImage) || (inputImage.isDisposedInternal) || (inputImage.shape.length !== 4) || (inputImage.shape[1] < 1) || (inputImage.shape[2] < 1)) return null;
@@ -101,12 +102,11 @@ class BlazeFaceModel {
       const scoresOut = tf.sigmoid(logits).squeeze();
       return [prediction, decodedBounds, scoresOut];
     });
-
     const boxIndicesTensor = await tf.image.nonMaxSuppressionAsync(boxes, scores, this.maxFaces, this.iouThreshold, this.scoreThreshold);
     const boxIndices = await boxIndicesTensor.array();
     boxIndicesTensor.dispose();
-    let boundingBoxes = boxIndices.map((boxIndex) => tf.slice(boxes, [boxIndex, 0], [1, -1]));
-    boundingBoxes = await Promise.all(boundingBoxes.map(async (boundingBox) => {
+    const boundingBoxesMap = boxIndices.map((boxIndex) => tf.slice(boxes, [boxIndex, 0], [1, -1]));
+    const boundingBoxes = await Promise.all(boundingBoxesMap.map(async (boundingBox) => {
       const vals = await boundingBox.array();
       boundingBox.dispose();
       return vals;
@@ -114,19 +114,26 @@ class BlazeFaceModel {
     const annotatedBoxes = [];
     for (let i = 0; i < boundingBoxes.length; i++) {
       const boundingBox = boundingBoxes[i];
-      const annotatedBox = tf.tidy(() => {
       const box = createBox(boundingBox);
       const boxIndex = boxIndices[i];
       const anchor = this.anchorsData[boxIndex];
+      const sliced = tf.slice(detectedOutputs, [boxIndex, NUM_LANDMARKS - 1], [1, -1]);
+      const squeezed = sliced.squeeze();
+      const landmarks = squeezed.reshape([NUM_LANDMARKS, -1]);
+      /*
       const landmarks = tf
         .slice(detectedOutputs, [boxIndex, NUM_LANDMARKS - 1], [1, -1])
         .squeeze()
         .reshape([NUM_LANDMARKS, -1]);
+      */
       const probability = tf.slice(scores, [boxIndex], [1]);
-      return { box, landmarks, probability, anchor };
-      });
+      const annotatedBox = { box, landmarks, probability, anchor };
       annotatedBoxes.push(annotatedBox);
+      sliced.dispose();
+      squeezed.dispose();
+      // landmarks.dispose();
     }
+    detectedOutputs.dispose();
     boxes.dispose();
     scores.dispose();
     detectedOutputs.dispose();
@ -137,12 +144,11 @@ class BlazeFaceModel {
|
||||||
}
|
}
|
||||||
|
|
||||||
async estimateFaces(input) {
|
async estimateFaces(input) {
|
||||||
const image = tf.tidy(() => {
|
const imageRaw = !(input instanceof tf.Tensor) ? tf.browser.fromPixels(input) : input;
|
||||||
if (!(input instanceof tf.Tensor)) {
|
const imageCast = imageRaw.toFloat();
|
||||||
input = tf.browser.fromPixels(input);
|
const image = imageCast.expandDims(0);
|
||||||
}
|
imageRaw.dispose();
|
||||||
return input.toFloat().expandDims(0);
|
imageCast.dispose();
|
||||||
});
|
|
||||||
const { boxes, scaleFactor } = await this.getBoundingBoxes(image);
|
const { boxes, scaleFactor } = await this.getBoundingBoxes(image);
|
||||||
image.dispose();
|
image.dispose();
|
||||||
return Promise.all(boxes.map(async (face) => {
|
return Promise.all(boxes.map(async (face) => {
|
||||||
|
|
|
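The blazeface hunks above swap a `tf.tidy()` wrapper for explicitly named intermediate tensors that get disposed once their values have been read, because `tf.tidy()` cannot track work that happens across an `await`. A minimal, illustrative sketch of that pattern follows; it is not the library's code, and `boxToArray` is a made-up helper name:

// Sketch only: name every intermediate tensor, read its values asynchronously, then dispose it.
const tf = require('@tensorflow/tfjs');

async function boxToArray(boxes, index) {
  const sliced = tf.slice(boxes, [index, 0], [1, -1]); // intermediate tensor kept in a variable
  const vals = await sliced.array();                   // async read, so tf.tidy() could not manage this
  sliced.dispose();                                    // release it once the values live on the JS side
  return vals;
}

(async () => {
  const boxes = tf.tensor2d([[0.1, 0.1, 0.5, 0.5], [0.2, 0.2, 0.6, 0.6]]);
  console.log(await boxToArray(boxes, 1)); // [[0.2, 0.2, 0.6, 0.6]]
  boxes.dispose();
  console.log('tensors still allocated:', tf.memory().numTensors); // 0 if nothing leaked
})();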
@@ -13,10 +13,11 @@ class MediaPipeFaceMesh {

  async estimateFaces(input, config) {
    if (config) this.config = config;
-    const image = tf.tidy(() => {
-      if (!(input instanceof tf.Tensor)) input = tf.browser.fromPixels(input);
-      return input.toFloat().expandDims(0);
-    });
+    const imageRaw = !(input instanceof tf.Tensor) ? tf.browser.fromPixels(input) : input;
+    const imageCast = imageRaw.toFloat();
+    const image = imageCast.expandDims(0);
+    imageRaw.dispose();
+    imageCast.dispose();
    const predictions = await this.pipeline.predict(image, config);
    tf.dispose(image);
    const results = [];
@@ -42,8 +43,9 @@ class MediaPipeFaceMesh {
        image: prediction.image ? tf.clone(prediction.image) : null,
      });
    }
-    prediction.confidence.dispose();
-    prediction.image.dispose();
+    if (prediction.confidence) prediction.confidence.dispose();
+    if (prediction.coords) prediction.coords.dispose();
+    if (prediction.image) prediction.image.dispose();
  }
  return results;
}
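The facemesh hunk guards each per-prediction dispose so that a missing field (for example `coords` when mesh output is disabled) no longer throws. A hedged sketch of that guard, with an illustrative `prediction` object rather than the real pipeline output:

// Sketch only: dispose tensors on a result object only when they exist and are still live.
const tf = require('@tensorflow/tfjs');

function disposePrediction(prediction) {
  for (const t of [prediction.confidence, prediction.coords, prediction.image]) {
    if (t instanceof tf.Tensor && !t.isDisposed) t.dispose();
  }
}

const prediction = { confidence: tf.scalar(0.9), coords: null, image: tf.zeros([1, 192, 192, 3]) };
disposePrediction(prediction);                 // null coords is simply skipped
console.log(tf.memory().numTensors);           // 0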
@@ -132,21 +132,26 @@ class Pipeline {
    this.skipFrames = config.detector.skipFrames;
    this.maxFaces = config.detector.maxFaces;
    if (this.shouldUpdateRegionsOfInterest()) {
-      const { boxes, scaleFactor } = await this.boundingBoxDetector.getBoundingBoxes(input);
-      if (boxes.length === 0) {
+      // const { boxes, scaleFactor } = await this.boundingBoxDetector.getBoundingBoxes(input);
+      const detector = await this.boundingBoxDetector.getBoundingBoxes(input);
+      if (detector.boxes.length === 0) {
        this.regionsOfInterest = [];
        return null;
      }
-      const scaledBoxes = boxes.map((prediction) => {
+      const scaledBoxes = detector.boxes.map((prediction) => {
+        const startPoint = prediction.box.startPoint.squeeze();
+        const endPoint = prediction.box.endPoint.squeeze();
        const predictionBox = {
-          startPoint: prediction.box.startPoint.squeeze().arraySync(),
-          endPoint: prediction.box.endPoint.squeeze().arraySync(),
+          startPoint: startPoint.arraySync(),
+          endPoint: endPoint.arraySync(),
        };
-        prediction.box.startPoint.dispose();
-        prediction.box.endPoint.dispose();
-        const scaledBox = bounding.scaleBoxCoordinates(predictionBox, scaleFactor);
+        startPoint.dispose();
+        endPoint.dispose();
+        const scaledBox = bounding.scaleBoxCoordinates(predictionBox, detector.scaleFactor);
        const enlargedBox = bounding.enlargeBox(scaledBox);
        const landmarks = prediction.landmarks.arraySync();
+        prediction.box.startPoint.dispose();
+        prediction.box.endPoint.dispose();
        prediction.landmarks.dispose();
        prediction.probability.dispose();
        return { ...enlargedBox, landmarks };
@@ -206,13 +211,15 @@ class Pipeline {
      const transformedCoordsData = this.transformRawCoords(rawCoords, box, angle, rotationMatrix);
      tf.dispose(rawCoords);
      const landmarksBox = bounding.enlargeBox(this.calculateLandmarksBoundingBox(transformedCoordsData));
+      const confidence = flag.squeeze();
+      tf.dispose(flag);
      if (config.mesh.enabled) {
        const transformedCoords = tf.tensor2d(transformedCoordsData);
        this.regionsOfInterest[i] = { ...landmarksBox, landmarks: transformedCoords.arraySync() };
        const prediction = {
          coords: transformedCoords,
          box: landmarksBox,
-          confidence: flag.squeeze(),
+          confidence,
          image: face,
        };
        return prediction;
@@ -220,7 +227,7 @@ class Pipeline {
      const prediction = {
        coords: null,
        box: landmarksBox,
-        confidence: flag.squeeze(),
+        confidence,
        image: face,
      };
      return prediction;
@@ -278,7 +285,7 @@ class Pipeline {
    const ys = landmarks.map((d) => d[1]);
    const startPoint = [Math.min(...xs), Math.min(...ys)];
    const endPoint = [Math.max(...xs), Math.max(...ys)];
-    return { startPoint, endPoint };
+    return { startPoint, endPoint, landmarks };
  }
}
exports.Pipeline = Pipeline;
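The pipeline hunks follow the same discipline: every tensor produced by `squeeze()` gets its own variable so it can be disposed right after `arraySync()` copies its values out, instead of chaining calls on temporaries that nothing ever releases. A small sketch under that assumption, where `box` is a stand-in object rather than the real detector box:

// Sketch only: capture squeezed tensors, copy to plain arrays, then dispose every handle.
const tf = require('@tensorflow/tfjs');

const box = { startPoint: tf.tensor2d([[10, 20]]), endPoint: tf.tensor2d([[110, 220]]) };

const startPoint = box.startPoint.squeeze();   // named so it can be disposed later
const endPoint = box.endPoint.squeeze();
const plainBox = { startPoint: startPoint.arraySync(), endPoint: endPoint.arraySync() };

for (const t of [startPoint, endPoint, box.startPoint, box.endPoint]) t.dispose();
console.log(plainBox, tf.memory().numTensors); // { startPoint: [10, 20], endPoint: [110, 220] } 0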
@@ -42,14 +42,11 @@ class HandDetector {
    const boxes = this.normalizeBoxes(rawBoxes);
    const boxesWithHandsTensor = await tf.image.nonMaxSuppressionAsync(boxes, scores, this.maxHands, this.iouThreshold, this.scoreThreshold);
    const boxesWithHands = await boxesWithHandsTensor.array();
-    const toDispose = [
-      normalizedInput, batchedPrediction, boxesWithHandsTensor, prediction,
-      boxes, rawBoxes, scores,
-    ];
-    if (boxesWithHands.length === 0) {
-      toDispose.forEach((tensor) => tensor.dispose());
-      return null;
-    }
+    const toDispose = [normalizedInput, batchedPrediction, boxesWithHandsTensor, prediction, boxes, rawBoxes, scores];
+    // if (boxesWithHands.length === 0) {
+    //   toDispose.forEach((tensor) => tensor.dispose());
+    //   return null;
+    // }
    const detectedHands = tf.tidy(() => {
      const detectedBoxes = [];
      for (const i in boxesWithHands) {
@@ -61,6 +58,7 @@ class HandDetector {
      }
      return detectedBoxes;
    });
+    toDispose.forEach((tensor) => tensor.dispose());
    return detectedHands;
  }
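In the HandDetector hunk, the tensors collected in `toDispose` are created before the `tf.tidy()` block, so the tidy cannot reclaim them; the change moves their disposal to a single pass after the tidy has finished reading them. A hedged sketch of that ordering, with placeholder tensors instead of the real detector outputs:

// Sketch only: tidy() auto-releases what it creates; outer tensors are disposed afterwards.
const tf = require('@tensorflow/tfjs');

function run(input) {
  const normalized = input.div(255);        // created outside tf.tidy(), tidy will not clean it up
  const scores = normalized.mean();         // stand-in for the real score tensor
  const toDispose = [normalized, scores];   // keep handles to everything allocated so far

  const result = tf.tidy(() => scores.mul(100).arraySync()); // tensors made inside tidy are auto-released

  toDispose.forEach((t) => t.dispose());    // outer tensors are released only after tidy is done
  return result;
}

const input = tf.ones([4, 4]);
console.log(run(input));                    // ~0.39
input.dispose();
console.log(tf.memory().numTensors);        // 0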
43 src/index.js
@@ -20,17 +20,32 @@ const models = {
  gender: null,
  emotion: null,
};

+// helper function: gets elapsed time on both browser and nodejs
const now = () => {
  if (typeof performance !== 'undefined') return performance.now();
  return parseInt(Number(process.hrtime.bigint()) / 1000 / 1000);
};

+// helper function: wrapper around console output
const log = (...msg) => {
  // eslint-disable-next-line no-console
-  if (config.console) console.log(...msg);
+  if (msg && config.console) console.log(...msg);
};

-// helper function that performs deep merge of multiple objects so it allows full inheriance with overrides
+// helper function: measure tensor leak
+let numTensors = 0;
+const analyzeMemoryLeaks = false;
+const analyze = (...msg) => {
+  if (!analyzeMemoryLeaks) return;
+  const current = tf.engine().state.numTensors;
+  const previous = numTensors;
+  numTensors = current;
+  const leaked = current - previous;
+  if (leaked !== 0) log(...msg, leaked);
+};
+
+// helper function: perform deep merge of multiple objects so it allows full inheritance with overrides
function mergeDeep(...objects) {
  const isObject = (obj) => obj && typeof obj === 'object';
  return objects.reduce((prev, obj) => {
@@ -97,12 +112,6 @@ async function detect(input, userConfig = {}) {
    await tf.setBackend(config.backend);
    await tf.ready();
  }
-  // explictly enable depthwiseconv since it's diasabled by default due to issues with large shaders
-  // let savedWebglPackDepthwiseConvFlag;
-  // if (tf.getBackend() === 'webgl') {
-  //   savedWebglPackDepthwiseConvFlag = tf.env().get('WEBGL_PACK_DEPTHWISECONV');
-  //   tf.env().set('WEBGL_PACK_DEPTHWISECONV', true);
-  // }

  // load models if enabled
  state = 'load';
@@ -111,18 +120,24 @@ async function detect(input, userConfig = {}) {
  const perf = {};
  let timeStamp;

-  tf.engine().startScope();
+  if (config.scoped) tf.engine().startScope();
+
+  analyze('Start Detect:');

  // run posenet
  state = 'run:body';
  timeStamp = now();
+  analyze('Start PoseNet');
  const poseRes = config.body.enabled ? await models.posenet.estimatePoses(input, config.body) : [];
+  analyze('End PoseNet:');
  perf.body = Math.trunc(now() - timeStamp);

  // run handpose
  state = 'run:hand';
  timeStamp = now();
+  analyze('Start HandPose:');
  const handRes = config.hand.enabled ? await models.handpose.estimateHands(input, config.hand) : [];
+  analyze('End HandPose:');
  perf.hand = Math.trunc(now() - timeStamp);

  // run facemesh, includes blazeface and iris
@@ -130,6 +145,7 @@ async function detect(input, userConfig = {}) {
  if (config.face.enabled) {
    state = 'run:face';
    timeStamp = now();
+    analyze('Start FaceMesh:');
    const faces = await models.facemesh.estimateFaces(input, config.face);
    perf.face = Math.trunc(now() - timeStamp);
    for (const face of faces) {
@@ -149,6 +165,7 @@ async function detect(input, userConfig = {}) {
      const emotionData = config.face.emotion.enabled ? await emotion.predict(face.image, config) : {};
      perf.emotion = Math.trunc(now() - timeStamp);
      face.image.dispose();
+      delete face.image;
      // calculate iris distance
      // iris: array[ bottom, left, top, right, center ]
      const iris = (face.annotations.leftEyeIris && face.annotations.rightEyeIris)
@@ -166,13 +183,13 @@ async function detect(input, userConfig = {}) {
        iris: (iris !== 0) ? Math.trunc(100 * 11.7 /* human iris size in mm */ / iris) / 100 : 0,
      });
    }
-    state = 'idle';
+    analyze('End FaceMesh:');
  }

-  // set depthwiseconv to original value
-  // tf.env().set('WEBGL_PACK_DEPTHWISECONV', savedWebglPackDepthwiseConvFlag);
+  state = 'idle';

-  tf.engine().endScope();
+  if (config.scoped) tf.engine().endScope();
+  analyze('End Scope:');

  // combine and return results
  perf.total = Object.values(perf).reduce((a, b) => a + b);
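Taken together, the index.js changes make scoping optional (`config.scoped` wraps the run in `tf.engine().startScope()` / `endScope()`) and add an `analyze()` helper that reports how many tensors each stage left allocated. A minimal, illustrative sketch of how the two pieces interact; the model call is a dummy and `leakyModel` is a made-up name, but the engine scope and tensor-count bookkeeping are the same tfjs calls used above:

// Sketch only: count leaked tensors per step, and let an optional scope reclaim them.
const tf = require('@tensorflow/tfjs');

const config = { scoped: true, console: true };
let numTensors = 0;
const analyze = (...msg) => {
  const current = tf.engine().state.numTensors;
  const leaked = current - numTensors;
  numTensors = current;
  if (leaked !== 0 && config.console) console.log(...msg, leaked);
};

function leakyModel(input) {
  return input.square(); // intermediate tensors are never disposed by the caller
}

if (config.scoped) tf.engine().startScope(); // track everything allocated from here on
analyze('Start Detect:');
const res = leakyModel(tf.ones([2, 2]));
analyze('End Detect:');                      // reports the two tensors this step left behind
if (config.scoped) tf.engine().endScope();   // scoped run reclaims them even without dispose()
console.log('disposed by scope:', res.isDisposed, 'tensors left:', tf.memory().numTensors);

The trade-off noted in the config comment applies here too: wrapping every run in a scope costs some performance, so it stays off by default and the `analyze()` counter is only enabled while hunting leaks.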