mirror of https://github.com/vladmandic/human
commit 6001145a53 (parent 27c0566a48): reduced web worker latency
@@ -8,10 +8,13 @@ const log = (...msg) => {
 };
 
 onmessage = async (msg) => {
+  // worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
+  const image = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
   config = msg.data.config;
   let result = {};
   try {
-    result = await human.detect(msg.data.image, config);
+    // result = await human.detect(image, config);
+    result = {};
   } catch (err) {
     result.error = err.message;
     log('Worker thread error:', err.message);
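The hunk above is the receiving half of the latency fix: the main thread now posts the raw RGBA buffer plus its dimensions (the sending side is in the webWorker() hunk further down), and the worker wraps an ImageData around the transferred bytes without copying pixels. A minimal standalone sketch of that receive pattern; the echoed reply is illustrative, not the demo's actual result object:

// worker-side sketch: rebuild ImageData from a transferred ArrayBuffer
onmessage = async (msg) => {
  // msg.data.image arrived via the transfer list, so no pixel copy happens here
  const pixels = new Uint8ClampedArray(msg.data.image);
  const image = new ImageData(pixels, msg.data.width, msg.data.height);
  // ... run detection against `image`, then reply to the main thread ...
  postMessage({ width: image.width, height: image.height });
};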
@@ -17,16 +17,17 @@ const config = {
     detector: { maxFaces: 10, skipFrames: 10, minConfidence: 0.5, iouThreshold: 0.3, scoreThreshold: 0.7 },
     mesh: { enabled: true },
     iris: { enabled: true },
-    age: { enabled: true, skipFrames: 10 },
-    gender: { enabled: true },
-    emotion: { enabled: true, minConfidence: 0.5, useGrayscale: true },
+    age: { enabled: false, skipFrames: 10 },
+    gender: { enabled: false },
+    emotion: { enabled: false, minConfidence: 0.5, useGrayscale: true },
   },
-  body: { enabled: true, maxDetections: 10, scoreThreshold: 0.7, nmsRadius: 20 },
-  hand: { enabled: true, skipFrames: 10, minConfidence: 0.5, iouThreshold: 0.3, scoreThreshold: 0.7 },
+  body: { enabled: false, maxDetections: 10, scoreThreshold: 0.7, nmsRadius: 20 },
+  hand: { enabled: false, skipFrames: 10, minConfidence: 0.5, iouThreshold: 0.3, scoreThreshold: 0.7 },
 };
 let settings;
 let worker;
 let timeStamp;
+const fps = [];
 
 function str(...msg) {
   if (!Array.isArray(msg)) return msg;
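Because the demo ships the whole config object with every frame (both to human.detect and inside each worker message), these defaults are only starting values; flipping a flag at runtime takes effect on the next frame. A hedged sketch, assuming a UI checkbox handler and that age/gender sit nested under a face key as in the human repo's config (the hunk above does not show the enclosing key):

// hypothetical UI handler: re-enable age/gender analysis on the fly
function onAgeGenderToggle(checked) {
  config.face.age.enabled = checked;    // assumes age/gender live under config.face
  config.face.gender.enabled = checked; // the next detection call picks this up
}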
@@ -44,6 +45,7 @@ const log = (...msg) => {
 };
+
 async function drawFace(result, canvas) {
   if (!result) return;
   const ctx = canvas.getContext('2d');
   ctx.strokeStyle = ui.baseColor;
   ctx.font = ui.baseFont;
@@ -96,6 +98,7 @@ async function drawFace(result, canvas) {
 }
+
 async function drawBody(result, canvas) {
   if (!result) return;
   const ctx = canvas.getContext('2d');
   ctx.fillStyle = ui.baseColor;
   ctx.strokeStyle = ui.baseColor;
@@ -157,6 +160,7 @@ async function drawBody(result, canvas) {
 }
+
 async function drawHand(result, canvas) {
   if (!result) return;
   const ctx = canvas.getContext('2d');
   ctx.font = ui.baseFont;
   ctx.lineWidth = ui.baseLineWidth;
@@ -203,6 +207,13 @@ async function drawHand(result, canvas) {
 async function drawResults(input, result, canvas) {
   // update fps
-  settings.setValue('FPS', Math.round(1000 / (performance.now() - timeStamp)));
+  fps.push(1000 / (performance.now() - timeStamp));
+  if (fps.length > 20) fps.shift();
+  settings.setValue('FPS', Math.round(10 * fps.reduce((a, b) => a + b) / fps.length) / 10);
+
+  // eslint-disable-next-line no-use-before-define
+  requestAnimationFrame(() => runHumanDetect(input, canvas)); // immediate loop
+
   // draw image from video
   const ctx = canvas.getContext('2d');
   ctx.drawImage(input, 0, 0, input.width, input.height, 0, 0, canvas.width, canvas.height);
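Two things change in drawResults: the FPS readout becomes a rolling average over the last 20 frames instead of a single noisy sample, and the requestAnimationFrame loop moves here, so the next frame is requested as soon as results arrive rather than at the end of runHumanDetect. The averaging arithmetic in isolation (a sketch; timeStamp is assumed to be recorded when the frame was submitted):

// sketch: smooth instantaneous FPS over a 20-sample window, one decimal place
const fps = [];
function smoothedFps(timeStamp) {
  fps.push(1000 / (performance.now() - timeStamp)); // instantaneous frames/sec
  if (fps.length > 20) fps.shift();                 // drop the oldest sample
  return Math.round(10 * (fps.reduce((a, b) => a + b) / fps.length)) / 10;
}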
@@ -213,27 +224,24 @@ async function drawResults(input, result, canvas) {
   // update log
   const engine = await human.tf.engine();
   const memory = `${engine.state.numBytes.toLocaleString()} bytes ${engine.state.numDataBuffers.toLocaleString()} buffers ${engine.state.numTensors.toLocaleString()} tensors`;
-  const gpu = engine.backendInstance.numBytesInGPU ? `GPU: ${engine.backendInstance.numBytesInGPU.toLocaleString()} bytes` : '';
+  const gpu = engine.backendInstance ? `GPU: ${engine.backendInstance.numBytesInGPU.toLocaleString()} bytes` : '';
   document.getElementById('log').innerText = `
     TFJS Version: ${human.tf.version_core} | Backend: ${human.tf.getBackend()} | Memory: ${memory} ${gpu}
     Performance: ${str(result.performance)} | Object size: ${(str(result)).length.toLocaleString()} bytes
   `;
 }
 
-async function webWorker(input, image, canvas) {
+// simple wrapper for worker.postmessage that creates worker if one does not exist
+function webWorker(input, image, canvas) {
   if (!worker) {
+    // create new webworker and add event handler only once
     log('Creating worker thread');
-    // create new webworker
     worker = new Worker('demo-esm-webworker.js', { type: 'module' });
-    worker.addEventListener('message', async (msg) => {
-      await drawResults(input, msg.data, canvas);
-      // eslint-disable-next-line no-use-before-define
-      requestAnimationFrame(() => runHumanDetect(input, canvas)); // immediate loop
-    });
+    // after receiving message from webworker, parse&draw results and send new frame for processing
+    worker.addEventListener('message', async (msg) => drawResults(input, msg.data, canvas));
   }
-  // const offscreen = image.transferControlToOffscreen();
-  worker.postMessage({ image, config });
+  // pass image data as arraybuffer to worker by reference to avoid copy
+  worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
 }
 
 async function runHumanDetect(input, canvas) {
@@ -247,17 +255,17 @@ async function runHumanDetect(input, canvas) {
     const ctx = offscreen.getContext('2d');
     ctx.drawImage(input, 0, 0, input.width, input.height, 0, 0, canvas.width, canvas.height);
     const data = ctx.getImageData(0, 0, canvas.width, canvas.height);
-    // perform detection
-    await webWorker(input, data, canvas);
+    // perform detection in worker
+    webWorker(input, data, canvas);
   } else {
     let result = {};
     try {
       // perform detection
       result = await human.detect(input, config);
     } catch (err) {
       log('Error during execution:', err.message);
     }
-    await drawResults(input, result, canvas);
-    if (input.readyState) requestAnimationFrame(() => runHumanDetect(input, canvas)); // immediate loop
+    drawResults(input, result, canvas);
   }
  }
 }
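This is the sending half of the fix. The old code posted the ImageData object itself, which the structured-clone algorithm copies; the new code posts the underlying ArrayBuffer and names it in the transfer list, so ownership moves to the worker with no copy. A standalone sketch of the handoff, assuming video, worker, and config are already set up as in the demo (OffscreenCanvas as used above; a hidden canvas element works the same way):

// main-thread sketch: grab a frame and transfer its pixel buffer to the worker
const offscreen = new OffscreenCanvas(video.videoWidth, video.videoHeight);
const ctx = offscreen.getContext('2d');
ctx.drawImage(video, 0, 0);
const image = ctx.getImageData(0, 0, offscreen.width, offscreen.height);
// the second argument is the transfer list: image.data.buffer is detached
// here and must not be read again on this thread
worker.postMessage({ image: image.data.buffer, width: offscreen.width, height: offscreen.height, config }, [image.data.buffer]);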
@@ -89,6 +89,7 @@ var require_blazeface = __commonJS((exports2) => {
       this.inputSizeData = [config2.detector.inputSize, config2.detector.inputSize];
       this.inputSize = tf2.tensor1d([config2.detector.inputSize, config2.detector.inputSize]);
       this.iouThreshold = config2.detector.iouThreshold;
+      this.scaleFaces = 0.8;
       this.scoreThreshold = config2.detector.scoreThreshold;
     }
     async getBoundingBoxes(inputImage) {
@@ -132,7 +133,7 @@ var require_blazeface = __commonJS((exports2) => {
         scaleFactor: [inputImage.shape[2] / this.inputSizeData[0], inputImage.shape[1] / this.inputSizeData[1]]
       };
     }
-    async estimateFaces(input, returnTensors = false, annotateBoxes = true) {
+    async estimateFaces(input) {
      const image = tf2.tidy(() => {
        if (!(input instanceof tf2.Tensor)) {
          input = tf2.browser.fromPixels(input);
@@ -141,32 +142,8 @@ var require_blazeface = __commonJS((exports2) => {
      });
      const {boxes, scaleFactor} = await this.getBoundingBoxes(image);
      image.dispose();
-      if (returnTensors) {
-        return boxes.map((face) => {
-          const scaledBox = scaleBoxFromPrediction(face, scaleFactor);
-          const normalizedFace = {
-            topLeft: scaledBox.slice([0], [2]),
-            bottomRight: scaledBox.slice([2], [2])
-          };
-          if (annotateBoxes) {
-            const {landmarks, probability, anchor} = face;
-            const normalizedLandmarks = landmarks.add(anchor).mul(scaleFactor);
-            normalizedFace.landmarks = normalizedLandmarks;
-            normalizedFace.probability = probability;
-          }
-          return normalizedFace;
-        });
-      }
      return Promise.all(boxes.map(async (face) => {
        const scaledBox = scaleBoxFromPrediction(face, scaleFactor);
-        let normalizedFace;
-        if (!annotateBoxes) {
-          const boxData = await scaledBox.array();
-          normalizedFace = {
-            topLeft: boxData.slice(0, 2),
-            bottomRight: boxData.slice(2)
-          };
-        } else {
        const [landmarkData, boxData, probabilityData] = await Promise.all([face.landmarks, scaledBox, face.probability].map(async (d) => d.array()));
        const anchor = face.anchor;
        const [scaleFactorX, scaleFactorY] = scaleFactor;
@@ -174,7 +151,7 @@ var require_blazeface = __commonJS((exports2) => {
          (landmark[0] + anchor[0]) * scaleFactorX,
          (landmark[1] + anchor[1]) * scaleFactorY
        ]);
-        normalizedFace = {
+        const normalizedFace = {
          topLeft: boxData.slice(0, 2),
          bottomRight: boxData.slice(2),
          landmarks: scaledLandmarks,
@@ -183,7 +160,6 @@ var require_blazeface = __commonJS((exports2) => {
        disposeBox(face.box);
        face.landmarks.dispose();
        face.probability.dispose();
-      }
        scaledBox.dispose();
        return normalizedFace;
      }));
File diff suppressed because one or more lines are too long (11 files)
Binary file not shown (2 files)
@@ -78,6 +78,7 @@ class BlazeFaceModel {
     this.inputSizeData = [config.detector.inputSize, config.detector.inputSize];
     this.inputSize = tf.tensor1d([config.detector.inputSize, config.detector.inputSize]);
     this.iouThreshold = config.detector.iouThreshold;
+    this.scaleFaces = 0.8;
     this.scoreThreshold = config.detector.scoreThreshold;
   }
 
@@ -86,6 +87,7 @@ class BlazeFaceModel {
     const resizedImage = inputImage.resizeBilinear([this.width, this.height]);
     const normalizedImage = tf.mul(tf.sub(resizedImage.div(255), 0.5), 2);
     const batchedPrediction = this.blazeFaceModel.predict(normalizedImage);
+    // todo: add handler for blazeface-front and blazeface-back
     const prediction = batchedPrediction.squeeze();
     const decodedBounds = decodeBounds(prediction, this.anchors, this.inputSize);
     const logits = tf.slice(prediction, [0, 0], [-1, 1]);
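The context here shows BlazeFace's input pipeline: resize to the model's input size, then map byte values from [0, 255] to the [-1, 1] range the network expects via (x / 255 - 0.5) * 2. The same normalization written step by step (a sketch; tf is the @tensorflow/tfjs module and img is a pixel tensor):

// equivalent normalization, unrolled for clarity
const normalize = (img) => tf.tidy(() => {
  const unit = img.div(255);      // [0, 255]     -> [0, 1]
  const centered = unit.sub(0.5); // [0, 1]       -> [-0.5, 0.5]
  return centered.mul(2);         // [-0.5, 0.5]  -> [-1, 1]
});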
@@ -109,7 +111,8 @@ class BlazeFaceModel {
       const box = createBox(boundingBox);
       const boxIndex = boxIndices[i];
       const anchor = this.anchorsData[boxIndex];
-      const landmarks = tf.slice(detectedOutputs, [boxIndex, NUM_LANDMARKS - 1], [1, -1])
+      const landmarks = tf
+        .slice(detectedOutputs, [boxIndex, NUM_LANDMARKS - 1], [1, -1])
         .squeeze()
         .reshape([NUM_LANDMARKS, -1]);
       const probability = tf.slice(scores, [boxIndex], [1]);
@@ -126,7 +129,7 @@ class BlazeFaceModel {
     };
   }
 
-  async estimateFaces(input, returnTensors = false, annotateBoxes = true) {
+  async estimateFaces(input) {
     const image = tf.tidy(() => {
       if (!(input instanceof tf.Tensor)) {
         input = tf.browser.fromPixels(input);
@@ -135,32 +138,8 @@ class BlazeFaceModel {
     });
     const { boxes, scaleFactor } = await this.getBoundingBoxes(image);
     image.dispose();
-    if (returnTensors) {
-      return boxes.map((face) => {
-        const scaledBox = scaleBoxFromPrediction(face, scaleFactor);
-        const normalizedFace = {
-          topLeft: scaledBox.slice([0], [2]),
-          bottomRight: scaledBox.slice([2], [2]),
-        };
-        if (annotateBoxes) {
-          const { landmarks, probability, anchor } = face;
-          const normalizedLandmarks = landmarks.add(anchor).mul(scaleFactor);
-          normalizedFace.landmarks = normalizedLandmarks;
-          normalizedFace.probability = probability;
-        }
-        return normalizedFace;
-      });
-    }
     return Promise.all(boxes.map(async (face) => {
       const scaledBox = scaleBoxFromPrediction(face, scaleFactor);
-      let normalizedFace;
-      if (!annotateBoxes) {
-        const boxData = await scaledBox.array();
-        normalizedFace = {
-          topLeft: boxData.slice(0, 2),
-          bottomRight: boxData.slice(2),
-        };
-      } else {
      const [landmarkData, boxData, probabilityData] = await Promise.all([face.landmarks, scaledBox, face.probability].map(async (d) => d.array()));
      const anchor = face.anchor;
      const [scaleFactorX, scaleFactorY] = scaleFactor;
@@ -169,7 +148,7 @@ class BlazeFaceModel {
        (landmark[0] + anchor[0]) * scaleFactorX,
        (landmark[1] + anchor[1]) * scaleFactorY,
      ]));
-      normalizedFace = {
+      const normalizedFace = {
        topLeft: boxData.slice(0, 2),
        bottomRight: boxData.slice(2),
        landmarks: scaledLandmarks,
@@ -178,7 +157,6 @@ class BlazeFaceModel {
      disposeBox(face.box);
      face.landmarks.dispose();
      face.probability.dispose();
-    }
      scaledBox.dispose();
      return normalizedFace;
    }));
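With the returnTensors and annotateBoxes flags gone, estimateFaces has a single code path: every result is resolved to plain arrays with landmarks attached, and intermediate tensors are disposed internally. A hedged usage sketch of the simplified API (model is assumed to be a loaded BlazeFaceModel; field shapes follow the hunks above):

// sketch: consume the simplified estimateFaces() results
async function logFaces(model, video) {
  const faces = await model.estimateFaces(video); // no flags anymore
  for (const face of faces) {
    const [x1, y1] = face.topLeft;      // plain numbers, no tensor disposal needed
    const [x2, y2] = face.bottomRight;
    console.log(`face at ${x1},${y1} -> ${x2},${y2} with ${face.landmarks.length} landmarks`);
  }
}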