diff --git a/demo/browser.js b/demo/browser.js
index 1228728f..051dede5 100644
--- a/demo/browser.js
+++ b/demo/browser.js
@@ -134,11 +134,11 @@ async function drawResults(input) {
const avgDetect = Math.trunc(10 * ui.detectFPS.reduce((a, b) => a + b, 0) / ui.detectFPS.length) / 10;
const avgDraw = Math.trunc(10 * ui.drawFPS.reduce((a, b) => a + b, 0) / ui.drawFPS.length) / 10;
const warning = (ui.detectFPS.length > 5) && (avgDetect < 5) ? 'warning: your performance is low: try switching to a higher performance backend, lowering the resolution, or disabling some models' : '';
- document.getElementById('log').innerText = `
- video: ${ui.camera.name} | facing: ${ui.camera.facing} | screen: ${window.innerWidth} x ${window.innerHeight} camera: ${ui.camera.width} x ${ui.camera.height} ${processing}
- backend: ${human.tf.getBackend()} | ${memory}
- performance: ${str(result.performance)}ms FPS process:${avgDetect} refresh:${avgDraw}
- ${warning}
+ document.getElementById('log').innerHTML = `
+ video: ${ui.camera.name} | facing: ${ui.camera.facing} | screen: ${window.innerWidth} x ${window.innerHeight} camera: ${ui.camera.width} x ${ui.camera.height} ${processing}
+ backend: ${human.tf.getBackend()} | ${memory}
+ performance: ${str(result.performance)}ms FPS process:${avgDetect} refresh:${avgDraw}
+ ${warning}
`;
ui.framesDraw++;
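
Note on the browser.js change: switching from innerText to innerHTML makes the browser parse the assigned status string as markup instead of rendering it literally. A minimal sketch of the difference, assuming only that a #log element exists on the page; the <br> tag is illustrative, not taken from the patch:

const log = document.getElementById('log');
log.innerText = 'a<br>b'; // renders the literal text "a<br>b"
log.innerHTML = 'a<br>b'; // parses the markup and renders two lines
// caveat: innerHTML renders whatever markup the interpolated values contain,
// which is only safe here because the status fields are app-generated
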
diff --git a/demo/menu.js b/demo/menu.js
index be9336b3..3eeac1f6 100644
--- a/demo/menu.js
+++ b/demo/menu.js
@@ -300,7 +300,7 @@ class Menu {
const width = canvas.width / values.length;
const max = 1 + Math.max(...values);
const height = canvas.height / max;
- for (const i in values) {
+ for (let i = 0; i < values.length; i++) {
const gradient = ctx.createLinearGradient(0, (max - values[i]) * height, 0, 0);
gradient.addColorStop(0.1, theme.chartColor);
gradient.addColorStop(0.4, theme.background);
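
The loop rewrite in menu.js replaces for...in, which enumerates property keys rather than array indices: the keys arrive as strings and include any enumerable non-index properties. The indexed for loop visits exactly the elements 0..length-1 with numeric indices. A small sketch of the pitfall (the extra property is contrived, for illustration only):

const values = [10, 20, 30];
values.label = 'fps'; // enumerable non-index property, contrived for illustration
for (const i in values) console.log(i); // '0', '1', '2', 'label' (all strings)
for (let i = 0; i < values.length; i++) console.log(i); // 0, 1, 2 (numbers only)
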
diff --git a/src/gesture/gesture.js b/src/gesture/gesture.js
index e200ff64..f2308878 100644
--- a/src/gesture/gesture.js
+++ b/src/gesture/gesture.js
@@ -1,7 +1,7 @@
exports.body = (res) => {
if (!res) return [];
const gestures = [];
- for (const i in res) {
+ for (let i = 0; i < res.length; i++) {
// raising hands
const leftWrist = res[i].keypoints.find((a) => (a.part === 'leftWrist'));
const rightWrist = res[i].keypoints.find((a) => (a.part === 'rightWrist'));
@@ -21,7 +21,7 @@ exports.body = (res) => {
exports.face = (res) => {
if (!res) return [];
const gestures = [];
- for (const i in res) {
+ for (let i = 0; i < res.length; i++) {
if (res[i].mesh && res[i].mesh.length > 0) {
const eyeFacing = res[i].mesh[35][2] - res[i].mesh[263][2];
if (Math.abs(eyeFacing) < 10) gestures.push({ face: i, gesture: 'facing camera' });
@@ -42,7 +42,7 @@ exports.face = (res) => {
exports.hand = (res) => {
if (!res) return [];
const gestures = [];
- for (const i in res) {
+ for (let i = 0; i < res.length; i++) {
const fingers = [];
for (const [finger, pos] of Object.entries(res[i]['annotations'])) {
if (finger !== 'palmBase') fingers.push({ name: finger.toLowerCase(), position: pos[0] }); // get tip of each finger
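
The same loop rewrite appears three times in gesture.js, and here it also changes the output: the loop index is embedded in each gesture object, so for...in produced string values for the body/face/hand fields while the indexed loop produces numbers. A sketch with a stand-in result array:

const res = [{}, {}]; // stand-in for detection results
const byForIn = []; const byIndex = [];
for (const i in res) byForIn.push({ face: i });
for (let i = 0; i < res.length; i++) byIndex.push({ face: i });
console.log(byForIn); // [ { face: '0' }, { face: '1' } ] (string indices)
console.log(byIndex); // [ { face: 0 }, { face: 1 } ] (numeric indices)
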
diff --git a/src/hand/handdetector.js b/src/hand/handdetector.js
index 3edc584b..8446cdbb 100644
--- a/src/hand/handdetector.js
+++ b/src/hand/handdetector.js
@@ -50,24 +50,24 @@ class HandDetector {
const batched = this.model.predict(input);
const predictions = batched.squeeze();
batched.dispose();
- const scores = tf.tidy(() => tf.sigmoid(tf.slice(predictions, [0, 0], [-1, 1])).squeeze());
- const scoresVal = scores.dataSync();
+ const scoresT = tf.tidy(() => tf.sigmoid(tf.slice(predictions, [0, 0], [-1, 1])).squeeze());
+ const scores = scoresT.dataSync();
const rawBoxes = tf.slice(predictions, [0, 1], [-1, 4]);
const boxes = this.normalizeBoxes(rawBoxes);
rawBoxes.dispose();
const filteredT = await tf.image.nonMaxSuppressionAsync(boxes, scores, config.hand.maxHands, config.hand.iouThreshold, config.hand.scoreThreshold);
const filtered = filteredT.arraySync();
- scores.dispose();
+ scoresT.dispose();
filteredT.dispose();
const hands = [];
- for (const boxIndex of filtered) {
- if (scoresVal[boxIndex] >= config.hand.minConfidence) {
- const matchingBox = tf.slice(boxes, [boxIndex, 0], [1, -1]);
- const rawPalmLandmarks = tf.slice(predictions, [boxIndex, 5], [1, 14]);
- const palmLandmarks = tf.tidy(() => this.normalizeLandmarks(rawPalmLandmarks, boxIndex).reshape([-1, 2]));
+ for (const index of filtered) {
+ if (scores[index] >= config.hand.minConfidence) {
+ const matchingBox = tf.slice(boxes, [index, 0], [1, -1]);
+ const rawPalmLandmarks = tf.slice(predictions, [index, 5], [1, 14]);
+ const palmLandmarks = tf.tidy(() => this.normalizeLandmarks(rawPalmLandmarks, index).reshape([-1, 2]));
rawPalmLandmarks.dispose();
- hands.push({ box: matchingBox, palmLandmarks, confidence: scoresVal[boxIndex] });
+ hands.push({ box: matchingBox, palmLandmarks, confidence: scores[index] });
}
}
predictions.dispose();
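
The handdetector.js renames follow a common TensorFlow.js convention: a T suffix marks variables that hold tensors, which tie up backend memory until dispose() is called, while plain names hold ordinary JavaScript values that are garbage-collected normally. A minimal sketch of the pattern, assuming @tensorflow/tfjs is available; the logits tensor is a stand-in for model output:

const tf = require('@tensorflow/tfjs');
const logits = tf.tensor1d([0.5, -1.2, 2.0]); // stand-in for raw model scores
const scoresT = tf.sigmoid(logits); // tensor: must be disposed explicitly
const scores = scoresT.dataSync(); // plain Float32Array: safe to keep around
scoresT.dispose();
logits.dispose();
console.log(scores); // Float32Array [ ~0.62, ~0.23, ~0.88 ]
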
diff --git a/src/hand/handpipeline.js b/src/hand/handpipeline.js
index ff7537ea..bd339bbc 100644
--- a/src/hand/handpipeline.js
+++ b/src/hand/handpipeline.js
@@ -28,9 +28,9 @@ const PALM_LANDMARKS_INDEX_OF_PALM_BASE = 0;
const PALM_LANDMARKS_INDEX_OF_MIDDLE_FINGER_BASE = 2;
class HandPipeline {
- constructor(boundingBoxDetector, meshDetector, inputSize) {
- this.boxDetector = boundingBoxDetector;
- this.meshDetector = meshDetector;
+ constructor(handDetector, landmarkDetector, inputSize) {
+ this.handDetector = handDetector;
+ this.landmarkDetector = landmarkDetector;
this.inputSize = inputSize;
this.storedBoxes = [];
this.skipped = 1000;
@@ -90,23 +90,23 @@ class HandPipeline {
// run the new detector every skipFrames frames, unless we only want the box to start with
let boxes;
if ((this.skipped > config.hand.skipFrames) || !config.hand.landmarks || !config.videoOptimized) {
- boxes = await this.boxDetector.estimateHandBounds(image, config);
+ boxes = await this.handDetector.estimateHandBounds(image, config);
// don't reset on test image
if ((image.shape[1] !== 255) && (image.shape[2] !== 255)) this.skipped = 0;
}
// if the detector result count doesn't match the current working set, use it to reset the working set
if (boxes && (boxes.length > 0) && ((boxes.length !== this.detectedHands) && (this.detectedHands !== config.hand.maxHands) || !config.hand.landmarks)) {
- this.storedBoxes = [];
this.detectedHands = 0;
- for (const possible of boxes) this.storedBoxes.push(possible);
+ this.storedBoxes = [...boxes];
+ // for (const possible of boxes) this.storedBoxes.push(possible);
if (this.storedBoxes.length > 0) useFreshBox = true;
}
const hands = [];
// console.log(`skipped: ${this.skipped} max: ${config.hand.maxHands} detected: ${this.detectedHands} stored: ${this.storedBoxes.length} new: ${boxes?.length}`);
// go through working set of boxes
- for (const i in this.storedBoxes) {
+ for (let i = 0; i < this.storedBoxes.length; i++) {
const currentBox = this.storedBoxes[i];
if (!currentBox) continue;
if (config.hand.landmarks) {
@@ -120,11 +120,11 @@ class HandPipeline {
const handImage = croppedInput.div(255);
croppedInput.dispose();
rotatedImage.dispose();
- const [confidence, keypoints] = await this.meshDetector.predict(handImage);
+ const [confidenceT, keypoints] = await this.landmarkDetector.predict(handImage);
handImage.dispose();
- const confidenceValue = confidence.dataSync()[0];
- confidence.dispose();
- if (confidenceValue >= config.hand.minConfidence) {
+ const confidence = confidenceT.dataSync()[0];
+ confidenceT.dispose();
+ if (confidence >= config.hand.minConfidence) {
const keypointsReshaped = tf.reshape(keypoints, [-1, 3]);
const rawCoords = keypointsReshaped.arraySync();
keypoints.dispose();
@@ -134,7 +134,7 @@ class HandPipeline {
this.storedBoxes[i] = nextBoundingBox;
const result = {
landmarks: coords,
- confidence: confidenceValue,
+ confidence,
box: {
topLeft: nextBoundingBox.startPoint,
bottomRight: nextBoundingBox.endPoint,
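
Besides mirroring the tensor-naming convention from handdetector.js (confidenceT for the tensor, confidence for its unwrapped value, which also enables the shorthand property in the result object), handpipeline.js swaps a clear-then-push loop for a spread copy. The spread builds a fresh shallow copy in one expression; a small sketch:

const boxes = [{ startPoint: [0, 0] }, { startPoint: [5, 5] }];
const stored = [...boxes];
console.log(stored !== boxes); // true: the array itself is new
console.log(stored[0] === boxes[0]); // true: elements are shared (shallow copy)
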
diff --git a/wiki b/wiki
index bcac4981..9595a995 160000
--- a/wiki
+++ b/wiki
@@ -1 +1 @@
-Subproject commit bcac4981f7df29e367259caf6b3b73e5ecde6519
+Subproject commit 9595a995f7bcf2c6b0d70fed98260c8ab4a6f0d7