mirror of https://github.com/vladmandic/human
update hand detector processing algorithm
parent ff6cadead0
commit b0bd103db2
@@ -11,9 +11,8 @@ Repository: **<git+https://github.com/vladmandic/human.git>**

### **HEAD -> main** 2021/08/31 mandic00@live.com

### **origin/main** 2021/08/31 mandic00@live.com

- simplify canvas handling in nodejs
- full rebuild

### **2.1.5** 2021/08/31 mandic00@live.com
@@ -35,7 +35,7 @@ let userConfig = {
/*
wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
async: false,
cacheSensitivity: 0,
cacheSensitivity: 0.75,
filter: {
enabled: false,
flip: false,
@@ -49,11 +49,12 @@ let userConfig = {
},
object: { enabled: false },
gesture: { enabled: true },
hand: { enabled: false },
hand: { enabled: true },
body: { enabled: false },
// body: { enabled: true, modelPath: 'movenet-multipose.json' },
// body: { enabled: true, modelPath: 'posenet.json' },
segmentation: { enabled: false },
/*
*/
};
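The demo change above flips `hand` from disabled to enabled in the demo's `userConfig`. For reference, a minimal sketch of passing such a config to the library follows; it assumes the published `@vladmandic/human` API (`new Human(config)` / `human.detect(input)`) and a hypothetical `video` element id:

```ts
// minimal sketch: enabling hand detection when creating a Human instance
// (assumes the published @vladmandic/human package; the element id is hypothetical)
import { Human } from '@vladmandic/human';

const human = new Human({
  hand: { enabled: true },   // the demo change enables the hand module
  body: { enabled: false },
  object: { enabled: false },
});

async function run() {
  const video = document.getElementById('video') as HTMLVideoElement; // hypothetical element
  const result = await human.detect(video);
  console.log('hands detected:', result.hand.length);
}
```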
@@ -82,6 +83,7 @@ const ui = {
buffered: true, // should output be buffered between frames
interpolated: true, // should output be interpolated for smoothness between frames
iconSize: '48px', // ui icon sizes
autoPlay: false, // start webcam & detection on load

// internal variables
busy: false, // internal camera busy flag
@@ -375,9 +377,9 @@ async function setupCamera() {
canvas.height = video.videoHeight;
ui.menuWidth.input.setAttribute('value', video.videoWidth);
ui.menuHeight.input.setAttribute('value', video.videoHeight);
if (live) video.play();
if (live || ui.autoPlay) video.play();
// eslint-disable-next-line no-use-before-define
if (live && !ui.detectThread) runHumanDetect(video, canvas);
if ((live || ui.autoPlay) && !ui.detectThread) runHumanDetect(video, canvas);
ui.busy = false;
resolve();
};
@@ -936,6 +938,10 @@ async function main() {
ui.bench = JSON.parse(params.get('bench'));
log('overriding bench:', ui.bench);
}
if (params.has('play')) {
ui.autoPlay = true;
log('overriding autoplay:', true);
}
if (params.has('draw')) {
ui.drawWarmup = JSON.parse(params.get('draw'));
log('overriding drawWarmup:', ui.drawWarmup);
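The new `play` branch above lets a query-string parameter turn on `ui.autoPlay`. A rough sketch of that override pattern, with illustrative variable names rather than the demo's exact `ui` object:

```ts
// sketch of a query-string override, mirroring the pattern used in the demo
// (the demo's real ui object has more fields; these two are illustrative)
const params = new URLSearchParams(window.location.search);
const ui = { autoPlay: false, bench: false };

if (params.has('play')) {
  ui.autoPlay = true; // e.g. .../index.html?play starts webcam & detection on load
}
if (params.has('bench')) {
  ui.bench = JSON.parse(params.get('bench') as string); // accepts true/false
}
```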
@@ -66,7 +66,7 @@
"@tensorflow/tfjs-layers": "^3.9.0",
"@tensorflow/tfjs-node": "^3.9.0",
"@tensorflow/tfjs-node-gpu": "^3.9.0",
"@types/node": "^16.7.8",
"@types/node": "^16.7.10",
"@typescript-eslint/eslint-plugin": "^4.30.0",
"@typescript-eslint/parser": "^4.30.0",
"@vladmandic/pilogger": "^0.2.18",
@@ -331,9 +331,9 @@ const config: Config = {
// e.g., if model is running at 25 FPS, we can re-use existing bounding
// box for updated hand skeleton analysis as the hand probably
// hasn't moved much in short time (10 * 1/25 = 0.4 sec)
minConfidence: 0.1, // threshold for discarding a prediction
iouThreshold: 0.1, // amount of overlap between two detected objects before one object is removed
maxDetected: 2, // maximum number of hands detected in the input
minConfidence: 0.8, // threshold for discarding a prediction
iouThreshold: 0.2, // amount of overlap between two detected objects before one object is removed
maxDetected: 1, // maximum number of hands detected in the input
// should be set to the minimum number for performance
landmarks: true, // detect hand landmarks or just hand boundary box
detector: {
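The defaults above are tightened: `minConfidence` rises from 0.1 to 0.8, `iouThreshold` from 0.1 to 0.2, and `maxDetected` drops from 2 to 1. Applications that need looser behaviour can still override them in user config; the values below are illustrative only, not recommendations:

```ts
// sketch: overriding the tightened hand defaults for an app that expects two hands
const userConfig = {
  hand: {
    enabled: true,
    minConfidence: 0.5, // lower than the new 0.8 default to keep weaker detections
    iouThreshold: 0.2,
    maxDetected: 2,     // the new default is 1; raise only if multiple hands are expected
  },
};
```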
@@ -407,6 +407,7 @@ export async function hand(inCanvas: HTMLCanvasElement, result: Array<Hand>, dra
}
if (localOptions.drawLabels) {
const addHandLabel = (part, title) => {
if (!part) return;
ctx.fillStyle = localOptions.useDepth ? `rgba(${127.5 + (2 * part[part.length - 1][2])}, ${127.5 - (2 * part[part.length - 1][2])}, 255, 0.5)` : localOptions.color;
ctx.fillText(title, part[part.length - 1][0] + 4, part[part.length - 1][1] + 4);
};
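For reference, the depth-dependent `fillStyle` above maps the z coordinate of a part's last keypoint to a label color. A small sketch of that mapping as a standalone helper (a hypothetical function, not part of the library):

```ts
// sketch of the depth-to-color mapping used for labels when useDepth is set
function depthColor(z: number): string {
  const r = 127.5 + 2 * z; // larger z shifts the label toward red
  const g = 127.5 - 2 * z; // larger z shifts the label away from green
  return `rgba(${r}, ${g}, 255, 0.5)`;
}
```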
@@ -167,6 +167,11 @@ export function estimate(landmarks) {
// step 1: calculate slopes
const slopesXY: Array<number[]> = [];
const slopesYZ: Array<number[]> = [];
const fingerCurls: Array<number> = [];
const fingerDirections: Array<number> = [];
if (!landmarks) return { curls: fingerCurls, directions: fingerDirections };

// step 1: calculate slopes
for (const finger of Finger.all) {
const points = Finger.getPoints(finger);
const slopeAtXY: Array<number> = [];
@@ -186,8 +191,6 @@ export function estimate(landmarks) {
}

// step 2: calculate orientations
const fingerCurls: Array<number> = [];
const fingerDirections: Array<number> = [];
for (const finger of Finger.all) {
// start finger predictions from palm - except for thumb
const pointIndexAt = (finger === Finger.thumb) ? 1 : 0;
@@ -40,31 +40,23 @@ export class HandDetector {
}

async getBoxes(input, config) {
const batched = this.model.predict(input) as Tensor;
const predictions = tf.squeeze(batched);
tf.dispose(batched);
const scoresT = tf.tidy(() => tf.squeeze(tf.sigmoid(tf.slice(predictions, [0, 0], [-1, 1]))));
const scores = await scoresT.data();
const rawBoxes = tf.slice(predictions, [0, 1], [-1, 4]);
const boxes = this.normalizeBoxes(rawBoxes);
tf.dispose(rawBoxes);
const filteredT = await tf.image.nonMaxSuppressionAsync(boxes, scores, config.hand.maxDetected, config.hand.iouThreshold, config.hand.minConfidence);
const filtered = await filteredT.array();

tf.dispose(scoresT);
tf.dispose(filteredT);
const t: Record<string, Tensor> = {};
t.batched = this.model.predict(input) as Tensor;
t.predictions = tf.squeeze(t.batched);
t.scores = tf.tidy(() => tf.squeeze(tf.sigmoid(tf.slice(t.predictions, [0, 0], [-1, 1]))));
const scores = await t.scores.data();
t.boxes = tf.slice(t.predictions, [0, 1], [-1, 4]);
t.norm = this.normalizeBoxes(t.boxes);
t.nms = await tf.image.nonMaxSuppressionAsync(t.norm, t.scores, 10 * config.hand.maxDetected, config.hand.iouThreshold, config.hand.minConfidence);
const nms = await t.nms.array() as Array<number>;
const hands: Array<{ box: Tensor, palmLandmarks: Tensor, confidence: number }> = [];
for (const index of filtered) {
if (scores[index] >= config.hand.minConfidence) {
const matchingBox = tf.slice(boxes, [index, 0], [1, -1]);
const rawPalmLandmarks = tf.slice(predictions, [index, 5], [1, 14]);
const palmLandmarks = tf.tidy(() => tf.reshape(this.normalizeLandmarks(rawPalmLandmarks, index), [-1, 2]));
tf.dispose(rawPalmLandmarks);
hands.push({ box: matchingBox, palmLandmarks, confidence: scores[index] });
}
for (const index of nms) {
const palmBox = tf.slice(t.norm, [index, 0], [1, -1]);
const palmLandmarks = tf.tidy(() => tf.reshape(this.normalizeLandmarks(tf.slice(t.predictions, [index, 5], [1, 14]), index), [-1, 2]));
hands.push({ box: palmBox, palmLandmarks, confidence: scores[index] });
// console.log('handdetector:getBoxes', nms.length, index, scores[index], config.hand.maxDetected, config.hand.iouThreshold, config.hand.minConfidence, palmBox.dataSync());
}
tf.dispose(predictions);
tf.dispose(boxes);
for (const tensor of Object.keys(t)) tf.dispose(t[tensor]); // dispose all
return hands;
}
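This hunk is the core of the commit: `getBoxes` now keeps every intermediate tensor in a single `Record<string, Tensor>` map, passes the score threshold into `nonMaxSuppressionAsync` (asking for `10 * maxDetected` candidates), and disposes all temporaries in one loop. A minimal sketch of that pattern under stated assumptions (hypothetical model and output layout, and without the box-normalization step the real code performs):

```ts
// sketch of the "collect temporaries in a map, dispose all at the end" pattern
import * as tf from '@tensorflow/tfjs';

async function detectBoxes(model: tf.GraphModel, input: tf.Tensor, maxDetected: number, iouThreshold: number, minConfidence: number) {
  const t: Record<string, tf.Tensor> = {};
  t.batched = model.predict(input) as tf.Tensor;     // raw model output
  t.predictions = tf.squeeze(t.batched);              // assumed shape: [anchors, 1 + 4 + landmarks]
  t.scores = tf.tidy(() => tf.squeeze(tf.sigmoid(tf.slice(t.predictions, [0, 0], [-1, 1])))); // per-anchor confidence
  t.boxes = tf.slice(t.predictions, [0, 1], [-1, 4]); // per-anchor box coordinates
  // ask NMS for more candidates than needed; the score threshold already drops weak ones
  t.nms = await tf.image.nonMaxSuppressionAsync(t.boxes as tf.Tensor2D, t.scores as tf.Tensor1D, 10 * maxDetected, iouThreshold, minConfidence);
  const scores = await t.scores.data();
  const indices = await t.nms.array() as number[];
  const results = indices.map((index) => ({ index, score: scores[index] }));
  for (const tensor of Object.keys(t)) tf.dispose(t[tensor]); // single cleanup point: no leaked tensors
  return results;
}
```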
@@ -85,7 +85,7 @@ export class HandPipeline {
// run new detector every skipFrames unless we only want box to start with
let boxes;

// console.log(this.skipped, config.hand.skipFrames, !config.hand.landmarks, !config.skipFrame);
// console.log('handpipeline:estimateHands:skip criteria', this.skipped, config.hand.skipFrames, !config.hand.landmarks, !config.skipFrame); // should skip hand detector?
if ((this.skipped === 0) || (this.skipped > config.hand.skipFrames) || !config.hand.landmarks || !config.skipFrame) {
boxes = await this.handDetector.estimateHandBounds(image, config);
this.skipped = 0;
@@ -120,7 +120,7 @@ export class HandPipeline {
tf.dispose(handImage);
const confidence = (await confidenceT.data())[0];
tf.dispose(confidenceT);
if (confidence >= config.hand.minConfidence) {
if (confidence >= config.hand.minConfidence / 4) {
const keypointsReshaped = tf.reshape(keypoints, [-1, 3]);
const rawCoords = await keypointsReshaped.array();
tf.dispose(keypoints);
@@ -135,6 +135,7 @@ export class HandPipeline {
};
hands.push(result);
} else {
// console.log('handpipeline:estimateHands low', confidence);
this.storedBoxes[i] = null;
}
tf.dispose(keypoints);
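The two pipeline hunks above relax the landmark-confidence cutoff to `minConfidence / 4` and clear `storedBoxes[i]` when a cached hand is lost, so the palm detector re-runs for that slot. A sketch of the idea with illustrative names (not the library's API):

```ts
// sketch of the cache-invalidation idea: a cached box is kept only while the
// landmark model stays reasonably confident about it
function keepCachedBox(storedBoxes: Array<object | null>, i: number, confidence: number, minConfidence: number): boolean {
  // the relaxed cutoff (minConfidence / 4) keeps borderline hands tracking between detector runs
  if (confidence >= minConfidence / 4) return true;
  storedBoxes[i] = null; // hand lost: drop the cached box so the detector re-runs for this slot
  return false;
}
```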
@@ -148,8 +148,8 @@ export class Human {
* @param userConfig: {@link Config}
*/
constructor(userConfig?: Config | Record<string, unknown>) {
Human.version = app.version;
Object.defineProperty(this, 'version', { value: app.version });
Human.version = app.version; // expose version as a static property on the class itself
Object.defineProperty(this, 'version', { value: app.version }); // expose version property on each class instance
defaults.wasmPath = `https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@${tf.version_core}/dist/`;
this.config = mergeDeep(defaults, userConfig || {});
this.tf = tf;
@@ -427,6 +427,7 @@ export class Human {
const skipFrame = diff < Math.max(this.config.cacheSensitivity, this.#lastCacheDiff);
// if difference is above 10x threshold, don't use last value to force reset cache for significant change of scenes or images
this.#lastCacheDiff = diff > 10 * this.config.cacheSensitivity ? 0 : diff;
// console.log('skipFrame', skipFrame, this.config.cacheSensitivity, diff);
return skipFrame;
}
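The `#lastCacheDiff` line above implements the frame-skip heuristic: skip heavy models while consecutive frames barely differ, but reset the cache when the difference exceeds 10x `cacheSensitivity` (a significant scene change). A standalone sketch of the same logic:

```ts
// sketch of the frame-change heuristic (diff is assumed to be a normalized pixel difference, 0..1)
let lastCacheDiff = 0;

function shouldSkipFrame(diff: number, cacheSensitivity: number): boolean {
  const skipFrame = diff < Math.max(cacheSensitivity, lastCacheDiff);
  // a difference above 10x the threshold means a scene change: forget the last value
  lastCacheDiff = diff > 10 * cacheSensitivity ? 0 : diff;
  return skipFrame;
}
```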
@@ -59,9 +59,10 @@ export function calc(newResult: Result): Result {
.map((b, j) => ((bufferedFactor - 1) * bufferedResult.hand[i].box[j] + b) / bufferedFactor)) as [number, number, number, number];
const boxRaw = (newResult.hand[i].boxRaw // update boxRaw
.map((b, j) => ((bufferedFactor - 1) * bufferedResult.hand[i].boxRaw[j] + b) / bufferedFactor)) as [number, number, number, number];
const keypoints = newResult.hand[i].keypoints // update landmarks
const keypoints = newResult.hand[i].keypoints ? newResult.hand[i].keypoints // update landmarks
.map((landmark, j) => landmark
.map((coord, k) => (((bufferedFactor - 1) * bufferedResult.hand[i].keypoints[j][k] + coord) / bufferedFactor)) as [number, number, number]);
.map((coord, k) => (((bufferedFactor - 1) * bufferedResult.hand[i].keypoints[j][k] + coord) / bufferedFactor)) as [number, number, number])
: [];
const keys = Object.keys(newResult.hand[i].annotations); // update annotations
const annotations = {};
for (const key of keys) {
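The interpolation above applies the same smoothing rule to boxes and keypoints, `((bufferedFactor - 1) * previous + new) / bufferedFactor`, and the fix falls back to an empty keypoints array when a hand was detected without landmarks. A tiny sketch of the smoothing rule:

```ts
// sketch of the smoothing rule used by calc(): each buffered coordinate moves
// toward the new value by 1/bufferedFactor per frame
function smooth(prev: number, next: number, bufferedFactor: number): number {
  return ((bufferedFactor - 1) * prev + next) / bufferedFactor;
}

// e.g. with bufferedFactor = 3: smooth(10, 16, 3) === 12, a third of the way to the new value
```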