autodetect number of bodies and hands

pull/193/head
Vladimir Mandic 2021-09-25 19:14:03 -04:00
parent 50c84cc6d1
commit 0282d92d16
132 changed files with 1203 additions and 1143 deletions
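This change switches the default `body.maxDetected` and `hand.maxDetected` from 1 to -1 and lets `Human.detect` derive the per-frame limits from the number of detected faces. A minimal usage sketch follows, assuming the published `@vladmandic/human` package name and its standard constructor and `detect()` API (not verified against the docs here):

```js
import Human from '@vladmandic/human'; // assumed package name

const human = new Human({
  body: { maxDetected: -1 }, // -1 = autodetect: limit becomes 1x the number of detected faces
  hand: { maxDetected: -1 }, // -1 = autodetect: limit becomes 2x the number of detected faces
});

async function run(input) { // input: any image/video/canvas/tensor the library accepts
  const result = await human.detect(input);
  console.log('faces:', result.face.length, 'bodies:', result.body.length, 'hands:', result.hand.length);
}
```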


@ -9,7 +9,10 @@
## Changelog
### **HEAD -> main** 2021/09/24 mandic00@live.com
### **HEAD -> main** 2021/09/25 mandic00@live.com
### **origin/main** 2021/09/25 mandic00@live.com
- new release


@ -168,7 +168,7 @@ var config = {
body: {
enabled: true,
modelPath: "movenet-lightning.json",
maxDetected: 1,
maxDetected: -1,
minConfidence: 0.2,
skipFrames: 1
},
@ -178,7 +178,7 @@ var config = {
skipFrames: 18,
minConfidence: 0.8,
iouThreshold: 0.2,
maxDetected: 1,
maxDetected: -1,
landmarks: true,
detector: {
modelPath: "handdetect.json"
@ -9586,6 +9586,7 @@ function fakeOps(kernelNames, config3) {
}
// src/handtrack/handtrack.ts
var boxScaleFact = 1.5;
var models2 = [null, null];
var modelOutputNodes = ["StatefulPartitionedCall/Postprocessor/Slice", "StatefulPartitionedCall/Postprocessor/ExpandDims_1"];
var inputSize = [[0, 0], [0, 0]];
@ -9671,7 +9672,15 @@ async function detectHands(input, config3) {
tfjs_esm_exports.dispose(t.nms);
for (const res of Array.from(nms)) {
const boxSlice = tfjs_esm_exports.slice(t.boxes, res, 1);
const yxBox = await boxSlice.data();
let yxBox = [0, 0, 0, 0];
if (config3.hand.landmarks) {
const detectedBox = await boxSlice.data();
const boxCenter = [(detectedBox[0] + detectedBox[2]) / 2, (detectedBox[1] + detectedBox[3]) / 2];
const boxDiff = [+boxCenter[0] - detectedBox[0], +boxCenter[1] - detectedBox[1], -boxCenter[0] + detectedBox[2], -boxCenter[1] + detectedBox[3]];
yxBox = [boxCenter[0] - boxScaleFact * boxDiff[0], boxCenter[1] - boxScaleFact * boxDiff[1], boxCenter[0] + boxScaleFact * boxDiff[2], boxCenter[1] + boxScaleFact * boxDiff[3]];
} else {
yxBox = await boxSlice.data();
}
const boxRaw3 = [yxBox[1], yxBox[0], yxBox[3] - yxBox[1], yxBox[2] - yxBox[0]];
const box6 = [Math.trunc(boxRaw3[0] * outputSize[0]), Math.trunc(boxRaw3[1] * outputSize[1]), Math.trunc(boxRaw3[2] * outputSize[0]), Math.trunc(boxRaw3[3] * outputSize[1])];
tfjs_esm_exports.dispose(boxSlice);
@ -9689,7 +9698,6 @@ async function detectHands(input, config3) {
hands.length = config3.hand.maxDetected || 1;
return hands;
}
var boxScaleFact = 1.5;
function updateBoxes(h, keypoints3) {
const finger = [keypoints3.map((pt) => pt[0]), keypoints3.map((pt) => pt[1])];
const minmax = [Math.min(...finger[0]), Math.max(...finger[0]), Math.min(...finger[1]), Math.max(...finger[1])];
@ -10037,6 +10045,7 @@ async function load9(config3) {
if (env.initial)
model6 = null;
if (!model6) {
fakeOps(["size"], config3);
model6 = await tfjs_esm_exports.loadGraphModel(join(config3.modelBasePath, config3.body.modelPath || ""));
if (!model6 || !model6["modelUrl"])
log("load model failed:", config3.body.modelPath);
@ -10089,8 +10098,8 @@ async function parseSinglePose(res, config3, image24) {
}
async function parseMultiPose(res, config3, image24) {
const persons2 = [];
for (let p = 0; p < res[0].length; p++) {
const kpt3 = res[0][p];
for (let id = 0; id < res[0].length; id++) {
const kpt3 = res[0][id];
score2 = Math.round(100 * kpt3[51 + 4]) / 100;
if (score2 < config3.body.minConfidence)
continue;
@ -10101,20 +10110,14 @@ async function parseMultiPose(res, config3, image24) {
keypoints2.push({
part: bodyParts2[i],
score: partScore,
positionRaw: [
kpt3[3 * i + 1],
kpt3[3 * i + 0]
],
position: [
Math.trunc(kpt3[3 * i + 1] * (image24.shape[2] || 0)),
Math.trunc(kpt3[3 * i + 0] * (image24.shape[1] || 0))
]
positionRaw: [kpt3[3 * i + 1], kpt3[3 * i + 0]],
position: [Math.trunc(kpt3[3 * i + 1] * (image24.shape[2] || 0)), Math.trunc(kpt3[3 * i + 0] * (image24.shape[1] || 0))]
});
}
}
boxRaw2 = [kpt3[51 + 1], kpt3[51 + 0], kpt3[51 + 3] - kpt3[51 + 1], kpt3[51 + 2] - kpt3[51 + 0]];
persons2.push({
id: p,
id,
score: score2,
boxRaw: boxRaw2,
box: [
@ -10123,7 +10126,7 @@ async function parseMultiPose(res, config3, image24) {
Math.trunc(boxRaw2[2] * (image24.shape[2] || 0)),
Math.trunc(boxRaw2[3] * (image24.shape[1] || 0))
],
keypoints: keypoints2
keypoints: [...keypoints2]
});
}
return persons2;
@ -10152,13 +10155,13 @@ async function predict9(image24, config3) {
if (!resT)
resolve([]);
const res = await resT.array();
let persons2;
let body4;
if (resT.shape[2] === 17)
persons2 = await parseSinglePose(res, config3, image24);
body4 = await parseSinglePose(res, config3, image24);
else if (resT.shape[2] === 56)
persons2 = await parseMultiPose(res, config3, image24);
body4 = await parseMultiPose(res, config3, image24);
tfjs_esm_exports.dispose(resT);
resolve(persons2);
resolve(body4);
});
}
@ -12649,29 +12652,32 @@ var Human = class {
if (elapsedTime > 0)
this.performance.face = elapsedTime;
}
if (this.config.async && (this.config.body.maxDetected === -1 || this.config.hand.maxDetected === -1))
faceRes = await faceRes;
this.analyze("Start Body:");
this.state = "detect:body";
const bodyConfig = this.config.body.maxDetected === -1 ? mergeDeep(this.config, { body: { maxDetected: 1 * faceRes.length } }) : this.config;
if (this.config.async) {
if ((_a = this.config.body.modelPath) == null ? void 0 : _a.includes("posenet"))
bodyRes = this.config.body.enabled ? predict4(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict4(img.tensor, bodyConfig) : [];
else if ((_b = this.config.body.modelPath) == null ? void 0 : _b.includes("blazepose"))
bodyRes = this.config.body.enabled ? predict7(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict7(img.tensor, bodyConfig) : [];
else if ((_c = this.config.body.modelPath) == null ? void 0 : _c.includes("efficientpose"))
bodyRes = this.config.body.enabled ? predict8(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict8(img.tensor, bodyConfig) : [];
else if ((_d = this.config.body.modelPath) == null ? void 0 : _d.includes("movenet"))
bodyRes = this.config.body.enabled ? predict9(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict9(img.tensor, bodyConfig) : [];
if (this.performance.body)
delete this.performance.body;
} else {
timeStamp = now();
if ((_e = this.config.body.modelPath) == null ? void 0 : _e.includes("posenet"))
bodyRes = this.config.body.enabled ? await predict4(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict4(img.tensor, bodyConfig) : [];
else if ((_f = this.config.body.modelPath) == null ? void 0 : _f.includes("blazepose"))
bodyRes = this.config.body.enabled ? await predict7(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict7(img.tensor, bodyConfig) : [];
else if ((_g = this.config.body.modelPath) == null ? void 0 : _g.includes("efficientpose"))
bodyRes = this.config.body.enabled ? await predict8(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict8(img.tensor, bodyConfig) : [];
else if ((_h = this.config.body.modelPath) == null ? void 0 : _h.includes("movenet"))
bodyRes = this.config.body.enabled ? await predict9(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict9(img.tensor, bodyConfig) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.body = elapsedTime;
@ -12679,19 +12685,20 @@ var Human = class {
this.analyze("End Body:");
this.analyze("Start Hand:");
this.state = "detect:hand";
const handConfig = this.config.hand.maxDetected === -1 ? mergeDeep(this.config, { hand: { maxDetected: 2 * faceRes.length } }) : this.config;
if (this.config.async) {
if ((_j = (_i = this.config.hand.detector) == null ? void 0 : _i.modelPath) == null ? void 0 : _j.includes("handdetect"))
handRes = this.config.hand.enabled ? predict5(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? predict5(img.tensor, handConfig) : [];
else if ((_l = (_k = this.config.hand.detector) == null ? void 0 : _k.modelPath) == null ? void 0 : _l.includes("handtrack"))
handRes = this.config.hand.enabled ? predict6(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? predict6(img.tensor, handConfig) : [];
if (this.performance.hand)
delete this.performance.hand;
} else {
timeStamp = now();
if ((_n = (_m = this.config.hand.detector) == null ? void 0 : _m.modelPath) == null ? void 0 : _n.includes("handdetect"))
handRes = this.config.hand.enabled ? await predict5(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? await predict5(img.tensor, handConfig) : [];
else if ((_p = (_o = this.config.hand.detector) == null ? void 0 : _o.modelPath) == null ? void 0 : _p.includes("handtrack"))
handRes = this.config.hand.enabled ? await predict6(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? await predict6(img.tensor, handConfig) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.hand = elapsedTime;

File diff suppressed because one or more lines are too long
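Aside: the new branch in `detectHands` above expands the raw detector box around its center by `boxScaleFact` when hand landmarks are enabled, presumably so the landmark model sees some context around the hand. The same math restated as a standalone sketch (names here are illustrative, not the library's API):

```js
const boxScaleFact = 1.5; // same constant as in the bundled code above

// detector boxes arrive in [y1, x1, y2, x2] order; scale the box around its center
function scaleYxBox([y1, x1, y2, x2], fact = boxScaleFact) {
  const center = [(y1 + y2) / 2, (x1 + x2) / 2];
  const half = [center[0] - y1, center[1] - x1, y2 - center[0], x2 - center[1]];
  return [center[0] - fact * half[0], center[1] - fact * half[1], center[0] + fact * half[2], center[1] + fact * half[3]];
}

console.log(scaleYxBox([0.2, 0.3, 0.4, 0.5])); // -> [0.15, 0.25, 0.45, 0.55]
```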

71
dist/human.esm.js vendored

@ -164,7 +164,7 @@ var config = {
body: {
enabled: true,
modelPath: "movenet-lightning.json",
maxDetected: 1,
maxDetected: -1,
minConfidence: 0.2,
skipFrames: 1
},
@ -174,7 +174,7 @@ var config = {
skipFrames: 18,
minConfidence: 0.8,
iouThreshold: 0.2,
maxDetected: 1,
maxDetected: -1,
landmarks: true,
detector: {
modelPath: "handdetect.json"
@ -69654,6 +69654,7 @@ function fakeOps(kernelNames, config3) {
}
// src/handtrack/handtrack.ts
var boxScaleFact = 1.5;
var models2 = [null, null];
var modelOutputNodes = ["StatefulPartitionedCall/Postprocessor/Slice", "StatefulPartitionedCall/Postprocessor/ExpandDims_1"];
var inputSize = [[0, 0], [0, 0]];
@ -69739,7 +69740,15 @@ async function detectHands(input2, config3) {
dispose(t.nms);
for (const res of Array.from(nms)) {
const boxSlice = slice(t.boxes, res, 1);
const yxBox = await boxSlice.data();
let yxBox = [0, 0, 0, 0];
if (config3.hand.landmarks) {
const detectedBox = await boxSlice.data();
const boxCenter = [(detectedBox[0] + detectedBox[2]) / 2, (detectedBox[1] + detectedBox[3]) / 2];
const boxDiff = [+boxCenter[0] - detectedBox[0], +boxCenter[1] - detectedBox[1], -boxCenter[0] + detectedBox[2], -boxCenter[1] + detectedBox[3]];
yxBox = [boxCenter[0] - boxScaleFact * boxDiff[0], boxCenter[1] - boxScaleFact * boxDiff[1], boxCenter[0] + boxScaleFact * boxDiff[2], boxCenter[1] + boxScaleFact * boxDiff[3]];
} else {
yxBox = await boxSlice.data();
}
const boxRaw3 = [yxBox[1], yxBox[0], yxBox[3] - yxBox[1], yxBox[2] - yxBox[0]];
const box6 = [Math.trunc(boxRaw3[0] * outputSize[0]), Math.trunc(boxRaw3[1] * outputSize[1]), Math.trunc(boxRaw3[2] * outputSize[0]), Math.trunc(boxRaw3[3] * outputSize[1])];
dispose(boxSlice);
@ -69757,7 +69766,6 @@ async function detectHands(input2, config3) {
hands.length = config3.hand.maxDetected || 1;
return hands;
}
var boxScaleFact = 1.5;
function updateBoxes(h, keypoints3) {
const finger = [keypoints3.map((pt) => pt[0]), keypoints3.map((pt) => pt[1])];
const minmax = [Math.min(...finger[0]), Math.max(...finger[0]), Math.min(...finger[1]), Math.max(...finger[1])];
@ -70105,6 +70113,7 @@ async function load9(config3) {
if (env2.initial)
model7 = null;
if (!model7) {
fakeOps(["size"], config3);
model7 = await loadGraphModel(join(config3.modelBasePath, config3.body.modelPath || ""));
if (!model7 || !model7["modelUrl"])
log("load model failed:", config3.body.modelPath);
@ -70157,8 +70166,8 @@ async function parseSinglePose(res, config3, image7) {
}
async function parseMultiPose(res, config3, image7) {
const persons2 = [];
for (let p2 = 0; p2 < res[0].length; p2++) {
const kpt3 = res[0][p2];
for (let id = 0; id < res[0].length; id++) {
const kpt3 = res[0][id];
score2 = Math.round(100 * kpt3[51 + 4]) / 100;
if (score2 < config3.body.minConfidence)
continue;
@ -70169,20 +70178,14 @@ async function parseMultiPose(res, config3, image7) {
keypoints2.push({
part: bodyParts2[i],
score: partScore,
positionRaw: [
kpt3[3 * i + 1],
kpt3[3 * i + 0]
],
position: [
Math.trunc(kpt3[3 * i + 1] * (image7.shape[2] || 0)),
Math.trunc(kpt3[3 * i + 0] * (image7.shape[1] || 0))
]
positionRaw: [kpt3[3 * i + 1], kpt3[3 * i + 0]],
position: [Math.trunc(kpt3[3 * i + 1] * (image7.shape[2] || 0)), Math.trunc(kpt3[3 * i + 0] * (image7.shape[1] || 0))]
});
}
}
boxRaw2 = [kpt3[51 + 1], kpt3[51 + 0], kpt3[51 + 3] - kpt3[51 + 1], kpt3[51 + 2] - kpt3[51 + 0]];
persons2.push({
id: p2,
id,
score: score2,
boxRaw: boxRaw2,
box: [
@ -70191,7 +70194,7 @@ async function parseMultiPose(res, config3, image7) {
Math.trunc(boxRaw2[2] * (image7.shape[2] || 0)),
Math.trunc(boxRaw2[3] * (image7.shape[1] || 0))
],
keypoints: keypoints2
keypoints: [...keypoints2]
});
}
return persons2;
@ -70220,13 +70223,13 @@ async function predict9(image7, config3) {
if (!resT)
resolve([]);
const res = await resT.array();
let persons2;
let body4;
if (resT.shape[2] === 17)
persons2 = await parseSinglePose(res, config3, image7);
body4 = await parseSinglePose(res, config3, image7);
else if (resT.shape[2] === 56)
persons2 = await parseMultiPose(res, config3, image7);
body4 = await parseMultiPose(res, config3, image7);
dispose(resT);
resolve(persons2);
resolve(body4);
});
}
@ -72717,29 +72720,32 @@ var Human = class {
if (elapsedTime > 0)
this.performance.face = elapsedTime;
}
if (this.config.async && (this.config.body.maxDetected === -1 || this.config.hand.maxDetected === -1))
faceRes = await faceRes;
this.analyze("Start Body:");
this.state = "detect:body";
const bodyConfig = this.config.body.maxDetected === -1 ? mergeDeep(this.config, { body: { maxDetected: 1 * faceRes.length } }) : this.config;
if (this.config.async) {
if ((_a = this.config.body.modelPath) == null ? void 0 : _a.includes("posenet"))
bodyRes = this.config.body.enabled ? predict4(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict4(img.tensor, bodyConfig) : [];
else if ((_b = this.config.body.modelPath) == null ? void 0 : _b.includes("blazepose"))
bodyRes = this.config.body.enabled ? predict7(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict7(img.tensor, bodyConfig) : [];
else if ((_c = this.config.body.modelPath) == null ? void 0 : _c.includes("efficientpose"))
bodyRes = this.config.body.enabled ? predict8(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict8(img.tensor, bodyConfig) : [];
else if ((_d = this.config.body.modelPath) == null ? void 0 : _d.includes("movenet"))
bodyRes = this.config.body.enabled ? predict9(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict9(img.tensor, bodyConfig) : [];
if (this.performance.body)
delete this.performance.body;
} else {
timeStamp = now();
if ((_e = this.config.body.modelPath) == null ? void 0 : _e.includes("posenet"))
bodyRes = this.config.body.enabled ? await predict4(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict4(img.tensor, bodyConfig) : [];
else if ((_f = this.config.body.modelPath) == null ? void 0 : _f.includes("blazepose"))
bodyRes = this.config.body.enabled ? await predict7(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict7(img.tensor, bodyConfig) : [];
else if ((_g = this.config.body.modelPath) == null ? void 0 : _g.includes("efficientpose"))
bodyRes = this.config.body.enabled ? await predict8(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict8(img.tensor, bodyConfig) : [];
else if ((_h = this.config.body.modelPath) == null ? void 0 : _h.includes("movenet"))
bodyRes = this.config.body.enabled ? await predict9(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict9(img.tensor, bodyConfig) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.body = elapsedTime;
@ -72747,19 +72753,20 @@ var Human = class {
this.analyze("End Body:");
this.analyze("Start Hand:");
this.state = "detect:hand";
const handConfig = this.config.hand.maxDetected === -1 ? mergeDeep(this.config, { hand: { maxDetected: 2 * faceRes.length } }) : this.config;
if (this.config.async) {
if ((_j = (_i = this.config.hand.detector) == null ? void 0 : _i.modelPath) == null ? void 0 : _j.includes("handdetect"))
handRes = this.config.hand.enabled ? predict5(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? predict5(img.tensor, handConfig) : [];
else if ((_l = (_k = this.config.hand.detector) == null ? void 0 : _k.modelPath) == null ? void 0 : _l.includes("handtrack"))
handRes = this.config.hand.enabled ? predict6(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? predict6(img.tensor, handConfig) : [];
if (this.performance.hand)
delete this.performance.hand;
} else {
timeStamp = now();
if ((_n = (_m = this.config.hand.detector) == null ? void 0 : _m.modelPath) == null ? void 0 : _n.includes("handdetect"))
handRes = this.config.hand.enabled ? await predict5(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? await predict5(img.tensor, handConfig) : [];
else if ((_p = (_o = this.config.hand.detector) == null ? void 0 : _o.modelPath) == null ? void 0 : _p.includes("handtrack"))
handRes = this.config.hand.enabled ? await predict6(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? await predict6(img.tensor, handConfig) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.hand = elapsedTime;

File diff suppressed because one or more lines are too long

494
dist/human.js vendored

File diff suppressed because one or more lines are too long


@ -211,7 +211,7 @@ var config = {
body: {
enabled: true,
modelPath: "movenet-lightning.json",
maxDetected: 1,
maxDetected: -1,
minConfidence: 0.2,
skipFrames: 1
},
@ -221,7 +221,7 @@ var config = {
skipFrames: 18,
minConfidence: 0.8,
iouThreshold: 0.2,
maxDetected: 1,
maxDetected: -1,
landmarks: true,
detector: {
modelPath: "handdetect.json"
@ -9638,6 +9638,7 @@ function fakeOps(kernelNames, config3) {
}
// src/handtrack/handtrack.ts
var boxScaleFact = 1.5;
var models2 = [null, null];
var modelOutputNodes = ["StatefulPartitionedCall/Postprocessor/Slice", "StatefulPartitionedCall/Postprocessor/ExpandDims_1"];
var inputSize = [[0, 0], [0, 0]];
@ -9723,7 +9724,15 @@ async function detectHands(input, config3) {
tf16.dispose(t.nms);
for (const res of Array.from(nms)) {
const boxSlice = tf16.slice(t.boxes, res, 1);
const yxBox = await boxSlice.data();
let yxBox = [0, 0, 0, 0];
if (config3.hand.landmarks) {
const detectedBox = await boxSlice.data();
const boxCenter = [(detectedBox[0] + detectedBox[2]) / 2, (detectedBox[1] + detectedBox[3]) / 2];
const boxDiff = [+boxCenter[0] - detectedBox[0], +boxCenter[1] - detectedBox[1], -boxCenter[0] + detectedBox[2], -boxCenter[1] + detectedBox[3]];
yxBox = [boxCenter[0] - boxScaleFact * boxDiff[0], boxCenter[1] - boxScaleFact * boxDiff[1], boxCenter[0] + boxScaleFact * boxDiff[2], boxCenter[1] + boxScaleFact * boxDiff[3]];
} else {
yxBox = await boxSlice.data();
}
const boxRaw3 = [yxBox[1], yxBox[0], yxBox[3] - yxBox[1], yxBox[2] - yxBox[0]];
const box6 = [Math.trunc(boxRaw3[0] * outputSize[0]), Math.trunc(boxRaw3[1] * outputSize[1]), Math.trunc(boxRaw3[2] * outputSize[0]), Math.trunc(boxRaw3[3] * outputSize[1])];
tf16.dispose(boxSlice);
@ -9741,7 +9750,6 @@ async function detectHands(input, config3) {
hands.length = config3.hand.maxDetected || 1;
return hands;
}
var boxScaleFact = 1.5;
function updateBoxes(h, keypoints3) {
const finger = [keypoints3.map((pt) => pt[0]), keypoints3.map((pt) => pt[1])];
const minmax = [Math.min(...finger[0]), Math.max(...finger[0]), Math.min(...finger[1]), Math.max(...finger[1])];
@ -10094,6 +10102,7 @@ async function load9(config3) {
if (env.initial)
model6 = null;
if (!model6) {
fakeOps(["size"], config3);
model6 = await tf19.loadGraphModel(join(config3.modelBasePath, config3.body.modelPath || ""));
if (!model6 || !model6["modelUrl"])
log("load model failed:", config3.body.modelPath);
@ -10146,8 +10155,8 @@ async function parseSinglePose(res, config3, image24) {
}
async function parseMultiPose(res, config3, image24) {
const persons2 = [];
for (let p = 0; p < res[0].length; p++) {
const kpt3 = res[0][p];
for (let id = 0; id < res[0].length; id++) {
const kpt3 = res[0][id];
score2 = Math.round(100 * kpt3[51 + 4]) / 100;
if (score2 < config3.body.minConfidence)
continue;
@ -10158,20 +10167,14 @@ async function parseMultiPose(res, config3, image24) {
keypoints2.push({
part: bodyParts2[i],
score: partScore,
positionRaw: [
kpt3[3 * i + 1],
kpt3[3 * i + 0]
],
position: [
Math.trunc(kpt3[3 * i + 1] * (image24.shape[2] || 0)),
Math.trunc(kpt3[3 * i + 0] * (image24.shape[1] || 0))
]
positionRaw: [kpt3[3 * i + 1], kpt3[3 * i + 0]],
position: [Math.trunc(kpt3[3 * i + 1] * (image24.shape[2] || 0)), Math.trunc(kpt3[3 * i + 0] * (image24.shape[1] || 0))]
});
}
}
boxRaw2 = [kpt3[51 + 1], kpt3[51 + 0], kpt3[51 + 3] - kpt3[51 + 1], kpt3[51 + 2] - kpt3[51 + 0]];
persons2.push({
id: p,
id,
score: score2,
boxRaw: boxRaw2,
box: [
@ -10180,7 +10183,7 @@ async function parseMultiPose(res, config3, image24) {
Math.trunc(boxRaw2[2] * (image24.shape[2] || 0)),
Math.trunc(boxRaw2[3] * (image24.shape[1] || 0))
],
keypoints: keypoints2
keypoints: [...keypoints2]
});
}
return persons2;
@ -10209,13 +10212,13 @@ async function predict9(image24, config3) {
if (!resT)
resolve([]);
const res = await resT.array();
let persons2;
let body4;
if (resT.shape[2] === 17)
persons2 = await parseSinglePose(res, config3, image24);
body4 = await parseSinglePose(res, config3, image24);
else if (resT.shape[2] === 56)
persons2 = await parseMultiPose(res, config3, image24);
body4 = await parseMultiPose(res, config3, image24);
tf19.dispose(resT);
resolve(persons2);
resolve(body4);
});
}
@ -12714,29 +12717,32 @@ var Human = class {
if (elapsedTime > 0)
this.performance.face = elapsedTime;
}
if (this.config.async && (this.config.body.maxDetected === -1 || this.config.hand.maxDetected === -1))
faceRes = await faceRes;
this.analyze("Start Body:");
this.state = "detect:body";
const bodyConfig = this.config.body.maxDetected === -1 ? mergeDeep(this.config, { body: { maxDetected: 1 * faceRes.length } }) : this.config;
if (this.config.async) {
if ((_a = this.config.body.modelPath) == null ? void 0 : _a.includes("posenet"))
bodyRes = this.config.body.enabled ? predict4(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict4(img.tensor, bodyConfig) : [];
else if ((_b = this.config.body.modelPath) == null ? void 0 : _b.includes("blazepose"))
bodyRes = this.config.body.enabled ? predict7(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict7(img.tensor, bodyConfig) : [];
else if ((_c = this.config.body.modelPath) == null ? void 0 : _c.includes("efficientpose"))
bodyRes = this.config.body.enabled ? predict8(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict8(img.tensor, bodyConfig) : [];
else if ((_d = this.config.body.modelPath) == null ? void 0 : _d.includes("movenet"))
bodyRes = this.config.body.enabled ? predict9(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict9(img.tensor, bodyConfig) : [];
if (this.performance.body)
delete this.performance.body;
} else {
timeStamp = now();
if ((_e = this.config.body.modelPath) == null ? void 0 : _e.includes("posenet"))
bodyRes = this.config.body.enabled ? await predict4(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict4(img.tensor, bodyConfig) : [];
else if ((_f = this.config.body.modelPath) == null ? void 0 : _f.includes("blazepose"))
bodyRes = this.config.body.enabled ? await predict7(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict7(img.tensor, bodyConfig) : [];
else if ((_g = this.config.body.modelPath) == null ? void 0 : _g.includes("efficientpose"))
bodyRes = this.config.body.enabled ? await predict8(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict8(img.tensor, bodyConfig) : [];
else if ((_h = this.config.body.modelPath) == null ? void 0 : _h.includes("movenet"))
bodyRes = this.config.body.enabled ? await predict9(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict9(img.tensor, bodyConfig) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.body = elapsedTime;
@ -12744,19 +12750,20 @@ var Human = class {
this.analyze("End Body:");
this.analyze("Start Hand:");
this.state = "detect:hand";
const handConfig = this.config.hand.maxDetected === -1 ? mergeDeep(this.config, { hand: { maxDetected: 2 * faceRes.length } }) : this.config;
if (this.config.async) {
if ((_j = (_i = this.config.hand.detector) == null ? void 0 : _i.modelPath) == null ? void 0 : _j.includes("handdetect"))
handRes = this.config.hand.enabled ? predict5(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? predict5(img.tensor, handConfig) : [];
else if ((_l = (_k = this.config.hand.detector) == null ? void 0 : _k.modelPath) == null ? void 0 : _l.includes("handtrack"))
handRes = this.config.hand.enabled ? predict6(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? predict6(img.tensor, handConfig) : [];
if (this.performance.hand)
delete this.performance.hand;
} else {
timeStamp = now();
if ((_n = (_m = this.config.hand.detector) == null ? void 0 : _m.modelPath) == null ? void 0 : _n.includes("handdetect"))
handRes = this.config.hand.enabled ? await predict5(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? await predict5(img.tensor, handConfig) : [];
else if ((_p = (_o = this.config.hand.detector) == null ? void 0 : _o.modelPath) == null ? void 0 : _p.includes("handtrack"))
handRes = this.config.hand.enabled ? await predict6(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? await predict6(img.tensor, handConfig) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.hand = elapsedTime;


@ -212,7 +212,7 @@ var config = {
body: {
enabled: true,
modelPath: "movenet-lightning.json",
maxDetected: 1,
maxDetected: -1,
minConfidence: 0.2,
skipFrames: 1
},
@ -222,7 +222,7 @@ var config = {
skipFrames: 18,
minConfidence: 0.8,
iouThreshold: 0.2,
maxDetected: 1,
maxDetected: -1,
landmarks: true,
detector: {
modelPath: "handdetect.json"
@ -9639,6 +9639,7 @@ function fakeOps(kernelNames, config3) {
}
// src/handtrack/handtrack.ts
var boxScaleFact = 1.5;
var models2 = [null, null];
var modelOutputNodes = ["StatefulPartitionedCall/Postprocessor/Slice", "StatefulPartitionedCall/Postprocessor/ExpandDims_1"];
var inputSize = [[0, 0], [0, 0]];
@ -9724,7 +9725,15 @@ async function detectHands(input, config3) {
tf16.dispose(t.nms);
for (const res of Array.from(nms)) {
const boxSlice = tf16.slice(t.boxes, res, 1);
const yxBox = await boxSlice.data();
let yxBox = [0, 0, 0, 0];
if (config3.hand.landmarks) {
const detectedBox = await boxSlice.data();
const boxCenter = [(detectedBox[0] + detectedBox[2]) / 2, (detectedBox[1] + detectedBox[3]) / 2];
const boxDiff = [+boxCenter[0] - detectedBox[0], +boxCenter[1] - detectedBox[1], -boxCenter[0] + detectedBox[2], -boxCenter[1] + detectedBox[3]];
yxBox = [boxCenter[0] - boxScaleFact * boxDiff[0], boxCenter[1] - boxScaleFact * boxDiff[1], boxCenter[0] + boxScaleFact * boxDiff[2], boxCenter[1] + boxScaleFact * boxDiff[3]];
} else {
yxBox = await boxSlice.data();
}
const boxRaw3 = [yxBox[1], yxBox[0], yxBox[3] - yxBox[1], yxBox[2] - yxBox[0]];
const box6 = [Math.trunc(boxRaw3[0] * outputSize[0]), Math.trunc(boxRaw3[1] * outputSize[1]), Math.trunc(boxRaw3[2] * outputSize[0]), Math.trunc(boxRaw3[3] * outputSize[1])];
tf16.dispose(boxSlice);
@ -9742,7 +9751,6 @@ async function detectHands(input, config3) {
hands.length = config3.hand.maxDetected || 1;
return hands;
}
var boxScaleFact = 1.5;
function updateBoxes(h, keypoints3) {
const finger = [keypoints3.map((pt) => pt[0]), keypoints3.map((pt) => pt[1])];
const minmax = [Math.min(...finger[0]), Math.max(...finger[0]), Math.min(...finger[1]), Math.max(...finger[1])];
@ -10095,6 +10103,7 @@ async function load9(config3) {
if (env.initial)
model6 = null;
if (!model6) {
fakeOps(["size"], config3);
model6 = await tf19.loadGraphModel(join(config3.modelBasePath, config3.body.modelPath || ""));
if (!model6 || !model6["modelUrl"])
log("load model failed:", config3.body.modelPath);
@ -10147,8 +10156,8 @@ async function parseSinglePose(res, config3, image24) {
}
async function parseMultiPose(res, config3, image24) {
const persons2 = [];
for (let p = 0; p < res[0].length; p++) {
const kpt3 = res[0][p];
for (let id = 0; id < res[0].length; id++) {
const kpt3 = res[0][id];
score2 = Math.round(100 * kpt3[51 + 4]) / 100;
if (score2 < config3.body.minConfidence)
continue;
@ -10159,20 +10168,14 @@ async function parseMultiPose(res, config3, image24) {
keypoints2.push({
part: bodyParts2[i],
score: partScore,
positionRaw: [
kpt3[3 * i + 1],
kpt3[3 * i + 0]
],
position: [
Math.trunc(kpt3[3 * i + 1] * (image24.shape[2] || 0)),
Math.trunc(kpt3[3 * i + 0] * (image24.shape[1] || 0))
]
positionRaw: [kpt3[3 * i + 1], kpt3[3 * i + 0]],
position: [Math.trunc(kpt3[3 * i + 1] * (image24.shape[2] || 0)), Math.trunc(kpt3[3 * i + 0] * (image24.shape[1] || 0))]
});
}
}
boxRaw2 = [kpt3[51 + 1], kpt3[51 + 0], kpt3[51 + 3] - kpt3[51 + 1], kpt3[51 + 2] - kpt3[51 + 0]];
persons2.push({
id: p,
id,
score: score2,
boxRaw: boxRaw2,
box: [
@ -10181,7 +10184,7 @@ async function parseMultiPose(res, config3, image24) {
Math.trunc(boxRaw2[2] * (image24.shape[2] || 0)),
Math.trunc(boxRaw2[3] * (image24.shape[1] || 0))
],
keypoints: keypoints2
keypoints: [...keypoints2]
});
}
return persons2;
@ -10210,13 +10213,13 @@ async function predict9(image24, config3) {
if (!resT)
resolve([]);
const res = await resT.array();
let persons2;
let body4;
if (resT.shape[2] === 17)
persons2 = await parseSinglePose(res, config3, image24);
body4 = await parseSinglePose(res, config3, image24);
else if (resT.shape[2] === 56)
persons2 = await parseMultiPose(res, config3, image24);
body4 = await parseMultiPose(res, config3, image24);
tf19.dispose(resT);
resolve(persons2);
resolve(body4);
});
}
@ -12715,29 +12718,32 @@ var Human = class {
if (elapsedTime > 0)
this.performance.face = elapsedTime;
}
if (this.config.async && (this.config.body.maxDetected === -1 || this.config.hand.maxDetected === -1))
faceRes = await faceRes;
this.analyze("Start Body:");
this.state = "detect:body";
const bodyConfig = this.config.body.maxDetected === -1 ? mergeDeep(this.config, { body: { maxDetected: 1 * faceRes.length } }) : this.config;
if (this.config.async) {
if ((_a = this.config.body.modelPath) == null ? void 0 : _a.includes("posenet"))
bodyRes = this.config.body.enabled ? predict4(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict4(img.tensor, bodyConfig) : [];
else if ((_b = this.config.body.modelPath) == null ? void 0 : _b.includes("blazepose"))
bodyRes = this.config.body.enabled ? predict7(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict7(img.tensor, bodyConfig) : [];
else if ((_c = this.config.body.modelPath) == null ? void 0 : _c.includes("efficientpose"))
bodyRes = this.config.body.enabled ? predict8(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict8(img.tensor, bodyConfig) : [];
else if ((_d = this.config.body.modelPath) == null ? void 0 : _d.includes("movenet"))
bodyRes = this.config.body.enabled ? predict9(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict9(img.tensor, bodyConfig) : [];
if (this.performance.body)
delete this.performance.body;
} else {
timeStamp = now();
if ((_e = this.config.body.modelPath) == null ? void 0 : _e.includes("posenet"))
bodyRes = this.config.body.enabled ? await predict4(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict4(img.tensor, bodyConfig) : [];
else if ((_f = this.config.body.modelPath) == null ? void 0 : _f.includes("blazepose"))
bodyRes = this.config.body.enabled ? await predict7(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict7(img.tensor, bodyConfig) : [];
else if ((_g = this.config.body.modelPath) == null ? void 0 : _g.includes("efficientpose"))
bodyRes = this.config.body.enabled ? await predict8(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict8(img.tensor, bodyConfig) : [];
else if ((_h = this.config.body.modelPath) == null ? void 0 : _h.includes("movenet"))
bodyRes = this.config.body.enabled ? await predict9(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict9(img.tensor, bodyConfig) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.body = elapsedTime;
@ -12745,19 +12751,20 @@ var Human = class {
this.analyze("End Body:");
this.analyze("Start Hand:");
this.state = "detect:hand";
const handConfig = this.config.hand.maxDetected === -1 ? mergeDeep(this.config, { hand: { maxDetected: 2 * faceRes.length } }) : this.config;
if (this.config.async) {
if ((_j = (_i = this.config.hand.detector) == null ? void 0 : _i.modelPath) == null ? void 0 : _j.includes("handdetect"))
handRes = this.config.hand.enabled ? predict5(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? predict5(img.tensor, handConfig) : [];
else if ((_l = (_k = this.config.hand.detector) == null ? void 0 : _k.modelPath) == null ? void 0 : _l.includes("handtrack"))
handRes = this.config.hand.enabled ? predict6(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? predict6(img.tensor, handConfig) : [];
if (this.performance.hand)
delete this.performance.hand;
} else {
timeStamp = now();
if ((_n = (_m = this.config.hand.detector) == null ? void 0 : _m.modelPath) == null ? void 0 : _n.includes("handdetect"))
handRes = this.config.hand.enabled ? await predict5(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? await predict5(img.tensor, handConfig) : [];
else if ((_p = (_o = this.config.hand.detector) == null ? void 0 : _o.modelPath) == null ? void 0 : _p.includes("handtrack"))
handRes = this.config.hand.enabled ? await predict6(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? await predict6(img.tensor, handConfig) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.hand = elapsedTime;

71
dist/human.node.js vendored

@ -211,7 +211,7 @@ var config = {
body: {
enabled: true,
modelPath: "movenet-lightning.json",
maxDetected: 1,
maxDetected: -1,
minConfidence: 0.2,
skipFrames: 1
},
@ -221,7 +221,7 @@ var config = {
skipFrames: 18,
minConfidence: 0.8,
iouThreshold: 0.2,
maxDetected: 1,
maxDetected: -1,
landmarks: true,
detector: {
modelPath: "handdetect.json"
@ -9638,6 +9638,7 @@ function fakeOps(kernelNames, config3) {
}
// src/handtrack/handtrack.ts
var boxScaleFact = 1.5;
var models2 = [null, null];
var modelOutputNodes = ["StatefulPartitionedCall/Postprocessor/Slice", "StatefulPartitionedCall/Postprocessor/ExpandDims_1"];
var inputSize = [[0, 0], [0, 0]];
@ -9723,7 +9724,15 @@ async function detectHands(input, config3) {
tf16.dispose(t.nms);
for (const res of Array.from(nms)) {
const boxSlice = tf16.slice(t.boxes, res, 1);
const yxBox = await boxSlice.data();
let yxBox = [0, 0, 0, 0];
if (config3.hand.landmarks) {
const detectedBox = await boxSlice.data();
const boxCenter = [(detectedBox[0] + detectedBox[2]) / 2, (detectedBox[1] + detectedBox[3]) / 2];
const boxDiff = [+boxCenter[0] - detectedBox[0], +boxCenter[1] - detectedBox[1], -boxCenter[0] + detectedBox[2], -boxCenter[1] + detectedBox[3]];
yxBox = [boxCenter[0] - boxScaleFact * boxDiff[0], boxCenter[1] - boxScaleFact * boxDiff[1], boxCenter[0] + boxScaleFact * boxDiff[2], boxCenter[1] + boxScaleFact * boxDiff[3]];
} else {
yxBox = await boxSlice.data();
}
const boxRaw3 = [yxBox[1], yxBox[0], yxBox[3] - yxBox[1], yxBox[2] - yxBox[0]];
const box6 = [Math.trunc(boxRaw3[0] * outputSize[0]), Math.trunc(boxRaw3[1] * outputSize[1]), Math.trunc(boxRaw3[2] * outputSize[0]), Math.trunc(boxRaw3[3] * outputSize[1])];
tf16.dispose(boxSlice);
@ -9741,7 +9750,6 @@ async function detectHands(input, config3) {
hands.length = config3.hand.maxDetected || 1;
return hands;
}
var boxScaleFact = 1.5;
function updateBoxes(h, keypoints3) {
const finger = [keypoints3.map((pt) => pt[0]), keypoints3.map((pt) => pt[1])];
const minmax = [Math.min(...finger[0]), Math.max(...finger[0]), Math.min(...finger[1]), Math.max(...finger[1])];
@ -10094,6 +10102,7 @@ async function load9(config3) {
if (env.initial)
model6 = null;
if (!model6) {
fakeOps(["size"], config3);
model6 = await tf19.loadGraphModel(join(config3.modelBasePath, config3.body.modelPath || ""));
if (!model6 || !model6["modelUrl"])
log("load model failed:", config3.body.modelPath);
@ -10146,8 +10155,8 @@ async function parseSinglePose(res, config3, image24) {
}
async function parseMultiPose(res, config3, image24) {
const persons2 = [];
for (let p = 0; p < res[0].length; p++) {
const kpt3 = res[0][p];
for (let id = 0; id < res[0].length; id++) {
const kpt3 = res[0][id];
score2 = Math.round(100 * kpt3[51 + 4]) / 100;
if (score2 < config3.body.minConfidence)
continue;
@ -10158,20 +10167,14 @@ async function parseMultiPose(res, config3, image24) {
keypoints2.push({
part: bodyParts2[i],
score: partScore,
positionRaw: [
kpt3[3 * i + 1],
kpt3[3 * i + 0]
],
position: [
Math.trunc(kpt3[3 * i + 1] * (image24.shape[2] || 0)),
Math.trunc(kpt3[3 * i + 0] * (image24.shape[1] || 0))
]
positionRaw: [kpt3[3 * i + 1], kpt3[3 * i + 0]],
position: [Math.trunc(kpt3[3 * i + 1] * (image24.shape[2] || 0)), Math.trunc(kpt3[3 * i + 0] * (image24.shape[1] || 0))]
});
}
}
boxRaw2 = [kpt3[51 + 1], kpt3[51 + 0], kpt3[51 + 3] - kpt3[51 + 1], kpt3[51 + 2] - kpt3[51 + 0]];
persons2.push({
id: p,
id,
score: score2,
boxRaw: boxRaw2,
box: [
@ -10180,7 +10183,7 @@ async function parseMultiPose(res, config3, image24) {
Math.trunc(boxRaw2[2] * (image24.shape[2] || 0)),
Math.trunc(boxRaw2[3] * (image24.shape[1] || 0))
],
keypoints: keypoints2
keypoints: [...keypoints2]
});
}
return persons2;
@ -10209,13 +10212,13 @@ async function predict9(image24, config3) {
if (!resT)
resolve([]);
const res = await resT.array();
let persons2;
let body4;
if (resT.shape[2] === 17)
persons2 = await parseSinglePose(res, config3, image24);
body4 = await parseSinglePose(res, config3, image24);
else if (resT.shape[2] === 56)
persons2 = await parseMultiPose(res, config3, image24);
body4 = await parseMultiPose(res, config3, image24);
tf19.dispose(resT);
resolve(persons2);
resolve(body4);
});
}
@ -12714,29 +12717,32 @@ var Human = class {
if (elapsedTime > 0)
this.performance.face = elapsedTime;
}
if (this.config.async && (this.config.body.maxDetected === -1 || this.config.hand.maxDetected === -1))
faceRes = await faceRes;
this.analyze("Start Body:");
this.state = "detect:body";
const bodyConfig = this.config.body.maxDetected === -1 ? mergeDeep(this.config, { body: { maxDetected: 1 * faceRes.length } }) : this.config;
if (this.config.async) {
if ((_a = this.config.body.modelPath) == null ? void 0 : _a.includes("posenet"))
bodyRes = this.config.body.enabled ? predict4(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict4(img.tensor, bodyConfig) : [];
else if ((_b = this.config.body.modelPath) == null ? void 0 : _b.includes("blazepose"))
bodyRes = this.config.body.enabled ? predict7(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict7(img.tensor, bodyConfig) : [];
else if ((_c = this.config.body.modelPath) == null ? void 0 : _c.includes("efficientpose"))
bodyRes = this.config.body.enabled ? predict8(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict8(img.tensor, bodyConfig) : [];
else if ((_d = this.config.body.modelPath) == null ? void 0 : _d.includes("movenet"))
bodyRes = this.config.body.enabled ? predict9(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict9(img.tensor, bodyConfig) : [];
if (this.performance.body)
delete this.performance.body;
} else {
timeStamp = now();
if ((_e = this.config.body.modelPath) == null ? void 0 : _e.includes("posenet"))
bodyRes = this.config.body.enabled ? await predict4(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict4(img.tensor, bodyConfig) : [];
else if ((_f = this.config.body.modelPath) == null ? void 0 : _f.includes("blazepose"))
bodyRes = this.config.body.enabled ? await predict7(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict7(img.tensor, bodyConfig) : [];
else if ((_g = this.config.body.modelPath) == null ? void 0 : _g.includes("efficientpose"))
bodyRes = this.config.body.enabled ? await predict8(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict8(img.tensor, bodyConfig) : [];
else if ((_h = this.config.body.modelPath) == null ? void 0 : _h.includes("movenet"))
bodyRes = this.config.body.enabled ? await predict9(img.tensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict9(img.tensor, bodyConfig) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.body = elapsedTime;
@ -12744,19 +12750,20 @@ var Human = class {
this.analyze("End Body:");
this.analyze("Start Hand:");
this.state = "detect:hand";
const handConfig = this.config.hand.maxDetected === -1 ? mergeDeep(this.config, { hand: { maxDetected: 2 * faceRes.length } }) : this.config;
if (this.config.async) {
if ((_j = (_i = this.config.hand.detector) == null ? void 0 : _i.modelPath) == null ? void 0 : _j.includes("handdetect"))
handRes = this.config.hand.enabled ? predict5(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? predict5(img.tensor, handConfig) : [];
else if ((_l = (_k = this.config.hand.detector) == null ? void 0 : _k.modelPath) == null ? void 0 : _l.includes("handtrack"))
handRes = this.config.hand.enabled ? predict6(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? predict6(img.tensor, handConfig) : [];
if (this.performance.hand)
delete this.performance.hand;
} else {
timeStamp = now();
if ((_n = (_m = this.config.hand.detector) == null ? void 0 : _m.modelPath) == null ? void 0 : _n.includes("handdetect"))
handRes = this.config.hand.enabled ? await predict5(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? await predict5(img.tensor, handConfig) : [];
else if ((_p = (_o = this.config.hand.detector) == null ? void 0 : _o.modelPath) == null ? void 0 : _p.includes("handtrack"))
handRes = this.config.hand.enabled ? await predict6(img.tensor, this.config) : [];
handRes = this.config.hand.enabled ? await predict6(img.tensor, handConfig) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.hand = elapsedTime;
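Aside: the `Human.detect` hunks above carry the autodetect rule itself: when `maxDetected` is -1 the face result is awaited first (even in async mode, since the limits depend on it), and the effective limits become 1x the face count for bodies and 2x for hands. A self-contained restatement (the helper name is made up for this sketch):

```js
// Mirrors the bodyConfig/handConfig logic in the diff above; illustrative only.
function effectiveLimits(config, faceCount) {
  const body = config.body.maxDetected === -1 ? 1 * faceCount : config.body.maxDetected;
  const hand = config.hand.maxDetected === -1 ? 2 * faceCount : config.hand.maxDetected;
  return { body, hand };
}

console.log(effectiveLimits({ body: { maxDetected: -1 }, hand: { maxDetected: -1 } }, 3)); // { body: 3, hand: 6 }
```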


@ -3,7 +3,7 @@
Sample Images used by `Human` library demos and automated tests
Not required for normal functioning of library
Samples were generated using default configuration without any fine-tuning using command:
Samples were generated using command:
```shell
node test/test-node-canvas.js samples/in/ samples/out/
```

[The remaining changes are binary image files: regenerated sample images under samples/ (including new files such as samples/out/person-lexi.jpg), shown by the diff viewer only as before/after file sizes. Some files were not shown because too many files changed in this diff.]