mirror of https://github.com/vladmandic/human
update build with automatic linter
parent 4be6fbc545
commit 8a95539741
@@ -11,11 +11,11 @@ Repository: **<git+https://github.com/vladmandic/human.git>**

### **HEAD -> main** 2021/06/02 mandic00@live.com

- switch worker from module to iife importscripts
- release candidate
- added samples to git
- implemented drag & drop for image processing

### **origin/main** 2021/06/01 mandic00@live.com

- release candidate
- breaking changes to results.face output properties
- breaking changes to results.object output properties
- breaking changes to results.hand output properties
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -128,6 +128,7 @@ var config = {
   async: true,
   warmup: "full",
   cacheSensitivity: 0.75,
+  skipFrame: false,
   filter: {
     enabled: true,
     width: 0,
@@ -187,12 +188,13 @@ var config = {
     enabled: true,
     modelPath: "movenet-lightning.json",
     maxDetected: 1,
-    minConfidence: 0.2
+    minConfidence: 0.2,
+    skipFrames: 16
   },
   hand: {
     enabled: true,
     rotation: true,
-    skipFrames: 18,
+    skipFrames: 19,
     minConfidence: 0.1,
     iouThreshold: 0.1,
     maxDetected: 2,
@@ -210,7 +212,7 @@ var config = {
     minConfidence: 0.2,
     iouThreshold: 0.4,
     maxDetected: 10,
-    skipFrames: 19
+    skipFrames: 20
   }
 };

@@ -4084,8 +4086,8 @@ async function predict(input, config3) {
     if (!prediction || prediction.isDisposedInternal)
       continue;
     const meshRaw = prediction.mesh.map((pt) => [
-      pt[0] / input.shape[2],
-      pt[1] / input.shape[1],
+      pt[0] / (input.shape[2] || 0),
+      pt[1] / (input.shape[1] || 0),
       pt[2] / facePipeline.meshSize
     ]);
     const annotations3 = {};
@@ -4096,14 +4098,14 @@ async function predict(input, config3) {
     const clampedBox = prediction.box ? [
       Math.trunc(Math.max(0, prediction.box.startPoint[0])),
       Math.trunc(Math.max(0, prediction.box.startPoint[1])),
-      Math.trunc(Math.min(input.shape[2], prediction.box.endPoint[0]) - Math.max(0, prediction.box.startPoint[0])),
-      Math.trunc(Math.min(input.shape[1], prediction.box.endPoint[1]) - Math.max(0, prediction.box.startPoint[1]))
+      Math.trunc(Math.min(input.shape[2] || 0, prediction.box.endPoint[0]) - Math.max(0, prediction.box.startPoint[0])),
+      Math.trunc(Math.min(input.shape[1] || 0, prediction.box.endPoint[1]) - Math.max(0, prediction.box.startPoint[1]))
     ] : [0, 0, 0, 0];
     const boxRaw3 = prediction.box ? [
-      prediction.box.startPoint[0] / input.shape[2],
-      prediction.box.startPoint[1] / input.shape[1],
-      (prediction.box.endPoint[0] - prediction.box.startPoint[0]) / input.shape[2],
-      (prediction.box.endPoint[1] - prediction.box.startPoint[1]) / input.shape[1]
+      prediction.box.startPoint[0] / (input.shape[2] || 0),
+      prediction.box.startPoint[1] / (input.shape[1] || 0),
+      (prediction.box.endPoint[0] - prediction.box.startPoint[0]) / (input.shape[2] || 0),
+      (prediction.box.endPoint[1] - prediction.box.startPoint[1]) / (input.shape[1] || 0)
     ] : [0, 0, 0, 0];
     results.push({
       id: id++,
@@ -4274,13 +4276,13 @@ function match(embedding, db, threshold = 0) {
 }
 function enhance(input) {
   const image15 = tf7.tidy(() => {
-    const tensor = input.image || input.tensor || input;
-    if (!(tensor instanceof tf7.Tensor))
+    const tensor2 = input.image || input.tensor || input;
+    if (!(tensor2 instanceof tf7.Tensor))
       return null;
     const box6 = [[0.05, 0.15, 0.85, 0.85]];
     if (!model2.inputs[0].shape)
       return null;
-    const crop = tensor.shape.length === 3 ? tf7.image.cropAndResize(tf7.expandDims(tensor, 0), box6, [0], [model2.inputs[0].shape[2], model2.inputs[0].shape[1]]) : tf7.image.cropAndResize(tensor, box6, [0], [model2.inputs[0].shape[2], model2.inputs[0].shape[1]]);
+    const crop = tensor2.shape.length === 3 ? tf7.image.cropAndResize(tf7.expandDims(tensor2, 0), box6, [0], [model2.inputs[0].shape[2], model2.inputs[0].shape[1]]) : tf7.image.cropAndResize(tensor2, box6, [0], [model2.inputs[0].shape[2], model2.inputs[0].shape[1]]);
     const norm = crop.mul(255);
     return norm;
   });
@@ -4452,21 +4454,21 @@ var detectFace = async (parent, input) => {
     const rotation = calculateFaceAngle(faces[i], [input.shape[2], input.shape[1]]);
     parent.analyze("Start Emotion:");
     if (parent.config.async) {
-      emotionRes = parent.config.face.emotion.enabled ? predict2(faces[i].image, parent.config, i, faces.length) : {};
+      emotionRes = parent.config.face.emotion.enabled ? predict2(faces[i].image || tf8.tensor([]), parent.config, i, faces.length) : {};
     } else {
       parent.state = "run:emotion";
       timeStamp = now();
-      emotionRes = parent.config.face.emotion.enabled ? await predict2(faces[i].image, parent.config, i, faces.length) : {};
+      emotionRes = parent.config.face.emotion.enabled ? await predict2(faces[i].image || tf8.tensor([]), parent.config, i, faces.length) : {};
       parent.performance.emotion = Math.trunc(now() - timeStamp);
     }
     parent.analyze("End Emotion:");
     parent.analyze("Start Description:");
     if (parent.config.async) {
-      descRes = parent.config.face.description.enabled ? predict3(faces[i], parent.config, i, faces.length) : [];
+      descRes = parent.config.face.description.enabled ? predict3(faces[i].image || tf8.tensor([]), parent.config, i, faces.length) : [];
     } else {
       parent.state = "run:description";
       timeStamp = now();
-      descRes = parent.config.face.description.enabled ? await predict3(faces[i].image, parent.config, i, faces.length) : [];
+      descRes = parent.config.face.description.enabled ? await predict3(faces[i].image || tf8.tensor([]), parent.config, i, faces.length) : [];
       parent.performance.embedding = Math.trunc(now() - timeStamp);
     }
     parent.analyze("End Description:");
@@ -4838,7 +4840,7 @@ async function predict4(input, config3) {
     results3d[1] = results3d[1].sigmoid();
     return results3d;
   });
-  const buffers = await Promise.all(res.map((tensor) => tensor.buffer()));
+  const buffers = await Promise.all(res.map((tensor2) => tensor2.buffer()));
   for (const t of res)
     t.dispose();
   const decoded = await decode(buffers[0], buffers[1], buffers[2], buffers[3], config3.body.maxDetected, config3.body.minConfidence);
@@ -8183,19 +8185,19 @@ async function predict5(input, config3) {
     }
     box6[2] -= box6[0];
     box6[3] -= box6[1];
-    boxRaw3 = [box6[0] / input.shape[2], box6[1] / input.shape[1], box6[2] / input.shape[2], box6[3] / input.shape[1]];
+    boxRaw3 = [box6[0] / (input.shape[2] || 0), box6[1] / (input.shape[1] || 0), box6[2] / (input.shape[2] || 0), box6[3] / (input.shape[1] || 0)];
   } else {
     box6 = predictions[i].box ? [
       Math.trunc(Math.max(0, predictions[i].box.topLeft[0])),
       Math.trunc(Math.max(0, predictions[i].box.topLeft[1])),
-      Math.trunc(Math.min(input.shape[2], predictions[i].box.bottomRight[0]) - Math.max(0, predictions[i].box.topLeft[0])),
-      Math.trunc(Math.min(input.shape[1], predictions[i].box.bottomRight[1]) - Math.max(0, predictions[i].box.topLeft[1]))
+      Math.trunc(Math.min(input.shape[2] || 0, predictions[i].box.bottomRight[0]) - Math.max(0, predictions[i].box.topLeft[0])),
+      Math.trunc(Math.min(input.shape[1] || 0, predictions[i].box.bottomRight[1]) - Math.max(0, predictions[i].box.topLeft[1]))
     ] : [0, 0, 0, 0];
     boxRaw3 = [
-      predictions[i].box.topLeft[0] / input.shape[2],
-      predictions[i].box.topLeft[1] / input.shape[1],
-      (predictions[i].box.bottomRight[0] - predictions[i].box.topLeft[0]) / input.shape[2],
-      (predictions[i].box.bottomRight[1] - predictions[i].box.topLeft[1]) / input.shape[1]
+      predictions[i].box.topLeft[0] / (input.shape[2] || 0),
+      predictions[i].box.topLeft[1] / (input.shape[1] || 0),
+      (predictions[i].box.bottomRight[0] - predictions[i].box.topLeft[0]) / (input.shape[2] || 0),
+      (predictions[i].box.bottomRight[1] - predictions[i].box.topLeft[1]) / (input.shape[1] || 0)
     ];
   }
   hands.push({ id: i, score: Math.round(100 * predictions[i].confidence) / 100, box: box6, boxRaw: boxRaw3, keypoints: keypoints3, annotations: annotations3 });
@@ -8334,7 +8336,7 @@ async function predict6(image15, config3) {
     return [];
   if (!config3.body.enabled)
     return [];
-  const imgSize = { width: image15.shape[2], height: image15.shape[1] };
+  const imgSize = { width: image15.shape[2] || 0, height: image15.shape[1] || 0 };
   const resize = tf14.image.resizeBilinear(image15, [model4["width"], model4["height"]], false);
   const normalize = tf14.div(resize, [255]);
   resize.dispose();
@@ -8418,7 +8420,7 @@ async function predict7(image15, config3) {
   }
   skipped3 = 0;
   return new Promise(async (resolve) => {
-    const tensor = tf15.tidy(() => {
+    const tensor2 = tf15.tidy(() => {
       if (!model5.inputs[0].shape)
         return null;
       const resize = tf15.image.resizeBilinear(image15, [model5.inputs[0].shape[2], model5.inputs[0].shape[1]], false);
@@ -8428,8 +8430,8 @@ async function predict7(image15, config3) {
     });
     let resT;
     if (config3.body.enabled)
-      resT = await model5.predict(tensor);
-    tensor.dispose();
+      resT = await model5.predict(tensor2);
+    tensor2.dispose();
     if (resT) {
       keypoints.length = 0;
       const squeeze3 = resT.squeeze();
@@ -8503,7 +8505,7 @@ async function predict8(image15, config3) {
   }
   skipped4 = 0;
   return new Promise(async (resolve) => {
-    const tensor = tf16.tidy(() => {
+    const tensor2 = tf16.tidy(() => {
      if (!model6.inputs[0].shape)
        return null;
      const resize = tf16.image.resizeBilinear(image15, [model6.inputs[0].shape[2], model6.inputs[0].shape[1]], false);
@@ -8512,8 +8514,8 @@ async function predict8(image15, config3) {
     });
     let resT;
     if (config3.body.enabled)
-      resT = await model6.predict(tensor);
-    tensor.dispose();
+      resT = await model6.predict(tensor2);
+    tensor2.dispose();
     if (resT) {
       keypoints2.length = 0;
       const res = resT.arraySync();
@@ -8530,8 +8532,8 @@ async function predict8(image15, config3) {
           kpt3[id][0]
         ],
         position: [
-          Math.round(image15.shape[2] * kpt3[id][1]),
-          Math.round(image15.shape[1] * kpt3[id][0])
+          Math.round((image15.shape[2] || 0) * kpt3[id][1]),
+          Math.round((image15.shape[1] || 0) * kpt3[id][0])
         ]
       });
     }
@@ -9660,7 +9662,7 @@ var inCanvas;
 var outCanvas;
 var fx;
 function process4(input, config3) {
-  let tensor;
+  let tensor2;
   if (!input)
     throw new Error("Human: Input is missing");
   if (!(input instanceof tf19.Tensor) && !(typeof Image !== "undefined" && input instanceof Image) && !(typeof ImageData !== "undefined" && input instanceof ImageData) && !(typeof ImageBitmap !== "undefined" && input instanceof ImageBitmap) && !(typeof HTMLImageElement !== "undefined" && input instanceof HTMLImageElement) && !(typeof HTMLMediaElement !== "undefined" && input instanceof HTMLMediaElement) && !(typeof HTMLVideoElement !== "undefined" && input instanceof HTMLVideoElement) && !(typeof HTMLCanvasElement !== "undefined" && input instanceof HTMLCanvasElement) && !(typeof OffscreenCanvas !== "undefined" && input instanceof OffscreenCanvas)) {
@@ -9668,7 +9670,7 @@ function process4(input, config3) {
   }
   if (input instanceof tf19.Tensor) {
     if (input.shape && input.shape.length === 4 && input.shape[0] === 1 && input.shape[3] === 3)
-      tensor = tf19.clone(input);
+      tensor2 = tf19.clone(input);
     else
       throw new Error(`Human: Input tensor shape must be [1, height, width, 3] and instead was ${input.shape}`);
   } else {
@@ -9784,12 +9786,12 @@ function process4(input, config3) {
       pixels = tf19.browser.fromPixels(data);
     }
     const casted = pixels.toFloat();
-    tensor = casted.expandDims(0);
+    tensor2 = casted.expandDims(0);
     pixels.dispose();
     casted.dispose();
   }
   const canvas2 = config3.filter.return ? outCanvas : null;
-  return { tensor, canvas: canvas2 };
+  return { tensor: tensor2, canvas: canvas2 };
 }

 // src/draw/draw.ts
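Note: the bulk of this bundle diff swaps direct reads of `input.shape[1]` / `input.shape[2]` for `(input.shape[...] || 0)`. A minimal sketch of the pattern, assuming shape entries may be `undefined` under TypeScript strict checks; the `normalizeBox` helper below is illustrative, not part of the library:

```ts
// Illustrative sketch: a shape typed as Array<number | undefined> fails
// strict null checks when its entries are used in arithmetic, so every
// read is guarded with `|| 0` to satisfy the compiler.
type Box = [number, number, number, number];

function normalizeBox(box: Box, shape: Array<number | undefined>): Box {
  const width = shape[2] || 0;  // guarded read, mirroring the diff
  const height = shape[1] || 0;
  return [box[0] / width, box[1] / height, box[2] / width, box[3] / height];
}

// shape convention is [batch, height, width, channels]
console.log(normalizeBox([32, 24, 320, 240], [1, 480, 640, 3])); // [0.05, 0.05, 0.5, 0.5]
```

The guard only satisfies the compiler: a zero dimension would still produce `Infinity` on division, so callers are expected to pass validated inputs.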
@@ -129,6 +129,7 @@ var config = {
   async: true,
   warmup: "full",
   cacheSensitivity: 0.75,
+  skipFrame: false,
   filter: {
     enabled: true,
     width: 0,
@@ -188,12 +189,13 @@ var config = {
     enabled: true,
     modelPath: "movenet-lightning.json",
     maxDetected: 1,
-    minConfidence: 0.2
+    minConfidence: 0.2,
+    skipFrames: 16
   },
   hand: {
     enabled: true,
     rotation: true,
-    skipFrames: 18,
+    skipFrames: 19,
     minConfidence: 0.1,
     iouThreshold: 0.1,
     maxDetected: 2,
@@ -211,7 +213,7 @@ var config = {
     minConfidence: 0.2,
     iouThreshold: 0.4,
     maxDetected: 10,
-    skipFrames: 19
+    skipFrames: 20
   }
 };

@@ -4085,8 +4087,8 @@ async function predict(input, config3) {
     if (!prediction || prediction.isDisposedInternal)
       continue;
     const meshRaw = prediction.mesh.map((pt) => [
-      pt[0] / input.shape[2],
-      pt[1] / input.shape[1],
+      pt[0] / (input.shape[2] || 0),
+      pt[1] / (input.shape[1] || 0),
       pt[2] / facePipeline.meshSize
     ]);
     const annotations3 = {};
@@ -4097,14 +4099,14 @@ async function predict(input, config3) {
     const clampedBox = prediction.box ? [
       Math.trunc(Math.max(0, prediction.box.startPoint[0])),
       Math.trunc(Math.max(0, prediction.box.startPoint[1])),
-      Math.trunc(Math.min(input.shape[2], prediction.box.endPoint[0]) - Math.max(0, prediction.box.startPoint[0])),
-      Math.trunc(Math.min(input.shape[1], prediction.box.endPoint[1]) - Math.max(0, prediction.box.startPoint[1]))
+      Math.trunc(Math.min(input.shape[2] || 0, prediction.box.endPoint[0]) - Math.max(0, prediction.box.startPoint[0])),
+      Math.trunc(Math.min(input.shape[1] || 0, prediction.box.endPoint[1]) - Math.max(0, prediction.box.startPoint[1]))
     ] : [0, 0, 0, 0];
     const boxRaw3 = prediction.box ? [
-      prediction.box.startPoint[0] / input.shape[2],
-      prediction.box.startPoint[1] / input.shape[1],
-      (prediction.box.endPoint[0] - prediction.box.startPoint[0]) / input.shape[2],
-      (prediction.box.endPoint[1] - prediction.box.startPoint[1]) / input.shape[1]
+      prediction.box.startPoint[0] / (input.shape[2] || 0),
+      prediction.box.startPoint[1] / (input.shape[1] || 0),
+      (prediction.box.endPoint[0] - prediction.box.startPoint[0]) / (input.shape[2] || 0),
+      (prediction.box.endPoint[1] - prediction.box.startPoint[1]) / (input.shape[1] || 0)
     ] : [0, 0, 0, 0];
     results.push({
       id: id++,
@@ -4275,13 +4277,13 @@ function match(embedding, db, threshold = 0) {
 }
 function enhance(input) {
   const image15 = tf7.tidy(() => {
-    const tensor = input.image || input.tensor || input;
-    if (!(tensor instanceof tf7.Tensor))
+    const tensor2 = input.image || input.tensor || input;
+    if (!(tensor2 instanceof tf7.Tensor))
       return null;
     const box6 = [[0.05, 0.15, 0.85, 0.85]];
     if (!model2.inputs[0].shape)
       return null;
-    const crop = tensor.shape.length === 3 ? tf7.image.cropAndResize(tf7.expandDims(tensor, 0), box6, [0], [model2.inputs[0].shape[2], model2.inputs[0].shape[1]]) : tf7.image.cropAndResize(tensor, box6, [0], [model2.inputs[0].shape[2], model2.inputs[0].shape[1]]);
+    const crop = tensor2.shape.length === 3 ? tf7.image.cropAndResize(tf7.expandDims(tensor2, 0), box6, [0], [model2.inputs[0].shape[2], model2.inputs[0].shape[1]]) : tf7.image.cropAndResize(tensor2, box6, [0], [model2.inputs[0].shape[2], model2.inputs[0].shape[1]]);
     const norm = crop.mul(255);
     return norm;
   });
@@ -4453,21 +4455,21 @@ var detectFace = async (parent, input) => {
     const rotation = calculateFaceAngle(faces[i], [input.shape[2], input.shape[1]]);
     parent.analyze("Start Emotion:");
     if (parent.config.async) {
-      emotionRes = parent.config.face.emotion.enabled ? predict2(faces[i].image, parent.config, i, faces.length) : {};
+      emotionRes = parent.config.face.emotion.enabled ? predict2(faces[i].image || tf8.tensor([]), parent.config, i, faces.length) : {};
     } else {
       parent.state = "run:emotion";
       timeStamp = now();
-      emotionRes = parent.config.face.emotion.enabled ? await predict2(faces[i].image, parent.config, i, faces.length) : {};
+      emotionRes = parent.config.face.emotion.enabled ? await predict2(faces[i].image || tf8.tensor([]), parent.config, i, faces.length) : {};
       parent.performance.emotion = Math.trunc(now() - timeStamp);
     }
     parent.analyze("End Emotion:");
     parent.analyze("Start Description:");
     if (parent.config.async) {
-      descRes = parent.config.face.description.enabled ? predict3(faces[i], parent.config, i, faces.length) : [];
+      descRes = parent.config.face.description.enabled ? predict3(faces[i].image || tf8.tensor([]), parent.config, i, faces.length) : [];
     } else {
       parent.state = "run:description";
       timeStamp = now();
-      descRes = parent.config.face.description.enabled ? await predict3(faces[i].image, parent.config, i, faces.length) : [];
+      descRes = parent.config.face.description.enabled ? await predict3(faces[i].image || tf8.tensor([]), parent.config, i, faces.length) : [];
       parent.performance.embedding = Math.trunc(now() - timeStamp);
     }
     parent.analyze("End Description:");
@@ -4839,7 +4841,7 @@ async function predict4(input, config3) {
     results3d[1] = results3d[1].sigmoid();
     return results3d;
   });
-  const buffers = await Promise.all(res.map((tensor) => tensor.buffer()));
+  const buffers = await Promise.all(res.map((tensor2) => tensor2.buffer()));
   for (const t of res)
     t.dispose();
   const decoded = await decode(buffers[0], buffers[1], buffers[2], buffers[3], config3.body.maxDetected, config3.body.minConfidence);
@@ -8184,19 +8186,19 @@ async function predict5(input, config3) {
     }
     box6[2] -= box6[0];
     box6[3] -= box6[1];
-    boxRaw3 = [box6[0] / input.shape[2], box6[1] / input.shape[1], box6[2] / input.shape[2], box6[3] / input.shape[1]];
+    boxRaw3 = [box6[0] / (input.shape[2] || 0), box6[1] / (input.shape[1] || 0), box6[2] / (input.shape[2] || 0), box6[3] / (input.shape[1] || 0)];
   } else {
     box6 = predictions[i].box ? [
       Math.trunc(Math.max(0, predictions[i].box.topLeft[0])),
       Math.trunc(Math.max(0, predictions[i].box.topLeft[1])),
-      Math.trunc(Math.min(input.shape[2], predictions[i].box.bottomRight[0]) - Math.max(0, predictions[i].box.topLeft[0])),
-      Math.trunc(Math.min(input.shape[1], predictions[i].box.bottomRight[1]) - Math.max(0, predictions[i].box.topLeft[1]))
+      Math.trunc(Math.min(input.shape[2] || 0, predictions[i].box.bottomRight[0]) - Math.max(0, predictions[i].box.topLeft[0])),
+      Math.trunc(Math.min(input.shape[1] || 0, predictions[i].box.bottomRight[1]) - Math.max(0, predictions[i].box.topLeft[1]))
     ] : [0, 0, 0, 0];
     boxRaw3 = [
-      predictions[i].box.topLeft[0] / input.shape[2],
-      predictions[i].box.topLeft[1] / input.shape[1],
-      (predictions[i].box.bottomRight[0] - predictions[i].box.topLeft[0]) / input.shape[2],
-      (predictions[i].box.bottomRight[1] - predictions[i].box.topLeft[1]) / input.shape[1]
+      predictions[i].box.topLeft[0] / (input.shape[2] || 0),
+      predictions[i].box.topLeft[1] / (input.shape[1] || 0),
+      (predictions[i].box.bottomRight[0] - predictions[i].box.topLeft[0]) / (input.shape[2] || 0),
+      (predictions[i].box.bottomRight[1] - predictions[i].box.topLeft[1]) / (input.shape[1] || 0)
    ];
   }
   hands.push({ id: i, score: Math.round(100 * predictions[i].confidence) / 100, box: box6, boxRaw: boxRaw3, keypoints: keypoints3, annotations: annotations3 });
@@ -8335,7 +8337,7 @@ async function predict6(image15, config3) {
     return [];
   if (!config3.body.enabled)
     return [];
-  const imgSize = { width: image15.shape[2], height: image15.shape[1] };
+  const imgSize = { width: image15.shape[2] || 0, height: image15.shape[1] || 0 };
   const resize = tf14.image.resizeBilinear(image15, [model4["width"], model4["height"]], false);
   const normalize = tf14.div(resize, [255]);
   resize.dispose();
@@ -8419,7 +8421,7 @@ async function predict7(image15, config3) {
   }
   skipped3 = 0;
   return new Promise(async (resolve) => {
-    const tensor = tf15.tidy(() => {
+    const tensor2 = tf15.tidy(() => {
       if (!model5.inputs[0].shape)
         return null;
       const resize = tf15.image.resizeBilinear(image15, [model5.inputs[0].shape[2], model5.inputs[0].shape[1]], false);
@@ -8429,8 +8431,8 @@ async function predict7(image15, config3) {
     });
     let resT;
     if (config3.body.enabled)
-      resT = await model5.predict(tensor);
-    tensor.dispose();
+      resT = await model5.predict(tensor2);
+    tensor2.dispose();
     if (resT) {
       keypoints.length = 0;
       const squeeze3 = resT.squeeze();
@@ -8504,7 +8506,7 @@ async function predict8(image15, config3) {
   }
   skipped4 = 0;
   return new Promise(async (resolve) => {
-    const tensor = tf16.tidy(() => {
+    const tensor2 = tf16.tidy(() => {
      if (!model6.inputs[0].shape)
        return null;
      const resize = tf16.image.resizeBilinear(image15, [model6.inputs[0].shape[2], model6.inputs[0].shape[1]], false);
@@ -8513,8 +8515,8 @@ async function predict8(image15, config3) {
     });
     let resT;
     if (config3.body.enabled)
-      resT = await model6.predict(tensor);
-    tensor.dispose();
+      resT = await model6.predict(tensor2);
+    tensor2.dispose();
     if (resT) {
       keypoints2.length = 0;
       const res = resT.arraySync();
@@ -8531,8 +8533,8 @@ async function predict8(image15, config3) {
           kpt3[id][0]
         ],
         position: [
-          Math.round(image15.shape[2] * kpt3[id][1]),
-          Math.round(image15.shape[1] * kpt3[id][0])
+          Math.round((image15.shape[2] || 0) * kpt3[id][1]),
+          Math.round((image15.shape[1] || 0) * kpt3[id][0])
         ]
       });
     }
@@ -9661,7 +9663,7 @@ var inCanvas;
 var outCanvas;
 var fx;
 function process4(input, config3) {
-  let tensor;
+  let tensor2;
   if (!input)
     throw new Error("Human: Input is missing");
   if (!(input instanceof tf19.Tensor) && !(typeof Image !== "undefined" && input instanceof Image) && !(typeof ImageData !== "undefined" && input instanceof ImageData) && !(typeof ImageBitmap !== "undefined" && input instanceof ImageBitmap) && !(typeof HTMLImageElement !== "undefined" && input instanceof HTMLImageElement) && !(typeof HTMLMediaElement !== "undefined" && input instanceof HTMLMediaElement) && !(typeof HTMLVideoElement !== "undefined" && input instanceof HTMLVideoElement) && !(typeof HTMLCanvasElement !== "undefined" && input instanceof HTMLCanvasElement) && !(typeof OffscreenCanvas !== "undefined" && input instanceof OffscreenCanvas)) {
@@ -9669,7 +9671,7 @@ function process4(input, config3) {
   }
   if (input instanceof tf19.Tensor) {
     if (input.shape && input.shape.length === 4 && input.shape[0] === 1 && input.shape[3] === 3)
-      tensor = tf19.clone(input);
+      tensor2 = tf19.clone(input);
     else
       throw new Error(`Human: Input tensor shape must be [1, height, width, 3] and instead was ${input.shape}`);
   } else {
@@ -9785,12 +9787,12 @@ function process4(input, config3) {
       pixels = tf19.browser.fromPixels(data);
     }
     const casted = pixels.toFloat();
-    tensor = casted.expandDims(0);
+    tensor2 = casted.expandDims(0);
     pixels.dispose();
     casted.dispose();
   }
   const canvas2 = config3.filter.return ? outCanvas : null;
-  return { tensor, canvas: canvas2 };
+  return { tensor: tensor2, canvas: canvas2 };
 }

 // src/draw/draw.ts
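Note: the remaining churn in these bundle hunks renames inner `tensor` bindings to `tensor2`; that is the automatic linter's `no-shadow` fix, since the inner constants reused a name already bound in an enclosing scope. A small sketch of the before/after shape (names hypothetical):

```ts
// The inner binding previously reused the name `tensor`, hiding the outer
// one; renaming it keeps both scopes unambiguous without changing behavior.
function process(input: { image?: number[]; tensor?: number[] }): number {
  const tensor = input.tensor || [];       // outer binding keeps its name
  const inner = () => {
    const tensor2 = input.image || tensor; // renamed: no longer shadows `tensor`
    return tensor2.length;
  };
  return inner();
}

console.log(process({ image: [1, 2, 3] })); // 3
```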
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -25,7 +25,8 @@
     "dev": "node --trace-warnings --unhandled-rejections=strict --trace-uncaught server/serve.js",
     "build": "rimraf dist/* typedoc/* types/* && node --trace-warnings --unhandled-rejections=strict --trace-uncaught server/build.js",
     "lint": "eslint src server demo test",
-    "test": "node --trace-warnings --unhandled-rejections=strict --trace-uncaught test/test.js"
+    "test": "node --trace-warnings --unhandled-rejections=strict --trace-uncaught test/test.js",
+    "scan": "npx auditjs@latest ossi --dev --quiet"
   },
   "keywords": [
     "human",
@@ -62,7 +63,7 @@
     "@tensorflow/tfjs-layers": "^3.6.0",
     "@tensorflow/tfjs-node": "^3.6.1",
     "@tensorflow/tfjs-node-gpu": "^3.6.1",
-    "@types/node": "^15.6.1",
+    "@types/node": "^15.9.0",
     "@typescript-eslint/eslint-plugin": "^4.26.0",
     "@typescript-eslint/parser": "^4.26.0",
     "@vladmandic/pilogger": "^0.2.17",
@@ -9,12 +9,15 @@ const path = require('path');
 const log = require('@vladmandic/pilogger');
 const esbuild = require('esbuild');
 const TypeDoc = require('typedoc');
+const { ESLint } = require('eslint');
 const tfjs = require('@tensorflow/tfjs/package.json');
 const changelog = require('./changelog');

 let logFile = 'build.log';

 let busy = false;
+let td = null;
+let eslint = null;
 const banner = { js: `
 /*
   Human library
@@ -49,6 +52,8 @@ const tsconfig = {
 };

 // common configuration
+const lintLocations = ['server/', 'demo/', 'src/', 'test/'];
+
 const config = {
   common: {
     banner,
@@ -225,7 +230,7 @@ async function getStats(json) {
 }

 // rebuild typings
-async function compile(entryPoint, options) {
+async function typings(entryPoint, options) {
   log.info('Generate types:', entryPoint);
   const program = ts.createProgram(entryPoint, options);
   const emit = program.emit();
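The `compile()` helper is renamed to `typings()` to match what it does: emit type declarations through the TypeScript compiler API. A standalone sketch of that flow; the options and output path are assumptions, not the build's exact configuration:

```ts
import ts from 'typescript';

// Minimal declaration-only emit, along the lines of the renamed typings() step.
function emitTypings(entryPoints: string[]): void {
  const options: ts.CompilerOptions = {
    declaration: true,         // produce .d.ts files
    emitDeclarationOnly: true, // skip .js output
    outDir: 'types/',          // assumed output folder
  };
  const program = ts.createProgram(entryPoints, options);
  const result = program.emit();
  // surface compiler diagnostics the same way a build log would
  const diagnostics = ts.getPreEmitDiagnostics(program).concat(result.diagnostics);
  for (const d of diagnostics) console.log(ts.flattenDiagnosticMessageText(d.messageText, '\n'));
}

emitTypings(['src/human.ts']);
```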
@@ -250,13 +255,34 @@ async function typedoc(entryPoint) {
     td = new TypeDoc.Application();
     td.options.addReader(new TypeDoc.TSConfigReader());
     td.bootstrap({ entryPoints: [entryPoint], theme: 'wiki/theme/' });
     td.logger.warn = log.warn;
     td.logger.error = log.error;
     td.logger.verbose = () => { /***/ };
     td.logger.log = log.info;
   }
   const project = td.convert();
+  if (!project) log.warn('TypeDoc: convert returned empty project');
+  if (td.logger.hasErrors() || td.logger.hasWarnings()) log.warn('TypeDoc:', 'errors:', td.logger.errorCount, 'warnings:', td.logger.warningCount);
   const result = project ? await td.generateDocs(project, 'typedoc') : null;
   if (result) log.warn('TypeDoc:', result);
 }

+async function lint() {
+  log.info('Running Linter:', lintLocations);
+  if (!eslint) {
+    eslint = new ESLint();
+  }
+  const results = await eslint.lintFiles(lintLocations);
+  const errors = results.reduce((prev, curr) => prev += curr.errorCount, 0);
+  const warnings = results.reduce((prev, curr) => prev += curr.warningCount, 0);
+  log.info('Linter complete: files:', results.length, 'errors:', errors, 'warnings:', warnings);
+  if (errors > 0 || warnings > 0) {
+    const formatter = await eslint.loadFormatter('stylish');
+    const text = formatter.format(results);
+    log.warn(text);
+  }
+}
+
 // rebuild on file change
 async function build(f, msg, dev = false) {
   if (busy) {
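This hunk is the heart of the commit: a `lint()` pass using the ESLint Node API rather than shelling out, with the instance cached in the module-level `eslint` variable. A standalone sketch of the same flow, assuming eslint 7.x (which exports the `ESLint` class used above):

```ts
import { ESLint } from 'eslint';

// Programmatic lint pass mirroring the one added to the build script.
async function runLint(locations: string[]): Promise<number> {
  const eslint = new ESLint();                        // picks up .eslintrc from cwd
  const results = await eslint.lintFiles(locations);  // same entry call the build uses
  const errors = results.reduce((sum, r) => sum + r.errorCount, 0);
  const warnings = results.reduce((sum, r) => sum + r.warningCount, 0);
  console.log(`lint: files ${results.length} errors ${errors} warnings ${warnings}`);
  if (errors + warnings > 0) {
    const formatter = await eslint.loadFormatter('stylish');
    console.log(formatter.format(results));           // human-readable report
  }
  return errors;
}

runLint(['server/', 'demo/', 'src/', 'test/']).then((errors) => { process.exitCode = errors > 0 ? 1 : 0; });
```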
@@ -282,11 +308,11 @@ async function build(f, msg, dev = false) {
         log.state(`Build for: ${targetGroupName} type: ${targetName}:`, stats);
       }
     }
-    if (!dev) {
-      // generate typings & typedoc only when run as explict build
-      await compile(targets.browserBundle.esm.entryPoints, tsconfig);
-      await changelog.update('../CHANGELOG.md');
-      await typedoc(targets.browserBundle.esm.entryPoints);
+    if (!dev) { // only for prod builds, skipped for dev build
+      await lint(); // run linter
+      await typings(targets.browserBundle.esm.entryPoints, tsconfig); // generate typings
+      await changelog.update('../CHANGELOG.md'); // generate changelog
+      await typedoc(targets.browserBundle.esm.entryPoints); // generate typedoc
     }
     if (require.main === module) process.exit(0);
   } catch (err) {
@@ -302,6 +328,7 @@ if (require.main === module) {
   if (fs.existsSync(logFile)) fs.unlinkSync(logFile);
   log.logFile(logFile);
   log.header();
+  log.info(`Toolchain: tfjs: ${tfjs.version} esbuild ${esbuild.version}; typescript ${ts.version}; typedoc: ${TypeDoc.Application.VERSION} eslint: ${ESLint.version}`);
   build('all', 'startup');
 } else {
   exports.build = build;
@@ -1,17 +1,21 @@
-2021-06-02 13:37:11 INFO:  @vladmandic/human version 2.0.0
-2021-06-02 13:37:11 INFO:  User: vlado Platform: linux Arch: x64 Node: v16.0.0
-2021-06-02 13:37:11 INFO:  Build: file startup all type: production config: {"minifyWhitespace":true,"minifyIdentifiers":true,"minifySyntax":true}
-2021-06-02 13:37:11 STATE: Build for: node type: tfjs: {"imports":1,"importBytes":102,"outputBytes":1292,"outputFiles":"dist/tfjs.esm.js"}
-2021-06-02 13:37:11 STATE: Build for: node type: node: {"imports":40,"importBytes":418593,"outputBytes":371337,"outputFiles":"dist/human.node.js"}
-2021-06-02 13:37:11 STATE: Build for: nodeGPU type: tfjs: {"imports":1,"importBytes":110,"outputBytes":1300,"outputFiles":"dist/tfjs.esm.js"}
-2021-06-02 13:37:11 STATE: Build for: nodeGPU type: node: {"imports":40,"importBytes":418601,"outputBytes":371341,"outputFiles":"dist/human.node-gpu.js"}
-2021-06-02 13:37:11 STATE: Build for: nodeWASM type: tfjs: {"imports":1,"importBytes":149,"outputBytes":1367,"outputFiles":"dist/tfjs.esm.js"}
-2021-06-02 13:37:11 STATE: Build for: nodeWASM type: node: {"imports":40,"importBytes":418668,"outputBytes":371413,"outputFiles":"dist/human.node-wasm.js"}
-2021-06-02 13:37:11 STATE: Build for: browserNoBundle type: tfjs: {"imports":1,"importBytes":2478,"outputBytes":1394,"outputFiles":"dist/tfjs.esm.js"}
-2021-06-02 13:37:11 STATE: Build for: browserNoBundle type: esm: {"imports":40,"importBytes":418695,"outputBytes":244745,"outputFiles":"dist/human.esm-nobundle.js"}
-2021-06-02 13:37:12 STATE: Build for: browserBundle type: tfjs: {"modules":1274,"moduleBytes":4114813,"imports":7,"importBytes":2478,"outputBytes":1111418,"outputFiles":"dist/tfjs.esm.js"}
-2021-06-02 13:37:12 STATE: Build for: browserBundle type: iife: {"imports":40,"importBytes":1528719,"outputBytes":1352544,"outputFiles":"dist/human.js"}
-2021-06-02 13:37:13 STATE: Build for: browserBundle type: esm: {"imports":40,"importBytes":1528719,"outputBytes":1352536,"outputFiles":"dist/human.esm.js"}
-2021-06-02 13:37:13 INFO:  Generate types: ["src/human.ts"]
-2021-06-02 13:37:18 INFO:  Update Change log: ["/home/vlado/dev/human/CHANGELOG.md"]
-2021-06-02 13:37:18 INFO:  Generate TypeDocs: ["src/human.ts"]
+2021-06-03 09:38:51 INFO:  @vladmandic/human version 2.0.0
+2021-06-03 09:38:51 INFO:  User: vlado Platform: linux Arch: x64 Node: v16.0.0
+2021-06-03 09:38:51 INFO:  Toolchain: tfjs: 3.6.0 esbuild 0.12.5; typescript 4.2.4; typedoc: 0.20.36 eslint: 7.27.0
+2021-06-03 09:38:51 INFO:  Build: file startup all type: production config: {"minifyWhitespace":true,"minifyIdentifiers":true,"minifySyntax":true}
+2021-06-03 09:38:51 STATE: Build for: node type: tfjs: {"imports":1,"importBytes":102,"outputBytes":1292,"outputFiles":"dist/tfjs.esm.js"}
+2021-06-03 09:38:51 STATE: Build for: node type: node: {"imports":40,"importBytes":420707,"outputBytes":371671,"outputFiles":"dist/human.node.js"}
+2021-06-03 09:38:51 STATE: Build for: nodeGPU type: tfjs: {"imports":1,"importBytes":110,"outputBytes":1300,"outputFiles":"dist/tfjs.esm.js"}
+2021-06-03 09:38:51 STATE: Build for: nodeGPU type: node: {"imports":40,"importBytes":420715,"outputBytes":371675,"outputFiles":"dist/human.node-gpu.js"}
+2021-06-03 09:38:51 STATE: Build for: nodeWASM type: tfjs: {"imports":1,"importBytes":149,"outputBytes":1367,"outputFiles":"dist/tfjs.esm.js"}
+2021-06-03 09:38:51 STATE: Build for: nodeWASM type: node: {"imports":40,"importBytes":420782,"outputBytes":371747,"outputFiles":"dist/human.node-wasm.js"}
+2021-06-03 09:38:51 STATE: Build for: browserNoBundle type: tfjs: {"imports":1,"importBytes":2478,"outputBytes":1394,"outputFiles":"dist/tfjs.esm.js"}
+2021-06-03 09:38:51 STATE: Build for: browserNoBundle type: esm: {"imports":40,"importBytes":420809,"outputBytes":244956,"outputFiles":"dist/human.esm-nobundle.js"}
+2021-06-03 09:38:52 STATE: Build for: browserBundle type: tfjs: {"modules":1274,"moduleBytes":4114813,"imports":7,"importBytes":2478,"outputBytes":1111418,"outputFiles":"dist/tfjs.esm.js"}
+2021-06-03 09:38:52 STATE: Build for: browserBundle type: iife: {"imports":40,"importBytes":1530833,"outputBytes":1352725,"outputFiles":"dist/human.js"}
+2021-06-03 09:38:53 STATE: Build for: browserBundle type: esm: {"imports":40,"importBytes":1530833,"outputBytes":1352717,"outputFiles":"dist/human.esm.js"}
+2021-06-03 09:38:53 INFO:  Running Linter: ["server/","demo/","src/","test/"]
+2021-06-03 09:39:19 INFO:  Linter complete: files: 67 errors: 0 warnings: 0
+2021-06-03 09:39:19 INFO:  Generate types: ["src/human.ts"]
+2021-06-03 09:39:23 INFO:  Update Change log: ["/home/vlado/dev/human/CHANGELOG.md"]
+2021-06-03 09:39:23 INFO:  Generate TypeDocs: ["src/human.ts"]
+2021-06-03 09:39:42 INFO:  Documentation generated at /home/vlado/dev/human/typedoc 1
@@ -29,7 +29,7 @@ export class BlazeFaceModel {
   inputSize: number;
   config: Config;

-  constructor(model, config) {
+  constructor(model, config: Config) {
     this.model = model;
     this.anchorsData = util.generateAnchors(model.inputs[0].shape[1]);
     this.anchors = tf.tensor2d(this.anchorsData);
@@ -37,8 +37,9 @@ export class BlazeFaceModel {
     this.config = config;
   }

-  async getBoundingBoxes(inputImage) {
+  async getBoundingBoxes(inputImage: Tensor) {
+    // sanity check on input
     // @ts-ignore isDisposed is internal property
     if ((!inputImage) || (inputImage.isDisposedInternal) || (inputImage.shape.length !== 4) || (inputImage.shape[1] < 1) || (inputImage.shape[2] < 1)) return null;
     const [batch, boxes, scores] = tf.tidy(() => {
       const resizedImage = inputImage.resizeBilinear([this.inputSize, this.inputSize]);
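The newly typed `getBoundingBoxes(inputImage: Tensor)` keeps its sanity check: reject missing, disposed, or degenerate tensors before inference. A sketch of the same guard using the public `isDisposed` getter (the library itself checks the internal `isDisposedInternal` flag behind a `@ts-ignore`):

```ts
import * as tf from '@tensorflow/tfjs';

// Reject inputs that cannot be fed to the detector: missing tensors,
// tensors disposed while an async pipeline was paused, wrong rank,
// or images with a zero-sized dimension.
function isUsableImage(t: tf.Tensor | null | undefined): boolean {
  if (!t || t.isDisposed) return false;
  if (t.shape.length !== 4) return false;          // expect [batch, height, width, channels]
  if ((t.shape[1] ?? 0) < 1 || (t.shape[2] ?? 0) < 1) return false;
  return true;
}

const img = tf.zeros([1, 240, 320, 3]);
console.log(isUsableImage(img)); // true
img.dispose();
console.log(isUsableImage(img)); // false
```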
@@ -85,7 +86,7 @@ export class BlazeFaceModel {
   }
 }

-export async function load(config) {
+export async function load(config: Config) {
   const model = await tf.loadGraphModel(join(config.modelBasePath, config.face.detector.modelPath), { fromTFHub: config.face.detector.modelPath.includes('tfhub.dev') });
   const blazeFace = new BlazeFaceModel(model, config);
   if (!model || !model.modelUrl) log('load model failed:', config.face.detector.modelPath);
@@ -7,21 +7,22 @@ import * as tf from '../../dist/tfjs.esm.js';
 import * as blazeface from './blazeface';
 import * as facepipeline from './facepipeline';
 import * as coords from './coords';
-import { GraphModel } from '../tfjs/types';
+import { GraphModel, Tensor } from '../tfjs/types';
 import { Face } from '../result';
+import { Config } from '../config';

 let faceModels: [blazeface.BlazeFaceModel | null, GraphModel | null, GraphModel | null] = [null, null, null];
 let facePipeline;

-export async function predict(input, config): Promise<Face[]> {
+export async function predict(input: Tensor, config: Config): Promise<Face[]> {
   const predictions = await facePipeline.predict(input, config);
   const results: Array<Face> = [];
   let id = 0;
   for (const prediction of (predictions || [])) {
     if (!prediction || prediction.isDisposedInternal) continue; // guard against disposed tensors on long running operations such as pause in middle of processing
     const meshRaw = prediction.mesh.map((pt) => [
-      pt[0] / input.shape[2],
-      pt[1] / input.shape[1],
+      pt[0] / (input.shape[2] || 0),
+      pt[1] / (input.shape[1] || 0),
       pt[2] / facePipeline.meshSize,
     ]);
     const annotations = {};
@@ -31,14 +32,14 @@ export async function predict(input, config): Promise<Face[]> {
     const clampedBox: [number, number, number, number] = prediction.box ? [
       Math.trunc(Math.max(0, prediction.box.startPoint[0])),
       Math.trunc(Math.max(0, prediction.box.startPoint[1])),
-      Math.trunc(Math.min(input.shape[2], prediction.box.endPoint[0]) - Math.max(0, prediction.box.startPoint[0])),
-      Math.trunc(Math.min(input.shape[1], prediction.box.endPoint[1]) - Math.max(0, prediction.box.startPoint[1])),
+      Math.trunc(Math.min((input.shape[2] || 0), prediction.box.endPoint[0]) - Math.max(0, prediction.box.startPoint[0])),
+      Math.trunc(Math.min((input.shape[1] || 0), prediction.box.endPoint[1]) - Math.max(0, prediction.box.startPoint[1])),
     ] : [0, 0, 0, 0];
     const boxRaw: [number, number, number, number] = prediction.box ? [
-      prediction.box.startPoint[0] / input.shape[2],
-      prediction.box.startPoint[1] / input.shape[1],
-      (prediction.box.endPoint[0] - prediction.box.startPoint[0]) / input.shape[2],
-      (prediction.box.endPoint[1] - prediction.box.startPoint[1]) / input.shape[1],
+      prediction.box.startPoint[0] / (input.shape[2] || 0),
+      prediction.box.startPoint[1] / (input.shape[1] || 0),
+      (prediction.box.endPoint[0] - prediction.box.startPoint[0]) / (input.shape[2] || 0),
+      (prediction.box.endPoint[1] - prediction.box.startPoint[1]) / (input.shape[1] || 0),
     ] : [0, 0, 0, 0];
     results.push({
       id: id++,
@@ -9,10 +9,11 @@ import * as tf from '../../dist/tfjs.esm.js';
 import * as annotations from './annotations';
 import { Tensor, GraphModel } from '../tfjs/types';
 import { Body } from '../result';
+import { Config } from '../config';

 let model: GraphModel;

-export async function load(config) {
+export async function load(config: Config): Promise<GraphModel> {
   if (!model) {
     // @ts-ignore type mismatch for Graphmodel
     model = await tf.loadGraphModel(join(config.modelBasePath, config.body.modelPath));
@@ -24,10 +25,10 @@ export async function load(config) {
   return model;
 }

-export async function predict(image, config): Promise<Body[]> {
+export async function predict(image: Tensor, config: Config): Promise<Body[]> {
   if (!model) return [];
   if (!config.body.enabled) return [];
-  const imgSize = { width: image.shape[2], height: image.shape[1] };
+  const imgSize = { width: (image.shape[2] || 0), height: (image.shape[1] || 0) };
   const resize = tf.image.resizeBilinear(image, [model['width'], model['height']], false);
   const normalize = tf.div(resize, [255.0]);
   resize.dispose();
@@ -37,6 +37,12 @@ export interface Config {
    */
   cacheSensitivity: number;

+  /** Cache sensitivity
+   * - values 0..1 where 0.01 means reset cache if input changed more than 1%
+   * - set to 0 to disable caching
+   */
+  skipFrame: boolean;
+
   /** Run input through image filters before inference
    * - image filters run with near-zero latency as they are executed on the GPU
    */
@@ -147,6 +153,7 @@ export interface Config {
     modelPath: string,
     maxDetected: number,
     minConfidence: number,
+    skipFrames: number,
   },

   /** Controlls and configures all hand detection specific options
@@ -205,6 +212,7 @@ const config: Config = {
   cacheSensitivity: 0.75, // cache sensitivity
                           // values 0..1 where 0.01 means reset cache if input changed more than 1%
                           // set to 0 to disable caching
+  skipFrame: false, // internal & dynamic
   filter: { // run input through image filters before inference
             // image filters run with near-zero latency as they are executed on the GPU
     enabled: true, // enable image pre-processing filters
@@ -294,13 +302,15 @@ const config: Config = {
                         // should be set to the minimum number for performance
                         // only valid for posenet as other models detects single pose
     minConfidence: 0.2, // threshold for discarding a prediction
-  },
+    skipFrames: 16, // how many max frames to go without re-running the detector
+                    // only used when cacheSensitivity is not zero
+  },

   hand: {
     enabled: true,
     rotation: true, // use best-guess rotated hand image or just box with rotation as-is
                     // false means higher performance, but incorrect finger mapping if hand is inverted
-    skipFrames: 18, // how many max frames to go without re-running the hand bounding box detector
+    skipFrames: 19, // how many max frames to go without re-running the hand bounding box detector
                     // only used when cacheSensitivity is not zero
                     // e.g., if model is running st 25 FPS, we can re-use existing bounding
                     // box for updated hand skeleton analysis as the hand probably
@@ -325,7 +335,7 @@ const config: Config = {
     minConfidence: 0.2, // threshold for discarding a prediction
     iouThreshold: 0.4, // ammount of overlap between two detected objects before one object is removed
     maxDetected: 10, // maximum number of objects detected in the input
-    skipFrames: 19, // how many max frames to go without re-running the detector
+    skipFrames: 20, // how many max frames to go without re-running the detector
                     // only used when cacheSensitivity is not zero
   },
 };
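Taken together, these config hunks wire frame caching into the body section (`skipFrames: 16`) and bump the hand and object defaults to 19 and 20. A hedged usage sketch, assuming the library's published constructor that merges a partial config over these defaults:

```ts
import Human from '@vladmandic/human';

// Tuning the caching knobs touched by this commit; the values shown are the
// new defaults, so passing them explicitly is redundant but illustrative.
const human = new Human({
  cacheSensitivity: 0.75,     // 0..1; 0.01 resets cache on >1% input change, 0 disables caching
  body: { skipFrames: 16 },   // re-run body detector at most every 16 frames
  hand: { skipFrames: 19 },   // was 18 before this commit
  object: { skipFrames: 20 }, // was 19 before this commit
});
```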
@@ -5,7 +5,8 @@
 import { log, join } from '../helpers';
 import * as tf from '../../dist/tfjs.esm.js';
 import { Body } from '../result';
-import { GraphModel } from '../tfjs/types';
+import { GraphModel, Tensor } from '../tfjs/types';
+import { Config } from '../config';

 let model: GraphModel;
@@ -19,7 +20,7 @@ let skipped = Number.MAX_SAFE_INTEGER;

 const bodyParts = ['head', 'neck', 'rightShoulder', 'rightElbow', 'rightWrist', 'chest', 'leftShoulder', 'leftElbow', 'leftWrist', 'pelvis', 'rightHip', 'rightKnee', 'rightAnkle', 'leftHip', 'leftKnee', 'leftAnkle'];

-export async function load(config) {
+export async function load(config: Config): Promise<GraphModel> {
   if (!model) {
     // @ts-ignore type mismatch on GraphModel
     model = await tf.loadGraphModel(join(config.modelBasePath, config.body.modelPath));
@ -50,7 +51,7 @@ function max2d(inputs, minScore) {
|
|||
});
|
||||
}
|
||||
|
||||
export async function predict(image, config): Promise<Body[]> {
|
||||
export async function predict(image: Tensor, config: Config): Promise<Body[]> {
|
||||
if ((skipped < config.body.skipFrames) && config.skipFrame && Object.keys(keypoints).length > 0) {
|
||||
skipped++;
|
||||
return [{ id: 0, score, box, boxRaw, keypoints }];
|
||||
|
|
|
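The guard at the top of predict() is the caching pattern this commit threads through every detector module: cached results are returned while fewer than skipFrames frames have elapsed and the global skipFrame flag reports a near-identical input. A condensed sketch of the pattern (runModel is a hypothetical stand-in for the module's actual inference call):

import { Item } from '../result';
import { Tensor } from '../tfjs/types';
import { Config } from '../config';

declare function runModel(input: Tensor, config: Config): Promise<Item[]>; // hypothetical helper

let skipped = Number.MAX_SAFE_INTEGER; // forces a real run on the first frame
let last: Item[] = [];

export async function predict(input: Tensor, config: Config): Promise<Item[]> {
  if ((skipped < config.object.skipFrames) && config.skipFrame && (last.length > 0)) {
    skipped++;
    return last; // serve cached result
  }
  skipped = 0;
  last = await runModel(input, config); // re-run actual inference
  return last;
}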
@ -3,6 +3,8 @@
 */

import { log, join } from '../helpers';
import { Config } from '../config';
import { Tensor, GraphModel } from '../tfjs/types';
import * as tf from '../../dist/tfjs.esm.js';

const annotations = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral'];

@ -15,7 +17,7 @@ let skipped = Number.MAX_SAFE_INTEGER;
// tuning values
const rgb = [0.2989, 0.5870, 0.1140]; // factors for red/green/blue colors when converting to grayscale

export async function load(config) {
export async function load(config: Config): Promise<GraphModel> {
if (!model) {
model = await tf.loadGraphModel(join(config.modelBasePath, config.face.emotion.modelPath));
if (!model || !model.modelUrl) log('load model failed:', config.face.emotion.modelPath);
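The rgb constants above are the standard Rec. 601 luma weights, so the grayscale value of each pixel is 0.2989·R + 0.5870·G + 0.1140·B. A sketch of that conversion in tf.js terms (assuming an NHWC tensor; variable names illustrative):

const rgb = [0.2989, 0.5870, 0.1140];
const [red, green, blue] = tf.split(image, 3, 3); // split channels along the last axis
const grayscale = tf.addN([ // weighted sum of the three channels
  tf.mul(red, rgb[0]),
  tf.mul(green, rgb[1]),
  tf.mul(blue, rgb[2]),
]);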
@ -24,7 +26,7 @@ export async function load(config) {
return model;
}

export async function predict(image, config, idx, count) {
export async function predict(image: Tensor, config: Config, idx, count) {
if (!model) return null;
if ((skipped < config.face.emotion.skipFrames) && config.skipFrame && (lastCount === count) && last[idx] && (last[idx].length > 0)) {
skipped++;
@ -171,11 +171,11 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
// run emotion, inherits face from blazeface
parent.analyze('Start Emotion:');
if (parent.config.async) {
emotionRes = parent.config.face.emotion.enabled ? emotion.predict(faces[i].image, parent.config, i, faces.length) : {};
emotionRes = parent.config.face.emotion.enabled ? emotion.predict(faces[i].image || tf.tensor([]), parent.config, i, faces.length) : {};
} else {
parent.state = 'run:emotion';
timeStamp = now();
emotionRes = parent.config.face.emotion.enabled ? await emotion.predict(faces[i].image, parent.config, i, faces.length) : {};
emotionRes = parent.config.face.emotion.enabled ? await emotion.predict(faces[i].image || tf.tensor([]), parent.config, i, faces.length) : {};
parent.performance.emotion = Math.trunc(now() - timeStamp);
}
parent.analyze('End Emotion:');

@ -183,11 +183,11 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
// run description, inherits face from blazeface
parent.analyze('Start Description:');
if (parent.config.async) {
descRes = parent.config.face.description.enabled ? faceres.predict(faces[i], parent.config, i, faces.length) : [];
descRes = parent.config.face.description.enabled ? faceres.predict(faces[i].image || tf.tensor([]), parent.config, i, faces.length) : [];
} else {
parent.state = 'run:description';
timeStamp = now();
descRes = parent.config.face.description.enabled ? await faceres.predict(faces[i].image, parent.config, i, faces.length) : [];
descRes = parent.config.face.description.enabled ? await faceres.predict(faces[i].image || tf.tensor([]), parent.config, i, faces.length) : [];
parent.performance.embedding = Math.trunc(now() - timeStamp);
}
parent.analyze('End Description:');
@ -7,6 +7,7 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { Tensor, GraphModel } from '../tfjs/types';
import { Config } from '../config';

let model: GraphModel;
const last: Array<{

@ -21,7 +22,7 @@ let skipped = Number.MAX_SAFE_INTEGER;

type DB = Array<{ name: string, source: string, embedding: number[] }>;

export async function load(config) {
export async function load(config: Config): Promise<GraphModel> {
const modelUrl = join(config.modelBasePath, config.face.description.modelPath);
if (!model) {
// @ts-ignore type mismatch for GraphModel

@ -32,7 +33,7 @@ export async function load(config) {
return model;
}

export function similarity(embedding1, embedding2, order = 2): number {
export function similarity(embedding1: Array<number>, embedding2: Array<number>, order = 2): number {
if (!embedding1 || !embedding2) return 0;
if (embedding1?.length === 0 || embedding2?.length === 0) return 0;
if (embedding1?.length !== embedding2?.length) return 0;
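The order parameter (default 2, i.e. Euclidean) suggests a Minkowski-style distance between the two embeddings; the exact scoring is not visible in this hunk, so the following is only a plausible sketch of such a distance and a 0..1 similarity derived from it:

function minkowski(a: Array<number>, b: Array<number>, order = 2): number {
  let sum = 0;
  for (let i = 0; i < a.length; i++) sum += Math.abs(a[i] - b[i]) ** order; // per-dimension difference
  return sum ** (1 / order);
}

// identical embeddings score 1, distant embeddings approach 0
const similarityScore = (a: Array<number>, b: Array<number>, order = 2) => 1 / (1 + minkowski(a, b, order));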
@ -110,7 +111,7 @@ export function enhance(input): Tensor {
return image;
}

export async function predict(image, config, idx, count) {
export async function predict(image: Tensor, config: Config, idx, count) {
if (!model) return null;
if ((skipped < config.face.description.skipFrames) && config.skipFrame && (lastCount === count) && last[idx]?.age && (last[idx]?.age > 0)) {
skipped++;
@ -7,7 +7,8 @@ import * as tf from '../../dist/tfjs.esm.js';
import * as handdetector from './handdetector';
import * as handpipeline from './handpipeline';
import { Hand } from '../result';
import { GraphModel } from '../tfjs/types';
import { Tensor, GraphModel } from '../tfjs/types';
import { Config } from '../config';

const meshAnnotations = {
thumb: [1, 2, 3, 4],

@ -22,7 +23,7 @@ let handDetectorModel: GraphModel | null;
let handPoseModel: GraphModel | null;
let handPipeline: handpipeline.HandPipeline;

export async function predict(input, config): Promise<Hand[]> {
export async function predict(input: Tensor, config: Config): Promise<Hand[]> {
const predictions = await handPipeline.estimateHands(input, config);
if (!predictions) return [];
const hands: Array<Hand> = [];

@ -48,19 +49,19 @@ export async function predict(input, config): Promise<Hand[]> {
}
box[2] -= box[0];
box[3] -= box[1];
boxRaw = [box[0] / input.shape[2], box[1] / input.shape[1], box[2] / input.shape[2], box[3] / input.shape[1]];
boxRaw = [box[0] / (input.shape[2] || 0), box[1] / (input.shape[1] || 0), box[2] / (input.shape[2] || 0), box[3] / (input.shape[1] || 0)];
} else { // otherwise use box from prediction
box = predictions[i].box ? [
Math.trunc(Math.max(0, predictions[i].box.topLeft[0])),
Math.trunc(Math.max(0, predictions[i].box.topLeft[1])),
Math.trunc(Math.min(input.shape[2], predictions[i].box.bottomRight[0]) - Math.max(0, predictions[i].box.topLeft[0])),
Math.trunc(Math.min(input.shape[1], predictions[i].box.bottomRight[1]) - Math.max(0, predictions[i].box.topLeft[1])),
Math.trunc(Math.min((input.shape[2] || 0), predictions[i].box.bottomRight[0]) - Math.max(0, predictions[i].box.topLeft[0])),
Math.trunc(Math.min((input.shape[1] || 0), predictions[i].box.bottomRight[1]) - Math.max(0, predictions[i].box.topLeft[1])),
] : [0, 0, 0, 0];
boxRaw = [
(predictions[i].box.topLeft[0]) / input.shape[2],
(predictions[i].box.topLeft[1]) / input.shape[1],
(predictions[i].box.bottomRight[0] - predictions[i].box.topLeft[0]) / input.shape[2],
(predictions[i].box.bottomRight[1] - predictions[i].box.topLeft[1]) / input.shape[1],
(predictions[i].box.topLeft[0]) / (input.shape[2] || 0),
(predictions[i].box.topLeft[1]) / (input.shape[1] || 0),
(predictions[i].box.bottomRight[0] - predictions[i].box.topLeft[0]) / (input.shape[2] || 0),
(predictions[i].box.bottomRight[1] - predictions[i].box.topLeft[1]) / (input.shape[1] || 0),
];
}
hands.push({ id: i, score: Math.round(100 * predictions[i].confidence) / 100, box, boxRaw, keypoints, annotations });
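The recurring (input.shape[1] || 0) and (input.shape[2] || 0) guards exist because tensor shape entries are typed as possibly undefined, so each dimension read needs a numeric fallback before it can appear in arithmetic. A minimal sketch of the idiom (with the caveat, not stated in the diff, that a 0 fallback turns the divisions into Infinity, so it satisfies the type checker rather than recovering a truly missing dimension):

const width = input.shape[2] || 0;  // NHWC layout: [batch, height, width, channels]
const height = input.shape[1] || 0;
const boxRaw = [box[0] / width, box[1] / height, box[2] / width, box[3] / height]; // normalized 0..1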
@ -68,7 +69,7 @@ export async function predict(input, config): Promise<Hand[]> {
return hands;
}

export async function load(config): Promise<[unknown, unknown]> {
export async function load(config: Config): Promise<[unknown, unknown]> {
if (!handDetectorModel || !handPoseModel) {
// @ts-ignore type mismatch on GraphModel
[handDetectorModel, handPoseModel] = await Promise.all([
@ -12,7 +12,7 @@ export function join(folder: string, file: string): string {
}

// helper function: wrapper around console output
export function log(...msg) {
export function log(...msg): void {
const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
// eslint-disable-next-line no-console
src/human.ts

@ -132,7 +132,7 @@ export class Human {
/** Platform and agent information detected by Human */
sysinfo: { platform: string, agent: string };
/** Performance object that contains values for all recently performed operations */
performance: Record<string, unknown>; // perf members are dynamically defined as needed
performance: Record<string, number>; // perf members are dynamically defined as needed
#numTensors: number;
#analyzeMemoryLeaks: boolean;
#checkSanity: boolean;
@ -258,7 +258,7 @@ export class Human {
async load(userConfig?: Config | Record<string, unknown>) {
this.state = 'load';
const timeStamp = now();
if (userConfig) this.config = mergeDeep(this.config, userConfig);
if (userConfig) this.config = mergeDeep(this.config, userConfig) as Config;

if (this.#firstRun) { // print version info on first run and check for correct backend setup
if (this.config.debug) log(`version: ${this.version}`);

@ -432,7 +432,7 @@ export class Human {
let timeStamp;

// update configuration
this.config = mergeDeep(this.config, userConfig);
this.config = mergeDeep(this.config, userConfig) as Config;

// sanity checks
this.state = 'check';
@ -478,12 +478,10 @@ export class Human {
this.analyze('Get Image:');

timeStamp = now();
// @ts-ignore hidden dynamic property that is not part of definitions
this.config.skipFrame = await this.#skipFrame(process.tensor);
if (!this.performance.frames) this.performance.frames = 0;
if (!this.performance.cached) this.performance.cached = 0;
(this.performance.frames as number)++;
// @ts-ignore hidden dynamic property that is not part of definitions
if (this.config.skipFrame) this.performance.cached++;
this.performance.changed = Math.trunc(now() - timeStamp);
this.analyze('Check Changed:');
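With performance now typed Record<string, number> and the detect loop maintaining frames (total frames processed) and cached (frames answered from cache) counters, a caller can derive the cache hit ratio directly. A hypothetical usage snippet (human and input assumed in scope):

await human.detect(input);
const frames = human.performance.frames || 0; // total frames processed
const cached = human.performance.cached || 0; // frames served from cache
if (frames > 0) console.log(`cache hit ratio: ${(100 * cached / frames).toFixed(1)}%`);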
@ -678,7 +676,7 @@ export class Human {
 */
async warmup(userConfig?: Config | Record<string, unknown>): Promise<Result | { error }> {
const t0 = now();
if (userConfig) this.config = mergeDeep(this.config, userConfig);
if (userConfig) this.config = mergeDeep(this.config, userConfig) as Config;
if (!this.config.warmup || this.config.warmup === 'none') return { error: 'null' };
let res;
if (typeof createImageBitmap === 'function') res = await this.#warmupBitmap();
@ -5,7 +5,8 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { Body } from '../result';
import { GraphModel } from '../tfjs/types';
import { GraphModel, Tensor } from '../tfjs/types';
import { Config } from '../config';

let model: GraphModel;

@ -19,7 +20,7 @@ let skipped = Number.MAX_SAFE_INTEGER;

const bodyParts = ['nose', 'leftEye', 'rightEye', 'leftEar', 'rightEar', 'leftShoulder', 'rightShoulder', 'leftElbow', 'rightElbow', 'leftWrist', 'rightWrist', 'leftHip', 'rightHip', 'leftKnee', 'rightKnee', 'leftAnkle', 'rightAnkle'];

export async function load(config) {
export async function load(config: Config): Promise<GraphModel> {
if (!model) {
// @ts-ignore type mismatch on GraphModel
model = await tf.loadGraphModel(join(config.modelBasePath, config.body.modelPath));

@ -29,7 +30,7 @@ export async function load(config) {
return model;
}

export async function predict(image, config): Promise<Body[]> {
export async function predict(image: Tensor, config: Config): Promise<Body[]> {
if ((skipped < config.body.skipFrames) && config.skipFrame && Object.keys(keypoints).length > 0) {
skipped++;
return [{ id: 0, score, box, boxRaw, keypoints }];

@ -63,8 +64,8 @@ export async function predict(image, config): Promise<Body[]> {
kpt[id][0],
],
position: [ // normalized to input image size
Math.round(image.shape[2] * kpt[id][1]),
Math.round(image.shape[1] * kpt[id][0]),
Math.round((image.shape[2] || 0) * kpt[id][1]),
Math.round((image.shape[1] || 0) * kpt[id][0]),
],
});
}
@ -6,12 +6,14 @@ import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { labels } from './labels';
import { Item } from '../result';
import { GraphModel, Tensor } from '../tfjs/types';
import { Config } from '../config';

let model;
let last: Item[] = [];
let skipped = Number.MAX_SAFE_INTEGER;

export async function load(config) {
export async function load(config: Config): Promise<GraphModel> {
if (!model) {
model = await tf.loadGraphModel(join(config.modelBasePath, config.object.modelPath));
const inputs = Object.values(model.modelSignature['inputs']);

@ -23,7 +25,7 @@ export async function load(config) {
return model;
}

async function process(res, inputSize, outputShape, config) {
async function process(res: Tensor, inputSize, outputShape, config: Config) {
if (!res) return [];
const results: Array<Item> = [];
const detections = res.arraySync();

@ -64,7 +66,7 @@ async function process(res, inputSize, outputShape, config) {
return results;
}

export async function predict(input, config): Promise<Item[]> {
export async function predict(input: Tensor, config: Config): Promise<Item[]> {
if ((skipped < config.object.skipFrames) && config.skipFrame && (last.length > 0)) {
skipped++;
return last;
@ -6,6 +6,8 @@ import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { labels } from './labels';
import { Item } from '../result';
import { GraphModel, Tensor } from '../tfjs/types';
import { Config } from '../config';

let model;
let last: Array<Item> = [];

@ -13,7 +15,7 @@ let skipped = Number.MAX_SAFE_INTEGER;

const scaleBox = 2.5; // increase box size

export async function load(config) {
export async function load(config: Config): Promise<GraphModel> {
if (!model) {
model = await tf.loadGraphModel(join(config.modelBasePath, config.object.modelPath));
const inputs = Object.values(model.modelSignature['inputs']);

@ -100,7 +102,7 @@ async function process(res, inputSize, outputShape, config) {
return results;
}

export async function predict(image, config): Promise<Item[]> {
export async function predict(image: Tensor, config: Config): Promise<Item[]> {
if ((skipped < config.object.skipFrames) && config.skipFrame && (last.length > 0)) {
skipped++;
return last;
@ -8,11 +8,12 @@ import * as poses from './poses';
import * as util from './utils';
import { Body } from '../result';
import { Tensor, GraphModel } from '../tfjs/types';
import { Config } from '../config';

let model: GraphModel;
const poseNetOutputs = ['MobilenetV1/offset_2/BiasAdd'/* offsets */, 'MobilenetV1/heatmap_2/BiasAdd'/* heatmapScores */, 'MobilenetV1/displacement_fwd_2/BiasAdd'/* displacementFwd */, 'MobilenetV1/displacement_bwd_2/BiasAdd'/* displacementBwd */];

export async function predict(input, config): Promise<Body[]> {
export async function predict(input: Tensor, config: Config): Promise<Body[]> {
const res = tf.tidy(() => {
if (!model.inputs[0].shape) return [];
const resized = input.resizeBilinear([model.inputs[0].shape[2], model.inputs[0].shape[1]]);

@ -32,7 +33,7 @@ export async function predict(input, config): Promise<Body[]> {
return scaled;
}

export async function load(config) {
export async function load(config: Config): Promise<GraphModel> {
if (!model) {
// @ts-ignore type mismatch for GraphModel
model = await tf.loadGraphModel(join(config.modelBasePath, config.body.modelPath));
@ -2,7 +2,7 @@
 * Type definitions for Human result object
 */

import { Tensor } from '../dist/tfjs.esm.js';
import { Tensor } from './tfjs/types';

/** Face results
 * Combined results of face detector, face mesh, age, gender, emotion, embedding, iris models

@ -52,8 +52,8 @@ export interface Face {
matrix: [number, number, number, number, number, number, number, number, number],
gaze: { bearing: number, strength: number },
}
image?: typeof Tensor;
tensor: typeof Tensor,
image?: Tensor;
tensor: Tensor,
}

/** Body results
@ -35,6 +35,6 @@
"entryPoints": "src/human.ts",
"logLevel": "Info",
"logger": "none",
"theme": "wiki/theme/"
"theme": "wiki/theme/",
}
}
File diff suppressed because one or more lines are too long
@ -514,7 +514,7 @@
<section class="tsd-panel tsd-member tsd-kind-property tsd-parent-kind-class">
<a name="performance" class="tsd-anchor"></a>
<h3>performance</h3>
<div class="tsd-signature tsd-kind-icon">performance<span class="tsd-signature-symbol">:</span> <span class="tsd-signature-type">Record</span><span class="tsd-signature-symbol"><</span><span class="tsd-signature-type">string</span><span class="tsd-signature-symbol">, </span><span class="tsd-signature-type">unknown</span><span class="tsd-signature-symbol">></span></div>
<div class="tsd-signature tsd-kind-icon">performance<span class="tsd-signature-symbol">:</span> <span class="tsd-signature-type">Record</span><span class="tsd-signature-symbol"><</span><span class="tsd-signature-type">string</span><span class="tsd-signature-symbol">, </span><span class="tsd-signature-type">number</span><span class="tsd-signature-symbol">></span></div>
<aside class="tsd-sources">
</aside>
<div class="tsd-comment tsd-typography">
@ -97,6 +97,7 @@
<li class="tsd-kind-property tsd-parent-kind-interface"><a href="config.html#hand" class="tsd-kind-icon">hand</a></li>
<li class="tsd-kind-property tsd-parent-kind-interface"><a href="config.html#modelbasepath" class="tsd-kind-icon">model<wbr>Base<wbr>Path</a></li>
<li class="tsd-kind-property tsd-parent-kind-interface"><a href="config.html#object" class="tsd-kind-icon">object</a></li>
<li class="tsd-kind-property tsd-parent-kind-interface"><a href="config.html#skipframe" class="tsd-kind-icon">skip<wbr>Frame</a></li>
<li class="tsd-kind-property tsd-parent-kind-interface"><a href="config.html#warmup" class="tsd-kind-icon">warmup</a></li>
<li class="tsd-kind-property tsd-parent-kind-interface"><a href="config.html#wasmpath" class="tsd-kind-icon">wasm<wbr>Path</a></li>
</ul>
@ -133,7 +134,7 @@
<section class="tsd-panel tsd-member tsd-kind-property tsd-parent-kind-interface">
<a name="body" class="tsd-anchor"></a>
<h3>body</h3>
<div class="tsd-signature tsd-kind-icon">body<span class="tsd-signature-symbol">:</span> <span class="tsd-signature-symbol">{ </span>enabled<span class="tsd-signature-symbol">: </span><span class="tsd-signature-type">boolean</span><span class="tsd-signature-symbol">; </span>maxDetected<span class="tsd-signature-symbol">: </span><span class="tsd-signature-type">number</span><span class="tsd-signature-symbol">; </span>minConfidence<span class="tsd-signature-symbol">: </span><span class="tsd-signature-type">number</span><span class="tsd-signature-symbol">; </span>modelPath<span class="tsd-signature-symbol">: </span><span class="tsd-signature-type">string</span><span class="tsd-signature-symbol"> }</span></div>
<div class="tsd-signature tsd-kind-icon">body<span class="tsd-signature-symbol">:</span> <span class="tsd-signature-symbol">{ </span>enabled<span class="tsd-signature-symbol">: </span><span class="tsd-signature-type">boolean</span><span class="tsd-signature-symbol">; </span>maxDetected<span class="tsd-signature-symbol">: </span><span class="tsd-signature-type">number</span><span class="tsd-signature-symbol">; </span>minConfidence<span class="tsd-signature-symbol">: </span><span class="tsd-signature-type">number</span><span class="tsd-signature-symbol">; </span>modelPath<span class="tsd-signature-symbol">: </span><span class="tsd-signature-type">string</span><span class="tsd-signature-symbol">; </span>skipFrames<span class="tsd-signature-symbol">: </span><span class="tsd-signature-type">number</span><span class="tsd-signature-symbol"> }</span></div>
<aside class="tsd-sources">
</aside>
<div class="tsd-comment tsd-typography">

@ -162,6 +163,9 @@
<li class="tsd-parameter">
<h5>model<wbr>Path<span class="tsd-signature-symbol">: </span><span class="tsd-signature-type">string</span></h5>
</li>
<li class="tsd-parameter">
<h5>skip<wbr>Frames<span class="tsd-signature-symbol">: </span><span class="tsd-signature-type">number</span></h5>
</li>
</ul>
</div>
</section>
@ -615,6 +619,22 @@
</ul>
</div>
</section>
<section class="tsd-panel tsd-member tsd-kind-property tsd-parent-kind-interface">
<a name="skipframe" class="tsd-anchor"></a>
<h3>skip<wbr>Frame</h3>
<div class="tsd-signature tsd-kind-icon">skip<wbr>Frame<span class="tsd-signature-symbol">:</span> <span class="tsd-signature-type">boolean</span></div>
<aside class="tsd-sources">
</aside>
<div class="tsd-comment tsd-typography">
<div class="lead">
<p>Internal &amp; dynamic flag</p>
<ul>
<li>set per frame based on cacheSensitivity to indicate whether cached detection results can be reused</li>
</ul>
</div>
</div>
</section>
<section class="tsd-panel tsd-member tsd-kind-property tsd-parent-kind-interface">
<a name="warmup" class="tsd-anchor"></a>
<h3>warmup</h3>
@ -702,6 +722,9 @@
<li class=" tsd-kind-property tsd-parent-kind-interface">
<a href="config.html#object" class="tsd-kind-icon">object</a>
</li>
<li class=" tsd-kind-property tsd-parent-kind-interface">
<a href="config.html#skipframe" class="tsd-kind-icon">skip<wbr>Frame</a>
</li>
<li class=" tsd-kind-property tsd-parent-kind-interface">
<a href="config.html#warmup" class="tsd-kind-icon">warmup</a>
</li>
@ -216,7 +216,7 @@
<section class="tsd-panel tsd-member tsd-kind-property tsd-parent-kind-interface">
<a name="image" class="tsd-anchor"></a>
<h3><span class="tsd-flag ts-flagOptional">Optional</span> image</h3>
<div class="tsd-signature tsd-kind-icon">image<span class="tsd-signature-symbol">:</span> <span class="tsd-signature-symbol">typeof </span><span class="tsd-signature-type">__class</span></div>
<div class="tsd-signature tsd-kind-icon">image<span class="tsd-signature-symbol">:</span> <span class="tsd-signature-type">Tensor</span><span class="tsd-signature-symbol"><</span><span class="tsd-signature-type">Rank</span><span class="tsd-signature-symbol">></span></div>
<aside class="tsd-sources">
</aside>
</section>

@ -291,7 +291,7 @@
<section class="tsd-panel tsd-member tsd-kind-property tsd-parent-kind-interface">
<a name="tensor" class="tsd-anchor"></a>
<h3>tensor</h3>
<div class="tsd-signature tsd-kind-icon">tensor<span class="tsd-signature-symbol">:</span> <span class="tsd-signature-symbol">typeof </span><span class="tsd-signature-type">__class</span></div>
<div class="tsd-signature tsd-kind-icon">tensor<span class="tsd-signature-symbol">:</span> <span class="tsd-signature-type">Tensor</span><span class="tsd-signature-symbol"><</span><span class="tsd-signature-type">Rank</span><span class="tsd-signature-symbol">></span></div>
<aside class="tsd-sources">
</aside>
</section>
@ -6,8 +6,8 @@ export declare class BlazeFaceModel {
anchors: Tensor;
inputSize: number;
config: Config;
constructor(model: any, config: any);
getBoundingBoxes(inputImage: any): Promise<{
constructor(model: any, config: Config);
getBoundingBoxes(inputImage: Tensor): Promise<{
boxes: {
box: {
startPoint: Tensor;

@ -20,4 +20,4 @@ export declare class BlazeFaceModel {
scaleFactor: number[];
} | null>;
}
export declare function load(config: any): Promise<BlazeFaceModel>;
export declare function load(config: Config): Promise<BlazeFaceModel>;
@ -1,8 +1,10 @@
/**
 * FaceMesh & BlazeFace Module entry point
 */
import { Tensor } from '../tfjs/types';
import { Face } from '../result';
export declare function predict(input: any, config: any): Promise<Face[]>;
import { Config } from '../config';
export declare function predict(input: Tensor, config: Config): Promise<Face[]>;
export declare function load(config: any): Promise<[unknown, unknown, unknown]>;
export declare const triangulation: number[];
export declare const uvmap: number[][];
@ -1,6 +1,8 @@
/**
 * BlazePose Module
 */
import { Tensor, GraphModel } from '../tfjs/types';
import { Body } from '../result';
export declare function load(config: any): Promise<any>;
export declare function predict(image: any, config: any): Promise<Body[]>;
import { Config } from '../config';
export declare function load(config: Config): Promise<GraphModel>;
export declare function predict(image: Tensor, config: Config): Promise<Body[]>;
@ -27,6 +27,11 @@ export interface Config {
 * - set to 0 to disable caching
 */
cacheSensitivity: number;
/** Internal & dynamic flag
 * - set per frame based on cacheSensitivity to indicate whether cached detection results can be reused
 */
skipFrame: boolean;
/** Run input through image filters before inference
 * - image filters run with near-zero latency as they are executed on the GPU
 */

@ -133,6 +138,7 @@ export interface Config {
modelPath: string;
maxDetected: number;
minConfidence: number;
skipFrames: number;
};
/** Controls and configures all hand detection specific options
 * - enabled: true/false
@ -2,5 +2,7 @@
 * EfficientPose Module
 */
import { Body } from '../result';
export declare function load(config: any): Promise<any>;
export declare function predict(image: any, config: any): Promise<Body[]>;
import { GraphModel, Tensor } from '../tfjs/types';
import { Config } from '../config';
export declare function load(config: Config): Promise<GraphModel>;
export declare function predict(image: Tensor, config: Config): Promise<Body[]>;
@ -1,5 +1,7 @@
/**
 * Emotion Module
 */
export declare function load(config: any): Promise<any>;
export declare function predict(image: any, config: any, idx: any, count: any): Promise<unknown>;
import { Config } from '../config';
import { Tensor, GraphModel } from '../tfjs/types';
export declare function load(config: Config): Promise<GraphModel>;
export declare function predict(image: Tensor, config: Config, idx: any, count: any): Promise<unknown>;
@ -3,14 +3,15 @@
 * Returns Age, Gender, Descriptor
 * Implements Face similarity function
 */
import { Tensor } from '../tfjs/types';
import { Tensor, GraphModel } from '../tfjs/types';
import { Config } from '../config';
declare type DB = Array<{
name: string;
source: string;
embedding: number[];
}>;
export declare function load(config: any): Promise<any>;
export declare function similarity(embedding1: any, embedding2: any, order?: number): number;
export declare function load(config: Config): Promise<GraphModel>;
export declare function similarity(embedding1: Array<number>, embedding2: Array<number>, order?: number): number;
export declare function match(embedding: Array<number>, db: DB, threshold?: number): {
similarity: number;
name: string;

@ -18,5 +19,5 @@ export declare function match(embedding: Array<number>, db: DB, threshold?: numb
embedding: number[];
};
export declare function enhance(input: any): Tensor;
export declare function predict(image: any, config: any, idx: any, count: any): Promise<unknown>;
export declare function predict(image: Tensor, config: Config, idx: any, count: any): Promise<unknown>;
export {};
@ -2,5 +2,7 @@
 * HandPose module entry point
 */
import { Hand } from '../result';
export declare function predict(input: any, config: any): Promise<Hand[]>;
export declare function load(config: any): Promise<[unknown, unknown]>;
import { Tensor } from '../tfjs/types';
import { Config } from '../config';
export declare function predict(input: Tensor, config: Config): Promise<Hand[]>;
export declare function load(config: Config): Promise<[unknown, unknown]>;
@ -122,7 +122,7 @@ export declare class Human {
agent: string;
};
/** Performance object that contains values for all recently performed operations */
performance: Record<string, unknown>;
performance: Record<string, number>;
/**
 * Creates instance of Human library that is further used for all operations
 * @param userConfig: {@link Config}
@ -2,5 +2,7 @@
 * EfficientPose Module
 */
import { Body } from '../result';
export declare function load(config: any): Promise<any>;
export declare function predict(image: any, config: any): Promise<Body[]>;
import { GraphModel, Tensor } from '../tfjs/types';
import { Config } from '../config';
export declare function load(config: Config): Promise<GraphModel>;
export declare function predict(image: Tensor, config: Config): Promise<Body[]>;
@ -2,5 +2,7 @@
 * CenterNet object detection module
 */
import { Item } from '../result';
export declare function load(config: any): Promise<any>;
export declare function predict(input: any, config: any): Promise<Item[]>;
import { GraphModel, Tensor } from '../tfjs/types';
import { Config } from '../config';
export declare function load(config: Config): Promise<GraphModel>;
export declare function predict(input: Tensor, config: Config): Promise<Item[]>;
@ -2,5 +2,7 @@
 * NanoDet object detection module
 */
import { Item } from '../result';
export declare function load(config: any): Promise<any>;
export declare function predict(image: any, config: any): Promise<Item[]>;
import { GraphModel, Tensor } from '../tfjs/types';
import { Config } from '../config';
export declare function load(config: Config): Promise<GraphModel>;
export declare function predict(image: Tensor, config: Config): Promise<Item[]>;
@ -2,5 +2,7 @@
 * PoseNet module entry point
 */
import { Body } from '../result';
export declare function predict(input: any, config: any): Promise<Body[]>;
export declare function load(config: any): Promise<any>;
import { Tensor, GraphModel } from '../tfjs/types';
import { Config } from '../config';
export declare function predict(input: Tensor, config: Config): Promise<Body[]>;
export declare function load(config: Config): Promise<GraphModel>;
@ -1,7 +1,7 @@
/**
 * Type definitions for Human result object
 */
import { Tensor } from '../dist/tfjs.esm.js';
import { Tensor } from './tfjs/types';
/** Face results
 * Combined results of face detector, face mesh, age, gender, emotion, embedding, iris models
 * Some values may be null if specific model is not enabled

@ -60,8 +60,8 @@ export interface Face {
strength: number;
};
};
image?: typeof Tensor;
tensor: typeof Tensor;
image?: Tensor;
tensor: Tensor;
}
/** Body results
 *
wiki

@ -1 +1 @@
Subproject commit 7910d0735849c1fef131ca71969b8a6a080772da
Subproject commit 0087af5684c5722b2cf7ffd3db57b8117b7ac8c5