mirror of https://github.com/vladmandic/human
fix multiple memory leaks
parent 8862bb1277
commit e9f4145b60
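One pattern repeats throughout the diff below: intermediate tensors created during pre- and post-processing are now either allocated inside tf.tidy() or handed to tf.dispose() as soon as their last consumer has run, so their WebGL/heap memory is released instead of accumulating per frame. A minimal sketch of that pattern, assuming the public @tensorflow/tfjs API (the bundled code calls the same functions through the tfjs_esm_exports namespace; model and input here are illustrative):

```js
const tf = require('@tensorflow/tfjs');

// tf.tidy() releases every intermediate tensor created inside the callback
// and keeps only the returned tensor alive.
function normalize(input) {
  return tf.tidy(() => tf.sub(tf.div(input, 127.5), 0.5));
}

async function runOnce(model, input) {
  const resized = tf.image.resizeBilinear(input, [256, 256]);
  const normalized = normalize(resized);
  tf.dispose(resized); // release as soon as the last consumer has run
  const res = model.execute(normalized); // assuming a single output tensor
  tf.dispose(normalized);
  const data = await res.data(); // read values back before disposing the output
  tf.dispose(res);
  return data;
}
```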
@@ -13,7 +13,7 @@
 "locations": ["dist/*", "types/*", "typedoc/*"]
 },
 "lint": {
-"locations": [ "src/**/*.ts", "test/*.js", "demo/**/*.js" ],
+"locations": [ "*.json", "src/**/*.ts", "test/**/*.js", "demo/**/*.js" ],
 "rules": { }
 },
 "changelog": {
@@ -133,7 +133,7 @@
 ]
 },
 "watch": {
-"locations": [ "src/**", "tfjs/*" ]
+"locations": [ "src/**/*", "tfjs/**/*" ]
 },
 "typescript": {
 "allowJs": false
@@ -238,16 +238,16 @@ function getBoxCenter(box6) {
 box6.startPoint[1] + (box6.endPoint[1] - box6.startPoint[1]) / 2
 ];
 }
-function cutBoxFromImageAndResize(box6, image19, cropSize) {
-const h = image19.shape[1];
-const w = image19.shape[2];
+function cutBoxFromImageAndResize(box6, image20, cropSize) {
+const h = image20.shape[1];
+const w = image20.shape[2];
 const boxes = [[
 box6.startPoint[1] / h,
 box6.startPoint[0] / w,
 box6.endPoint[1] / h,
 box6.endPoint[0] / w
 ]];
-return tfjs_esm_exports.image.cropAndResize(image19, boxes, [0], cropSize);
+return tfjs_esm_exports.image.cropAndResize(image20, boxes, [0], cropSize);
 }
 function enlargeBox(box6, factor = 1.5) {
 const center = getBoxCenter(box6);
@@ -391,7 +391,7 @@ var BlazeFaceModel = class {
 async getBoundingBoxes(inputImage, userConfig) {
 var _a, _b, _c, _d;
 if (!inputImage || inputImage["isDisposedInternal"] || inputImage.shape.length !== 4 || inputImage.shape[1] < 1 || inputImage.shape[2] < 1)
-return null;
+return { boxes: [] };
 const [batch, boxes, scores] = tfjs_esm_exports.tidy(() => {
 const resizedImage = tfjs_esm_exports.image.resizeBilinear(inputImage, [this.inputSize, this.inputSize]);
 const normalizedImage = tfjs_esm_exports.sub(tfjs_esm_exports.div(resizedImage, 127.5), 0.5);
@@ -421,11 +421,9 @@ var BlazeFaceModel = class {
 const confidence = scoresData[nms[i]];
 if (confidence > (((_d = this.config.face.detector) == null ? void 0 : _d.minConfidence) || 0)) {
 const boundingBox = tfjs_esm_exports.slice(boxes, [nms[i], 0], [1, -1]);
-const localBox = createBox(boundingBox);
-tfjs_esm_exports.dispose(boundingBox);
-const anchor = this.anchorsData[nms[i]];
 const landmarks = tfjs_esm_exports.tidy(() => tfjs_esm_exports.reshape(tfjs_esm_exports.squeeze(tfjs_esm_exports.slice(batch, [nms[i], keypointsCount - 1], [1, -1])), [keypointsCount, -1]));
-annotatedBoxes.push({ box: localBox, landmarks, anchor, confidence });
+annotatedBoxes.push({ box: createBox(boundingBox), landmarks, anchor: this.anchorsData[nms[i]], confidence });
+tfjs_esm_exports.dispose(boundingBox);
 }
 }
 tfjs_esm_exports.dispose(batch);
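The BlazeFace hunk above is typical of the smaller fixes: the temporaries localBox and anchor are inlined, and tfjs_esm_exports.dispose(boundingBox) now runs immediately after the push, so the sliced tensor is released right after its final consumer instead of lingering in scope. A hedged, self-contained sketch of the same pattern, with tf standing in for the bundled namespace and extractBox as a hypothetical helper:

```js
// Slice one detection from a 2-D boxes tensor, read it, and release it immediately.
function extractBox(boxes, index) {
  const boundingBox = tf.slice(boxes, [index, 0], [1, -1]); // per-detection crop
  const box = { raw: boundingBox.dataSync() };              // last consumer of the tensor
  tf.dispose(boundingBox);                                  // safe to release now
  return box;
}
```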
@@ -3913,12 +3911,12 @@ var Pipeline = class {
 const angle = computeRotation(box6.landmarks[indexOfMouth], box6.landmarks[indexOfForehead]);
 const faceCenter = getBoxCenter({ startPoint: box6.startPoint, endPoint: box6.endPoint });
 const faceCenterNormalized = [faceCenter[0] / input.shape[2], faceCenter[1] / input.shape[1]];
-const rotatedImage = tfjs_esm_exports.image.rotateWithOffset(input, angle, 0, faceCenterNormalized);
+const rotated = tfjs_esm_exports.image.rotateWithOffset(input, angle, 0, faceCenterNormalized);
 const rotationMatrix = buildRotationMatrix(-angle, faceCenter);
-const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotatedImage, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotatedImage, [this.boxSize, this.boxSize]);
+const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotated, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotated, [this.boxSize, this.boxSize]);
 const face5 = tfjs_esm_exports.div(cut, 255);
 tfjs_esm_exports.dispose(cut);
-tfjs_esm_exports.dispose(rotatedImage);
+tfjs_esm_exports.dispose(rotated);
 return [angle, rotationMatrix, face5];
 }
 async augmentIris(rawCoords, face5) {
@@ -4002,11 +4000,11 @@ var Pipeline = class {
 [angle, rotationMatrix, face5] = this.correctFaceRotation(config3, box6, input);
 } else {
 rotationMatrix = IDENTITY_MATRIX;
-const clonedImage = input.clone();
-const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, clonedImage, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, clonedImage, [this.boxSize, this.boxSize]);
+const cloned = input.clone();
+const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, cloned, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, cloned, [this.boxSize, this.boxSize]);
 face5 = tfjs_esm_exports.div(cut, 255);
 tfjs_esm_exports.dispose(cut);
-tfjs_esm_exports.dispose(clonedImage);
+tfjs_esm_exports.dispose(cloned);
 }
 if (!config3.face.mesh.enabled) {
 results.push({
@@ -4035,6 +4033,7 @@ var Pipeline = class {
 const mesh = this.transformRawCoords(rawCoords, box6, angle, rotationMatrix);
 box6 = { ...enlargeBox(calculateLandmarksBoundingBox(mesh), 1.5), confidence: box6.confidence };
 if (config3.face.detector.rotation && config3.face.mesh.enabled && config3.face.description.enabled && env2.kernels.includes("rotatewithoffset")) {
+tfjs_esm_exports.dispose(face5);
 [angle, rotationMatrix, face5] = this.correctFaceRotation(config3, box6, input);
 }
 results.push({
@@ -4101,8 +4100,6 @@ async function predict(input, config3) {
 annotations: annotations3,
 tensor: prediction.image
 });
-if (prediction.coords)
-tfjs_esm_exports.dispose(prediction.coords);
 }
 return results;
 }
@@ -4182,20 +4179,20 @@ function match(embedding, db, threshold = 0) {
 return best;
 }
 function enhance(input) {
-const image19 = tfjs_esm_exports.tidy(() => {
-const tensor2 = input.image || input.tensor || input;
-if (!(tensor2 instanceof tfjs_esm_exports.Tensor))
+const image20 = tfjs_esm_exports.tidy(() => {
+const tensor3 = input.image || input.tensor || input;
+if (!(tensor3 instanceof tfjs_esm_exports.Tensor))
 return null;
 const box6 = [[0.05, 0.15, 0.85, 0.85]];
 if (!model.inputs[0].shape)
 return null;
-const crop = tensor2.shape.length === 3 ? tfjs_esm_exports.image.cropAndResize(tfjs_esm_exports.expandDims(tensor2, 0), box6, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) : tfjs_esm_exports.image.cropAndResize(tensor2, box6, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
+const crop = tensor3.shape.length === 3 ? tfjs_esm_exports.image.cropAndResize(tfjs_esm_exports.expandDims(tensor3, 0), box6, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) : tfjs_esm_exports.image.cropAndResize(tensor3, box6, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
 const norm = tfjs_esm_exports.mul(crop, 255);
 return norm;
 });
-return image19;
+return image20;
 }
-async function predict2(image19, config3, idx, count2) {
+async function predict2(image20, config3, idx, count2) {
 var _a, _b, _c;
 if (!model)
 return null;
@@ -4206,7 +4203,7 @@ async function predict2(image19, config3, idx, count2) {
 skipped = 0;
 return new Promise(async (resolve) => {
 var _a2, _b2;
-const enhanced = enhance(image19);
+const enhanced = enhance(image20);
 let resT;
 const obj = {
 age: 0,
@@ -4226,6 +4223,7 @@ async function predict2(image19, config3, idx, count2) {
 }
 const argmax = tfjs_esm_exports.argMax(resT.find((t) => t.shape[1] === 100), 1);
 const age = (await argmax.data())[0];
+tfjs_esm_exports.dispose(argmax);
 const all2 = await resT.find((t) => t.shape[1] === 100).data();
 obj.age = Math.round(all2[age - 1] > all2[age + 1] ? 10 * age - 100 * all2[age - 1] : 10 * age + 100 * all2[age + 1]) / 10;
 const desc = resT.find((t) => t.shape[1] === 1024);
@@ -4258,7 +4256,7 @@ async function load4(config3) {
 log("cached model:", model2.modelUrl);
 return model2;
 }
-async function predict3(image19, config3, idx, count2) {
+async function predict3(image20, config3, idx, count2) {
 var _a;
 if (!model2)
 return null;
@@ -4269,7 +4267,7 @@ async function predict3(image19, config3, idx, count2) {
 skipped2 = 0;
 return new Promise(async (resolve) => {
 var _a2, _b;
-const resize = tfjs_esm_exports.image.resizeBilinear(image19, [model2.inputs[0].shape[2], model2.inputs[0].shape[1]], false);
+const resize = tfjs_esm_exports.image.resizeBilinear(image20, [model2.inputs[0].shape[2], model2.inputs[0].shape[1]], false);
 const [red, green, blue] = tfjs_esm_exports.split(resize, 3, 3);
 tfjs_esm_exports.dispose(resize);
 const redNorm = tfjs_esm_exports.mul(red, rgb[0]);
@@ -4622,7 +4620,7 @@ async function predict4(input, config3) {
 results3d[1] = results3d[1].sigmoid();
 return results3d;
 });
-const buffers = await Promise.all(res.map((tensor2) => tensor2.buffer()));
+const buffers = await Promise.all(res.map((tensor3) => tensor3.buffer()));
 for (const t of res)
 tfjs_esm_exports.dispose(t);
 const decoded = await decode(buffers[0], buffers[1], buffers[2], buffers[3], config3.body.maxDetected, config3.body.minConfidence);
@@ -4656,16 +4654,16 @@ function getBoxCenter2(box6) {
 box6.startPoint[1] + (box6.endPoint[1] - box6.startPoint[1]) / 2
 ];
 }
-function cutBoxFromImageAndResize2(box6, image19, cropSize) {
-const h = image19.shape[1];
-const w = image19.shape[2];
+function cutBoxFromImageAndResize2(box6, image20, cropSize) {
+const h = image20.shape[1];
+const w = image20.shape[2];
 const boxes = [[
 box6.startPoint[1] / h,
 box6.startPoint[0] / w,
 box6.endPoint[1] / h,
 box6.endPoint[0] / w
 ]];
-return tfjs_esm_exports.image.cropAndResize(image19, boxes, [0], cropSize);
+return tfjs_esm_exports.image.cropAndResize(image20, boxes, [0], cropSize);
 }
 function scaleBoxCoordinates2(box6, factor) {
 const startPoint = [box6.startPoint[0] * factor[0], box6.startPoint[1] * factor[1]];
@@ -7685,16 +7683,16 @@ var HandDetector = class {
 const palmLandmarks = tfjs_esm_exports.tidy(() => tfjs_esm_exports.reshape(this.normalizeLandmarks(tfjs_esm_exports.slice(t.predictions, [index, 5], [1, 14]), index), [-1, 2]));
 hands.push({ box: palmBox, palmLandmarks, confidence: scores[index] });
 }
-for (const tensor2 of Object.keys(t))
-tfjs_esm_exports.dispose(t[tensor2]);
+for (const tensor3 of Object.keys(t))
+tfjs_esm_exports.dispose(t[tensor3]);
 return hands;
 }
 async estimateHandBounds(input, config3) {
 const inputHeight = input.shape[1];
 const inputWidth = input.shape[2];
-const image19 = tfjs_esm_exports.tidy(() => tfjs_esm_exports.sub(tfjs_esm_exports.div(tfjs_esm_exports.image.resizeBilinear(input, [this.inputSize, this.inputSize]), 127.5), 1));
-const predictions = await this.getBoxes(image19, config3);
-tfjs_esm_exports.dispose(image19);
+const image20 = tfjs_esm_exports.tidy(() => tfjs_esm_exports.sub(tfjs_esm_exports.div(tfjs_esm_exports.image.resizeBilinear(input, [this.inputSize, this.inputSize]), 127.5), 1));
+const predictions = await this.getBoxes(image20, config3);
+tfjs_esm_exports.dispose(image20);
 const hands = [];
 if (!predictions || predictions.length === 0)
 return hands;
@@ -7836,11 +7834,11 @@ var HandPipeline = class {
 Math.trunc(coord[2])
 ]);
 }
-async estimateHands(image19, config3) {
+async estimateHands(image20, config3) {
 let useFreshBox = false;
 let boxes;
 if (this.skipped === 0 || this.skipped > config3.hand.skipFrames || !config3.hand.landmarks || !config3.skipFrame) {
-boxes = await this.handDetector.estimateHandBounds(image19, config3);
+boxes = await this.handDetector.estimateHandBounds(image20, config3);
 this.skipped = 0;
 }
 if (config3.skipFrame)
@@ -7859,8 +7857,8 @@ var HandPipeline = class {
 if (config3.hand.landmarks) {
 const angle = config3.hand.rotation ? computeRotation2(currentBox.palmLandmarks[palmLandmarksPalmBase], currentBox.palmLandmarks[palmLandmarksMiddleFingerBase]) : 0;
 const palmCenter = getBoxCenter2(currentBox);
-const palmCenterNormalized = [palmCenter[0] / image19.shape[2], palmCenter[1] / image19.shape[1]];
-const rotatedImage = config3.hand.rotation && env2.kernels.includes("rotatewithoffset") ? tfjs_esm_exports.image.rotateWithOffset(image19, angle, 0, palmCenterNormalized) : image19.clone();
+const palmCenterNormalized = [palmCenter[0] / image20.shape[2], palmCenter[1] / image20.shape[1]];
+const rotatedImage = config3.hand.rotation && env2.kernels.includes("rotatewithoffset") ? tfjs_esm_exports.image.rotateWithOffset(image20, angle, 0, palmCenterNormalized) : image20.clone();
 const rotationMatrix = buildRotationMatrix2(-angle, palmCenter);
 const newBox = useFreshBox ? this.getBoxForPalmLandmarks(currentBox.palmLandmarks, rotationMatrix) : currentBox;
 const croppedInput = cutBoxFromImageAndResize2(newBox, rotatedImage, [this.inputSize, this.inputSize]);
@@ -8462,13 +8460,13 @@ async function load7(config3) {
 log("cached model:", model4["modelUrl"]);
 return model4;
 }
-async function predict6(image19, config3) {
+async function predict6(image20, config3) {
 if (!model4)
 return [];
 if (!config3.body.enabled)
 return [];
-const imgSize = { width: image19.shape[2] || 0, height: image19.shape[1] || 0 };
-const resize = tfjs_esm_exports.image.resizeBilinear(image19, [model4["width"], model4["height"]], false);
+const imgSize = { width: image20.shape[2] || 0, height: image20.shape[1] || 0 };
+const resize = tfjs_esm_exports.image.resizeBilinear(image20, [model4["width"], model4["height"]], false);
 const normalize = tfjs_esm_exports.div(resize, [255]);
 tfjs_esm_exports.dispose(resize);
 const resT = await model4.predict(normalize);
@@ -8544,7 +8542,7 @@ function max2d(inputs, minScore) {
 return [0, 0, newScore];
 });
 }
-async function predict7(image19, config3) {
+async function predict7(image20, config3) {
 var _a;
 if (skipped3 < (((_a = config3.body) == null ? void 0 : _a.skipFrames) || 0) && config3.skipFrame && Object.keys(keypoints).length > 0) {
 skipped3++;
@@ -8553,26 +8551,26 @@ async function predict7(image19, config3) {
 skipped3 = 0;
 return new Promise(async (resolve) => {
 var _a2;
-const tensor2 = tfjs_esm_exports.tidy(() => {
+const tensor3 = tfjs_esm_exports.tidy(() => {
 if (!model5.inputs[0].shape)
 return null;
-const resize = tfjs_esm_exports.image.resizeBilinear(image19, [model5.inputs[0].shape[2], model5.inputs[0].shape[1]], false);
+const resize = tfjs_esm_exports.image.resizeBilinear(image20, [model5.inputs[0].shape[2], model5.inputs[0].shape[1]], false);
 const enhance2 = tfjs_esm_exports.mul(resize, 2);
 const norm = enhance2.sub(1);
 return norm;
 });
 let resT;
 if (config3.body.enabled)
-resT = await model5.predict(tensor2);
-tfjs_esm_exports.dispose(tensor2);
+resT = await model5.predict(tensor3);
+tfjs_esm_exports.dispose(tensor3);
 if (resT) {
 keypoints.length = 0;
 const squeeze7 = resT.squeeze();
 tfjs_esm_exports.dispose(resT);
-const stack2 = squeeze7.unstack(2);
+const stack3 = squeeze7.unstack(2);
 tfjs_esm_exports.dispose(squeeze7);
-for (let id = 0; id < stack2.length; id++) {
-const [x2, y2, partScore] = max2d(stack2[id], config3.body.minConfidence);
+for (let id = 0; id < stack3.length; id++) {
+const [x2, y2, partScore] = max2d(stack3[id], config3.body.minConfidence);
 if (score > (((_a2 = config3.body) == null ? void 0 : _a2.minConfidence) || 0)) {
 keypoints.push({
 score: Math.round(100 * partScore) / 100,
@@ -8582,13 +8580,13 @@ async function predict7(image19, config3) {
 y2 / model5.inputs[0].shape[1]
 ],
 position: [
-Math.round(image19.shape[2] * x2 / model5.inputs[0].shape[2]),
-Math.round(image19.shape[1] * y2 / model5.inputs[0].shape[1])
+Math.round(image20.shape[2] * x2 / model5.inputs[0].shape[2]),
+Math.round(image20.shape[1] * y2 / model5.inputs[0].shape[1])
 ]
 });
 }
 }
-stack2.forEach((s) => tfjs_esm_exports.dispose(s));
+stack3.forEach((s) => tfjs_esm_exports.dispose(s));
 }
 score = keypoints.reduce((prev, curr) => curr.score > prev ? curr.score : prev, 0);
 const x = keypoints.map((a) => a.position[0]);
@@ -8630,7 +8628,7 @@ async function load9(config3) {
 log("cached model:", model6["modelUrl"]);
 return model6;
 }
-async function parseSinglePose(res, config3, image19) {
+async function parseSinglePose(res, config3, image20) {
 keypoints2.length = 0;
 const kpt3 = res[0][0];
 for (let id = 0; id < kpt3.length; id++) {
@@ -8644,8 +8642,8 @@ async function parseSinglePose(res, config3, image19) {
 kpt3[id][0]
 ],
 position: [
-Math.round((image19.shape[2] || 0) * kpt3[id][1]),
-Math.round((image19.shape[1] || 0) * kpt3[id][0])
+Math.round((image20.shape[2] || 0) * kpt3[id][1]),
+Math.round((image20.shape[1] || 0) * kpt3[id][0])
 ]
 });
 }
@@ -8671,7 +8669,7 @@ async function parseSinglePose(res, config3, image19) {
 persons2.push({ id: 0, score: score2, box: box5, boxRaw: boxRaw2, keypoints: keypoints2 });
 return persons2;
 }
-async function parseMultiPose(res, config3, image19) {
+async function parseMultiPose(res, config3, image20) {
 const persons2 = [];
 for (let p = 0; p < res[0].length; p++) {
 const kpt3 = res[0][p];
@@ -8690,8 +8688,8 @@ async function parseMultiPose(res, config3, image19) {
 kpt3[3 * i + 0]
 ],
 position: [
-Math.trunc(kpt3[3 * i + 1] * (image19.shape[2] || 0)),
-Math.trunc(kpt3[3 * i + 0] * (image19.shape[1] || 0))
+Math.trunc(kpt3[3 * i + 1] * (image20.shape[2] || 0)),
+Math.trunc(kpt3[3 * i + 0] * (image20.shape[1] || 0))
 ]
 });
 }
@@ -8702,45 +8700,45 @@ async function parseMultiPose(res, config3, image19) {
 score: score2,
 boxRaw: boxRaw2,
 box: [
-Math.trunc(boxRaw2[0] * (image19.shape[2] || 0)),
-Math.trunc(boxRaw2[1] * (image19.shape[1] || 0)),
-Math.trunc(boxRaw2[2] * (image19.shape[2] || 0)),
-Math.trunc(boxRaw2[3] * (image19.shape[1] || 0))
+Math.trunc(boxRaw2[0] * (image20.shape[2] || 0)),
+Math.trunc(boxRaw2[1] * (image20.shape[1] || 0)),
+Math.trunc(boxRaw2[2] * (image20.shape[2] || 0)),
+Math.trunc(boxRaw2[3] * (image20.shape[1] || 0))
 ],
 keypoints: keypoints2
 });
 }
 return persons2;
 }
-async function predict8(image19, config3) {
+async function predict8(image20, config3) {
 if (skipped4 < (config3.body.skipFrames || 0) && config3.skipFrame && Object.keys(keypoints2).length > 0) {
 skipped4++;
 return [{ id: 0, score: score2, box: box5, boxRaw: boxRaw2, keypoints: keypoints2 }];
 }
 skipped4 = 0;
 return new Promise(async (resolve) => {
-const tensor2 = tfjs_esm_exports.tidy(() => {
+const tensor3 = tfjs_esm_exports.tidy(() => {
 if (!model6.inputs[0].shape)
 return null;
 let inputSize = model6.inputs[0].shape[2];
 if (inputSize === -1)
 inputSize = 256;
-const resize = tfjs_esm_exports.image.resizeBilinear(image19, [inputSize, inputSize], false);
+const resize = tfjs_esm_exports.image.resizeBilinear(image20, [inputSize, inputSize], false);
 const cast4 = tfjs_esm_exports.cast(resize, "int32");
 return cast4;
 });
 let resT;
 if (config3.body.enabled)
-resT = await model6.predict(tensor2);
-tfjs_esm_exports.dispose(tensor2);
+resT = await model6.predict(tensor3);
+tfjs_esm_exports.dispose(tensor3);
 if (!resT)
 resolve([]);
 const res = await resT.array();
 let persons2;
 if (resT.shape[2] === 17)
-persons2 = await parseSinglePose(res, config3, image19);
+persons2 = await parseSinglePose(res, config3, image20);
 else if (resT.shape[2] === 56)
-persons2 = await parseMultiPose(res, config3, image19);
+persons2 = await parseMultiPose(res, config3, image20);
 tfjs_esm_exports.dispose(resT);
 resolve(persons2);
 });
@@ -8911,7 +8909,7 @@ async function process2(res, inputSize, outputShape, config3) {
 results = results.filter((_val, idx) => nmsIdx.includes(idx)).sort((a, b) => b.score - a.score);
 return results;
 }
-async function predict9(image19, config3) {
+async function predict9(image20, config3) {
 if (skipped5 < (config3.object.skipFrames || 0) && config3.skipFrame && last3.length > 0) {
 skipped5++;
 return last3;
@@ -8920,8 +8918,8 @@ async function predict9(image19, config3) {
 if (!env2.kernels.includes("mod") || !env2.kernels.includes("sparsetodense"))
 return last3;
 return new Promise(async (resolve) => {
-const outputSize = [image19.shape[2], image19.shape[1]];
-const resize = tfjs_esm_exports.image.resizeBilinear(image19, [model7.inputSize, model7.inputSize], false);
+const outputSize = [image20.shape[2], image20.shape[1]];
+const resize = tfjs_esm_exports.image.resizeBilinear(image20, [model7.inputSize, model7.inputSize], false);
 const norm = tfjs_esm_exports.div(resize, 255);
 const transpose = norm.transpose([0, 3, 1, 2]);
 tfjs_esm_exports.dispose(norm);
@@ -8966,6 +8964,7 @@ async function process3(res, inputSize, outputShape, config3) {
 tfjs_esm_exports.dispose(squeezeT);
 const stackT = tfjs_esm_exports.stack([arr[1], arr[0], arr[3], arr[2]], 1);
 const boxesT = tfjs_esm_exports.squeeze(stackT);
+tfjs_esm_exports.dispose(stackT);
 const scoresT = tfjs_esm_exports.squeeze(arr[4]);
 const classesT = tfjs_esm_exports.squeeze(arr[5]);
 arr.forEach((t) => tfjs_esm_exports.dispose(t));
@@ -9169,8 +9168,8 @@ function GLImageFilter(params) {
 gl.uniform1f(_currentProgram.uniform.flipY, flipY ? -1 : 1);
 gl.drawArrays(gl.TRIANGLES, 0, 6);
 };
-this.apply = function(image19) {
-_resize(image19.width, image19.height);
+this.apply = function(image20) {
+_resize(image20.width, image20.height);
 _drawCount = 0;
 if (!_sourceTexture)
 _sourceTexture = gl.createTexture();
@@ -9179,7 +9178,7 @@ function GLImageFilter(params) {
 gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
 gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
 gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
-gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image19);
+gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image20);
 if (_filterChain.length === 0) {
 _draw();
 return _canvas;
@@ -9730,16 +9729,33 @@ var maxSize = 2048;
 var inCanvas;
 var outCanvas;
 var fx;
+function canvas(width, height) {
+let c;
+if (env2.browser) {
+if (typeof OffscreenCanvas !== "undefined") {
+c = new OffscreenCanvas(width, height);
+} else {
+c = document.createElement("canvas");
+c.width = width;
+c.height = height;
+}
+} else {
+c = typeof env2.Canvas !== "undefined" ? new env2.Canvas(width, height) : null;
+}
+if (!c)
+throw new Error("Human: Cannot create canvas");
+return c;
+}
 function process4(input, config3) {
-let tensor2;
+let tensor3;
 if (!input)
 throw new Error("Human: Input is missing");
-if (!(input instanceof tfjs_esm_exports.Tensor) && !(typeof Image !== "undefined" && input instanceof Image) && !(typeof ImageData !== "undefined" && input instanceof ImageData) && !(typeof ImageBitmap !== "undefined" && input instanceof ImageBitmap) && !(typeof HTMLImageElement !== "undefined" && input instanceof HTMLImageElement) && !(typeof HTMLMediaElement !== "undefined" && input instanceof HTMLMediaElement) && !(typeof HTMLVideoElement !== "undefined" && input instanceof HTMLVideoElement) && !(typeof HTMLCanvasElement !== "undefined" && input instanceof HTMLCanvasElement) && !(typeof OffscreenCanvas !== "undefined" && input instanceof OffscreenCanvas)) {
+if (!(input instanceof tfjs_esm_exports.Tensor) && !(typeof Image !== "undefined" && input instanceof Image) && !(typeof env2.Canvas !== "undefined" && input instanceof env2.Canvas) && !(typeof ImageData !== "undefined" && input instanceof ImageData) && !(typeof ImageBitmap !== "undefined" && input instanceof ImageBitmap) && !(typeof HTMLImageElement !== "undefined" && input instanceof HTMLImageElement) && !(typeof HTMLMediaElement !== "undefined" && input instanceof HTMLMediaElement) && !(typeof HTMLVideoElement !== "undefined" && input instanceof HTMLVideoElement) && !(typeof HTMLCanvasElement !== "undefined" && input instanceof HTMLCanvasElement) && !(typeof OffscreenCanvas !== "undefined" && input instanceof OffscreenCanvas)) {
 throw new Error("Human: Input type is not recognized");
 }
 if (input instanceof tfjs_esm_exports.Tensor) {
 if (input.shape && input.shape.length === 4 && input.shape[0] === 1 && input.shape[3] === 3)
-tensor2 = tfjs_esm_exports.clone(input);
+tensor3 = tfjs_esm_exports.clone(input);
 else
 throw new Error(`Human: Input tensor shape must be [1, height, width, 3] and instead was ${input.shape}`);
 } else {
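The new canvas() helper above centralizes canvas creation: OffscreenCanvas when available in the browser, a DOM canvas as fallback, and a user-injected env.Canvas class (for example the node-canvas package) outside the browser. A condensed, hedged sketch of the same decision tree as a standalone function (the env parameter stands in for the library's env2 object):

```js
function createCanvas(width, height, env) {
  if (env && env.browser) {
    if (typeof OffscreenCanvas !== 'undefined') return new OffscreenCanvas(width, height);
    const c = document.createElement('canvas'); // DOM fallback
    c.width = width;
    c.height = height;
    return c;
  }
  // Node: rely on an injected Canvas implementation such as the node-canvas package
  if (env && typeof env.Canvas !== 'undefined') return new env.Canvas(width, height);
  throw new Error('Cannot create canvas');
}
```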
@@ -9767,15 +9783,10 @@ function process4(input, config3) {
 targetHeight = originalHeight * ((config3.filter.width || 0) / originalWidth);
 if (!targetWidth || !targetHeight)
 throw new Error("Human: Input cannot determine dimension");
-if (!inCanvas || (inCanvas == null ? void 0 : inCanvas.width) !== targetWidth || (inCanvas == null ? void 0 : inCanvas.height) !== targetHeight) {
-inCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
-if ((inCanvas == null ? void 0 : inCanvas.width) !== targetWidth)
-inCanvas.width = targetWidth;
-if ((inCanvas == null ? void 0 : inCanvas.height) !== targetHeight)
-inCanvas.height = targetHeight;
-}
+if (!inCanvas || (inCanvas == null ? void 0 : inCanvas.width) !== targetWidth || (inCanvas == null ? void 0 : inCanvas.height) !== targetHeight)
+inCanvas = canvas(targetWidth, targetHeight);
 const ctx = inCanvas.getContext("2d");
-if (input instanceof ImageData) {
+if (typeof ImageData !== "undefined" && input instanceof ImageData) {
 ctx.putImageData(input, 0, 0);
 } else {
 if (config3.filter.flip && typeof ctx.translate !== "undefined") {
@@ -9789,7 +9800,7 @@ function process4(input, config3) {
 }
 if (config3.filter.enabled) {
 if (!fx || !outCanvas || inCanvas.width !== outCanvas.width || (inCanvas == null ? void 0 : inCanvas.height) !== (outCanvas == null ? void 0 : outCanvas.height)) {
-outCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(inCanvas == null ? void 0 : inCanvas.width, inCanvas == null ? void 0 : inCanvas.height) : document.createElement("canvas");
+outCanvas = canvas(inCanvas == null ? void 0 : inCanvas.width, inCanvas == null ? void 0 : inCanvas.height);
 if ((outCanvas == null ? void 0 : outCanvas.width) !== (inCanvas == null ? void 0 : inCanvas.width))
 outCanvas.width = inCanvas == null ? void 0 : inCanvas.width;
 if ((outCanvas == null ? void 0 : outCanvas.height) !== (inCanvas == null ? void 0 : inCanvas.height))
@@ -9832,58 +9843,70 @@ function process4(input, config3) {
 if (fx)
 fx = null;
 }
-if (!tensor2) {
+if (!tensor3) {
 let pixels;
 if (outCanvas.data) {
 const shape = [outCanvas.height, outCanvas.width, 3];
 pixels = tfjs_esm_exports.tensor3d(outCanvas.data, shape, "int32");
-} else if (outCanvas instanceof ImageData) {
+} else if (typeof ImageData !== "undefined" && outCanvas instanceof ImageData) {
 pixels = tfjs_esm_exports.browser ? tfjs_esm_exports.browser.fromPixels(outCanvas) : null;
 } else if (config3.backend === "webgl" || config3.backend === "humangl") {
-const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
+const tempCanvas = canvas(targetWidth, targetHeight);
 tempCanvas.width = targetWidth;
 tempCanvas.height = targetHeight;
 const tempCtx = tempCanvas.getContext("2d");
 tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
-pixels = tfjs_esm_exports.browser ? tfjs_esm_exports.browser.fromPixels(tempCanvas) : null;
+pixels = tfjs_esm_exports.browser && env2.browser ? tfjs_esm_exports.browser.fromPixels(tempCanvas) : null;
 } else {
-const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
+const tempCanvas = canvas(targetWidth, targetHeight);
 tempCanvas.width = targetWidth;
 tempCanvas.height = targetHeight;
 const tempCtx = tempCanvas.getContext("2d");
-tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
-const data2 = tempCtx == null ? void 0 : tempCtx.getImageData(0, 0, targetWidth, targetHeight);
-pixels = tfjs_esm_exports.browser ? tfjs_esm_exports.browser.fromPixels(data2) : null;
+tempCtx.drawImage(outCanvas, 0, 0);
+const data2 = tempCtx.getImageData(0, 0, targetWidth, targetHeight);
+if (tfjs_esm_exports.browser && env2.browser) {
+pixels = tfjs_esm_exports.browser.fromPixels(data2);
+} else {
+pixels = tfjs_esm_exports.tidy(() => {
+const imageData = tfjs_esm_exports.tensor(Array.from(data2.data), [targetWidth, targetHeight, 4]);
+const channels = tfjs_esm_exports.split(imageData, 4, 2);
+const rgb2 = tfjs_esm_exports.stack([channels[0], channels[1], channels[2]], 2);
+const expand = tfjs_esm_exports.reshape(rgb2, [imageData.shape[0], imageData.shape[1], 3]);
+return expand;
+});
+}
 }
 if (pixels) {
 const casted = tfjs_esm_exports.cast(pixels, "float32");
-tensor2 = tfjs_esm_exports.expandDims(casted, 0);
+tensor3 = tfjs_esm_exports.expandDims(casted, 0);
 tfjs_esm_exports.dispose(pixels);
 tfjs_esm_exports.dispose(casted);
 } else {
+tensor3 = tfjs_esm_exports.zeros([1, targetWidth, targetHeight, 3]);
 throw new Error("Human: Cannot create tensor from input");
 }
 }
 }
-const canvas2 = config3.filter.return ? outCanvas : null;
-return { tensor: tensor2, canvas: canvas2 };
+return { tensor: tensor3, canvas: config3.filter.return ? outCanvas : null };
 }
 var lastInputSum = 0;
 var lastCacheDiff = 1;
-async function skip(instance, input) {
-if (instance.config.cacheSensitivity === 0)
+async function skip(config3, input) {
+if (config3.cacheSensitivity === 0)
 return false;
 const resizeFact = 32;
 if (!input.shape[1] || !input.shape[2])
 return false;
 const reduced = tfjs_esm_exports.image.resizeBilinear(input, [Math.trunc(input.shape[1] / resizeFact), Math.trunc(input.shape[2] / resizeFact)]);
 const reducedData = await reduced.data();
+tfjs_esm_exports.dispose(reduced);
 let sum = 0;
 for (let i = 0; i < reducedData.length / 3; i++)
 sum += reducedData[3 * i + 2];
-reduced.dispose();
 const diff = 100 * (Math.max(sum, lastInputSum) / Math.min(sum, lastInputSum) - 1);
 lastInputSum = sum;
-const skipFrame = diff < Math.max(instance.config.cacheSensitivity, lastCacheDiff);
-lastCacheDiff = diff > 10 * instance.config.cacheSensitivity ? 0 : diff;
+const skipFrame = diff < Math.max(config3.cacheSensitivity, lastCacheDiff);
+lastCacheDiff = diff > 10 * config3.cacheSensitivity ? 0 : diff;
 return skipFrame;
 }
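The refactored skip() above no longer closes over the whole Human instance: it receives the config directly, downsamples the input by a fixed factor, sums one color channel on the CPU, and compares the relative change between frames against cacheSensitivity; the downsampled tensor is now disposed before any return path. A hedged standalone sketch of the heuristic (assuming @tensorflow/tfjs and a [1, height, width, 3] input tensor):

```js
let lastSum = 0;

async function shouldSkipFrame(config, input) {
  if (config.cacheSensitivity === 0) return false;          // caching disabled
  const reduced = tf.image.resizeBilinear(input, [32, 32]); // cheap downsample
  const data = await reduced.data();
  tf.dispose(reduced);                                      // release before any return path
  let sum = 0;
  for (let i = 0; i < data.length / 3; i++) sum += data[3 * i + 2]; // one channel is enough
  const diff = 100 * (Math.max(sum, lastSum) / Math.min(sum, lastSum) - 1);
  lastSum = sum;
  return diff < config.cacheSensitivity;                    // small change: reuse cached results
}
```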
@@ -9915,6 +9938,7 @@ async function predict11(input) {
 tfjs_esm_exports.dispose(resizeInput);
 tfjs_esm_exports.dispose(norm);
 const squeeze7 = tfjs_esm_exports.squeeze(res, 0);
+tfjs_esm_exports.dispose(res);
 let resizeOutput;
 if (squeeze7.shape[2] === 2) {
 const softmax = squeeze7.softmax();
@@ -9932,16 +9956,18 @@ async function predict11(input) {
 } else {
 resizeOutput = tfjs_esm_exports.image.resizeBilinear(squeeze7, [width, height]);
 }
-if (typeof document === "undefined")
-return resizeOutput.data();
+tfjs_esm_exports.dispose(squeeze7);
+if (env2.node) {
+const data2 = await resizeOutput.data();
+tfjs_esm_exports.dispose(resizeOutput);
+return data2;
+}
 const overlay = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(width, height) : document.createElement("canvas");
 overlay.width = width;
 overlay.height = height;
 if (tfjs_esm_exports.browser)
 await tfjs_esm_exports.browser.toPixels(resizeOutput, overlay);
 tfjs_esm_exports.dispose(resizeOutput);
-tfjs_esm_exports.dispose(squeeze7);
-tfjs_esm_exports.dispose(res);
 const alphaCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(width, height) : document.createElement("canvas");
 alphaCanvas.width = width;
 alphaCanvas.height = height;
@@ -10245,7 +10271,7 @@ var detectFace = async (parent, input) => {
 delete faces[i].annotations.rightEyeIris;
 }
 const irisSize = ((_e = faces[i].annotations) == null ? void 0 : _e.leftEyeIris) && ((_f = faces[i].annotations) == null ? void 0 : _f.rightEyeIris) ? Math.max(Math.abs(faces[i].annotations.leftEyeIris[3][0] - faces[i].annotations.leftEyeIris[1][0]), Math.abs(faces[i].annotations.rightEyeIris[4][1] - faces[i].annotations.rightEyeIris[2][1])) / input.shape[2] : 0;
-const tensor2 = parent.config.face.detector.return ? tfjs_esm_exports.squeeze(faces[i].tensor) : null;
+const tensor3 = parent.config.face.detector.return ? tfjs_esm_exports.squeeze(faces[i].tensor) : null;
 tfjs_esm_exports.dispose(faces[i].tensor);
 if (faces[i].tensor)
 delete faces[i].tensor;
@@ -10259,7 +10285,7 @@ var detectFace = async (parent, input) => {
 emotion: emotionRes,
 iris: irisSize !== 0 ? Math.trunc(500 / irisSize / 11.7) / 100 : 0,
 rotation,
-tensor: tensor2
+tensor: tensor3
 });
 parent.analyze("End Face");
 }
@@ -10394,7 +10420,7 @@ var draw_exports = {};
 __export(draw_exports, {
 all: () => all,
 body: () => body2,
-canvas: () => canvas,
+canvas: () => canvas2,
 face: () => face2,
 gesture: () => gesture,
 hand: () => hand2,
@@ -10867,7 +10893,7 @@ async function person(inCanvas2, result, drawOptions) {
 }
 }
 }
-async function canvas(inCanvas2, outCanvas2) {
+async function canvas2(inCanvas2, outCanvas2) {
 if (!inCanvas2 || !outCanvas2)
 return;
 getCanvasContext(outCanvas2);
@@ -11947,29 +11973,34 @@ async function warmupBitmap(instance) {
 async function warmupCanvas(instance) {
 return new Promise((resolve) => {
 let src;
 let size = 0;
 switch (instance.config.warmup) {
 case "face":
 size = 256;
 src = "data:image/jpeg;base64," + face3;
 break;
 case "full":
 case "body":
 size = 1200;
 src = "data:image/jpeg;base64," + body3;
 break;
 default:
 src = null;
 }
-const img = new Image();
+let img;
+if (typeof Image !== "undefined")
+img = new Image();
+else if (env2.Image)
+img = new env2.Image();
 img.onload = async () => {
-const canvas2 = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(size, size) : document.createElement("canvas");
-canvas2.width = img.naturalWidth;
-canvas2.height = img.naturalHeight;
-const ctx = canvas2.getContext("2d");
-ctx == null ? void 0 : ctx.drawImage(img, 0, 0);
-const res = await instance.detect(canvas2, instance.config);
-resolve(res);
+const canvas3 = canvas(img.naturalWidth, img.naturalHeight);
+if (!canvas3) {
+log("Warmup: Canvas not found");
+resolve({});
+} else {
+const ctx = canvas3.getContext("2d");
+ctx.drawImage(img, 0, 0);
+const tensor3 = await instance.image(canvas3);
+const res = await instance.detect(tensor3.tensor, instance.config);
+resolve(res);
+}
 };
 if (src)
 img.src = src;
@@ -12008,7 +12039,7 @@ async function warmup(instance, userConfig) {
 let res;
 if (typeof createImageBitmap === "function")
 res = await warmupBitmap(instance);
-else if (typeof Image !== "undefined")
+else if (typeof Image !== "undefined" || env2.Canvas !== void 0)
 res = await warmupCanvas(instance);
 else
 res = await warmupNode(instance);
@@ -12100,7 +12131,7 @@ var Human = class {
 return similarity(embedding1, embedding2);
 }
 segmentation(input, background) {
-return process5(input, background, this.config);
+return input ? process5(input, background, this.config) : null;
 }
 enhance(input) {
 return enhance(input);
@@ -12158,32 +12189,32 @@ var Human = class {
 await check(this);
 await this.load();
 timeStamp = now();
-this.process = process4(input, this.config);
-const inputTensor = this.process.tensor;
+let img = process4(input, this.config);
+this.process = img;
 this.performance.image = Math.trunc(now() - timeStamp);
 this.analyze("Get Image:");
-if (this.config.segmentation.enabled && this.process && inputTensor) {
+if (this.config.segmentation.enabled && this.process && img.tensor && img.canvas) {
 this.analyze("Start Segmentation:");
 this.state = "run:segmentation";
 timeStamp = now();
-await predict11(this.process);
+await predict11(img);
 elapsedTime = Math.trunc(now() - timeStamp);
 if (elapsedTime > 0)
 this.performance.segmentation = elapsedTime;
-if (this.process.canvas) {
-tfjs_esm_exports.dispose(inputTensor);
-this.process = process4(this.process.canvas, this.config);
+if (img.canvas) {
+tfjs_esm_exports.dispose(img.tensor);
+img = process4(img.canvas, this.config);
 }
 this.analyze("End Segmentation:");
 }
-if (!this.process || !inputTensor) {
+if (!img.tensor) {
 log("could not convert input to tensor");
 resolve({ error: "could not convert input to tensor" });
 return;
 }
 this.emit("image");
 timeStamp = now();
-this.config.skipFrame = await skip(this, inputTensor);
+this.config.skipFrame = await skip(this.config, img.tensor);
 if (!this.performance.frames)
 this.performance.frames = 0;
 if (!this.performance.cached)
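The detect() changes above and below give the processed input a single owner: process4() returns { tensor, canvas }, every detector reads img.tensor, and the tensor is disposed exactly once at the end of the run (final hunk of this file). A hedged sketch of that ownership flow, with process and the detector names as illustrative stand-ins rather than the library API:

```js
async function detect(input, config) {
  const img = process(input, config);       // { tensor, canvas }
  try {
    const faces = await detectFaces(img.tensor, config);
    const bodies = await detectBodies(img.tensor, config);
    return { faces, bodies };
  } finally {
    tf.dispose(img.tensor);                 // single owner releases the input tensor
  }
}
```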
@@ -12198,13 +12229,13 @@ var Human = class {
 let handRes = [];
 let objectRes = [];
 if (this.config.async) {
-faceRes = this.config.face.enabled ? detectFace(this, inputTensor) : [];
+faceRes = this.config.face.enabled ? detectFace(this, img.tensor) : [];
 if (this.performance.face)
 delete this.performance.face;
 } else {
 this.state = "run:face";
 timeStamp = now();
-faceRes = this.config.face.enabled ? await detectFace(this, inputTensor) : [];
+faceRes = this.config.face.enabled ? await detectFace(this, img.tensor) : [];
 elapsedTime = Math.trunc(now() - timeStamp);
 if (elapsedTime > 0)
 this.performance.face = elapsedTime;
@@ -12212,26 +12243,26 @@ var Human = class {
 this.analyze("Start Body:");
 if (this.config.async) {
 if ((_a = this.config.body.modelPath) == null ? void 0 : _a.includes("posenet"))
-bodyRes = this.config.body.enabled ? predict4(inputTensor, this.config) : [];
+bodyRes = this.config.body.enabled ? predict4(img.tensor, this.config) : [];
 else if ((_b = this.config.body.modelPath) == null ? void 0 : _b.includes("blazepose"))
-bodyRes = this.config.body.enabled ? predict6(inputTensor, this.config) : [];
+bodyRes = this.config.body.enabled ? predict6(img.tensor, this.config) : [];
 else if ((_c = this.config.body.modelPath) == null ? void 0 : _c.includes("efficientpose"))
-bodyRes = this.config.body.enabled ? predict7(inputTensor, this.config) : [];
+bodyRes = this.config.body.enabled ? predict7(img.tensor, this.config) : [];
 else if ((_d = this.config.body.modelPath) == null ? void 0 : _d.includes("movenet"))
-bodyRes = this.config.body.enabled ? predict8(inputTensor, this.config) : [];
+bodyRes = this.config.body.enabled ? predict8(img.tensor, this.config) : [];
 if (this.performance.body)
 delete this.performance.body;
 } else {
 this.state = "run:body";
 timeStamp = now();
 if ((_e = this.config.body.modelPath) == null ? void 0 : _e.includes("posenet"))
-bodyRes = this.config.body.enabled ? await predict4(inputTensor, this.config) : [];
+bodyRes = this.config.body.enabled ? await predict4(img.tensor, this.config) : [];
 else if ((_f = this.config.body.modelPath) == null ? void 0 : _f.includes("blazepose"))
-bodyRes = this.config.body.enabled ? await predict6(inputTensor, this.config) : [];
+bodyRes = this.config.body.enabled ? await predict6(img.tensor, this.config) : [];
 else if ((_g = this.config.body.modelPath) == null ? void 0 : _g.includes("efficientpose"))
-bodyRes = this.config.body.enabled ? await predict7(inputTensor, this.config) : [];
+bodyRes = this.config.body.enabled ? await predict7(img.tensor, this.config) : [];
 else if ((_h = this.config.body.modelPath) == null ? void 0 : _h.includes("movenet"))
-bodyRes = this.config.body.enabled ? await predict8(inputTensor, this.config) : [];
+bodyRes = this.config.body.enabled ? await predict8(img.tensor, this.config) : [];
 elapsedTime = Math.trunc(now() - timeStamp);
 if (elapsedTime > 0)
 this.performance.body = elapsedTime;
@@ -12239,13 +12270,13 @@ var Human = class {
 this.analyze("End Body:");
 this.analyze("Start Hand:");
 if (this.config.async) {
-handRes = this.config.hand.enabled ? predict5(inputTensor, this.config) : [];
+handRes = this.config.hand.enabled ? predict5(img.tensor, this.config) : [];
 if (this.performance.hand)
 delete this.performance.hand;
 } else {
 this.state = "run:hand";
 timeStamp = now();
-handRes = this.config.hand.enabled ? await predict5(inputTensor, this.config) : [];
+handRes = this.config.hand.enabled ? await predict5(img.tensor, this.config) : [];
 elapsedTime = Math.trunc(now() - timeStamp);
 if (elapsedTime > 0)
 this.performance.hand = elapsedTime;
@@ -12254,18 +12285,18 @@ var Human = class {
 this.analyze("Start Object:");
 if (this.config.async) {
 if ((_i = this.config.object.modelPath) == null ? void 0 : _i.includes("nanodet"))
-objectRes = this.config.object.enabled ? predict9(inputTensor, this.config) : [];
+objectRes = this.config.object.enabled ? predict9(img.tensor, this.config) : [];
 else if ((_j = this.config.object.modelPath) == null ? void 0 : _j.includes("centernet"))
-objectRes = this.config.object.enabled ? predict10(inputTensor, this.config) : [];
+objectRes = this.config.object.enabled ? predict10(img.tensor, this.config) : [];
 if (this.performance.object)
 delete this.performance.object;
 } else {
 this.state = "run:object";
 timeStamp = now();
 if ((_k = this.config.object.modelPath) == null ? void 0 : _k.includes("nanodet"))
-objectRes = this.config.object.enabled ? await predict9(inputTensor, this.config) : [];
+objectRes = this.config.object.enabled ? await predict9(img.tensor, this.config) : [];
 else if ((_l = this.config.object.modelPath) == null ? void 0 : _l.includes("centernet"))
-objectRes = this.config.object.enabled ? await predict10(inputTensor, this.config) : [];
+objectRes = this.config.object.enabled ? await predict10(img.tensor, this.config) : [];
 elapsedTime = Math.trunc(now() - timeStamp);
 if (elapsedTime > 0)
 this.performance.object = elapsedTime;
@@ -12298,7 +12329,7 @@ var Human = class {
 return join2(faceRes, bodyRes, handRes, gestureRes, shape);
 }
 };
-tfjs_esm_exports.dispose(inputTensor);
+tfjs_esm_exports.dispose(img.tensor);
 this.emit("detect");
 resolve(this.result);
 });

File diff suppressed because one or more lines are too long
@@ -4086,28 +4086,28 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({
 return findEventTarget(target);
 }
 function _emscripten_set_canvas_element_size_calling_thread(target, width, height) {
-var canvas2 = findCanvasEventTarget(target);
-if (!canvas2)
+var canvas3 = findCanvasEventTarget(target);
+if (!canvas3)
 return -4;
-if (canvas2.canvasSharedPtr) {
-GROWABLE_HEAP_I32()[canvas2.canvasSharedPtr >> 2] = width;
-GROWABLE_HEAP_I32()[canvas2.canvasSharedPtr + 4 >> 2] = height;
+if (canvas3.canvasSharedPtr) {
+GROWABLE_HEAP_I32()[canvas3.canvasSharedPtr >> 2] = width;
+GROWABLE_HEAP_I32()[canvas3.canvasSharedPtr + 4 >> 2] = height;
 }
-if (canvas2.offscreenCanvas || !canvas2.controlTransferredOffscreen) {
-if (canvas2.offscreenCanvas)
-canvas2 = canvas2.offscreenCanvas;
+if (canvas3.offscreenCanvas || !canvas3.controlTransferredOffscreen) {
+if (canvas3.offscreenCanvas)
+canvas3 = canvas3.offscreenCanvas;
 var autoResizeViewport = false;
-if (canvas2.GLctxObject && canvas2.GLctxObject.GLctx) {
-var prevViewport = canvas2.GLctxObject.GLctx.getParameter(2978);
-autoResizeViewport = prevViewport[0] === 0 && prevViewport[1] === 0 && prevViewport[2] === canvas2.width && prevViewport[3] === canvas2.height;
+if (canvas3.GLctxObject && canvas3.GLctxObject.GLctx) {
+var prevViewport = canvas3.GLctxObject.GLctx.getParameter(2978);
+autoResizeViewport = prevViewport[0] === 0 && prevViewport[1] === 0 && prevViewport[2] === canvas3.width && prevViewport[3] === canvas3.height;
 }
-canvas2.width = width;
-canvas2.height = height;
+canvas3.width = width;
+canvas3.height = height;
 if (autoResizeViewport) {
-canvas2.GLctxObject.GLctx.viewport(0, 0, width, height);
+canvas3.GLctxObject.GLctx.viewport(0, 0, width, height);
 }
-} else if (canvas2.canvasSharedPtr) {
-var targetThread = GROWABLE_HEAP_I32()[canvas2.canvasSharedPtr + 8 >> 2];
+} else if (canvas3.canvasSharedPtr) {
+var targetThread = GROWABLE_HEAP_I32()[canvas3.canvasSharedPtr + 8 >> 2];
 _emscripten_set_offscreencanvas_size_on_target_thread(targetThread, target, width, height);
 return 1;
 } else {
@@ -4121,8 +4121,8 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({
 return _emscripten_set_canvas_element_size_calling_thread(target, width, height);
 }
 function _emscripten_set_canvas_element_size(target, width, height) {
-var canvas2 = findCanvasEventTarget(target);
-if (canvas2) {
+var canvas3 = findCanvasEventTarget(target);
+if (canvas3) {
 return _emscripten_set_canvas_element_size_calling_thread(target, width, height);
 } else {
 return _emscripten_set_canvas_element_size_main_thread(target, width, height);
@@ -4194,8 +4194,8 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({
 source += UTF8ToString(GROWABLE_HEAP_I32()[string3 + i * 4 >> 2], len < 0 ? void 0 : len);
 }
 return source;
-}, createContext: function(canvas2, webGLContextAttributes) {
-var ctx = canvas2.getContext("webgl", webGLContextAttributes);
+}, createContext: function(canvas3, webGLContextAttributes) {
+var ctx = canvas3.getContext("webgl", webGLContextAttributes);
 if (!ctx)
 return 0;
 var handle = GL.registerContext(ctx, webGLContextAttributes);
@@ -4275,14 +4275,14 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({
 var a = attributes >> 2;
 var powerPreference = GROWABLE_HEAP_I32()[a + (24 >> 2)];
 var contextAttributes = { "alpha": !!GROWABLE_HEAP_I32()[a + (0 >> 2)], "depth": !!GROWABLE_HEAP_I32()[a + (4 >> 2)], "stencil": !!GROWABLE_HEAP_I32()[a + (8 >> 2)], "antialias": !!GROWABLE_HEAP_I32()[a + (12 >> 2)], "premultipliedAlpha": !!GROWABLE_HEAP_I32()[a + (16 >> 2)], "preserveDrawingBuffer": !!GROWABLE_HEAP_I32()[a + (20 >> 2)], "powerPreference": __emscripten_webgl_power_preferences[powerPreference], "failIfMajorPerformanceCaveat": !!GROWABLE_HEAP_I32()[a + (28 >> 2)], majorVersion: GROWABLE_HEAP_I32()[a + (32 >> 2)], minorVersion: GROWABLE_HEAP_I32()[a + (36 >> 2)], enableExtensionsByDefault: GROWABLE_HEAP_I32()[a + (40 >> 2)], explicitSwapControl: GROWABLE_HEAP_I32()[a + (44 >> 2)], proxyContextToMainThread: GROWABLE_HEAP_I32()[a + (48 >> 2)], renderViaOffscreenBackBuffer: GROWABLE_HEAP_I32()[a + (52 >> 2)] };
-var canvas2 = findCanvasEventTarget(target);
-if (!canvas2) {
+var canvas3 = findCanvasEventTarget(target);
+if (!canvas3) {
 return 0;
 }
 if (contextAttributes.explicitSwapControl) {
 return 0;
 }
-var contextHandle = GL.createContext(canvas2, contextAttributes);
+var contextHandle = GL.createContext(canvas3, contextAttributes);
 return contextHandle;
 }
 function _emscripten_webgl_create_context(a0, a12) {
@@ -10488,7 +10488,7 @@ async function fromPixelsAsync(pixels, numChannels = 3) {
 }
 return fromPixels_(inputs, numChannels);
 }
-async function toPixels(img, canvas2) {
+async function toPixels(img, canvas3) {
 let $img = convertToTensor(img, "img", "toPixels");
 if (!(img instanceof Tensor)) {
 const originalImgTensor = $img;
@@ -10536,10 +10536,10 @@ async function toPixels(img, canvas2) {
 bytes[j + 2] = Math.round(rgba[2]);
 bytes[j + 3] = Math.round(rgba[3]);
 }
-if (canvas2 != null) {
-canvas2.width = width;
-canvas2.height = height;
-const ctx = canvas2.getContext("2d");
+if (canvas3 != null) {
+canvas3.width = width;
+canvas3.height = height;
+const ctx = canvas3.getContext("2d");
 const imageData = new ImageData(bytes, width, height);
 ctx.putImageData(imageData, 0, 0);
 }
@@ -43093,15 +43093,15 @@ function getWebGLRenderingContext(webGLVersion) {
 if (webGLVersion !== 1 && webGLVersion !== 2) {
 throw new Error("Cannot get WebGL rendering context, WebGL is disabled.");
 }
-const canvas2 = createCanvas(webGLVersion);
-canvas2.addEventListener("webglcontextlost", (ev) => {
+const canvas3 = createCanvas(webGLVersion);
+canvas3.addEventListener("webglcontextlost", (ev) => {
 ev.preventDefault();
 delete contexts[webGLVersion];
 }, false);
 if (webGLVersion === 1) {
-return canvas2.getContext("webgl", WEBGL_ATTRIBUTES) || canvas2.getContext("experimental-webgl", WEBGL_ATTRIBUTES);
+return canvas3.getContext("webgl", WEBGL_ATTRIBUTES) || canvas3.getContext("experimental-webgl", WEBGL_ATTRIBUTES);
 }
-return canvas2.getContext("webgl2", WEBGL_ATTRIBUTES);
+return canvas3.getContext("webgl2", WEBGL_ATTRIBUTES);
 }
 var PackingScheme;
 (function(PackingScheme2) {
@@ -59746,16 +59746,16 @@ function getBoxCenter(box6) {
 box6.startPoint[1] + (box6.endPoint[1] - box6.startPoint[1]) / 2
 ];
 }
-function cutBoxFromImageAndResize(box6, image4, cropSize) {
-const h = image4.shape[1];
-const w = image4.shape[2];
+function cutBoxFromImageAndResize(box6, image5, cropSize) {
+const h = image5.shape[1];
+const w = image5.shape[2];
 const boxes = [[
 box6.startPoint[1] / h,
 box6.startPoint[0] / w,
 box6.endPoint[1] / h,
 box6.endPoint[0] / w
 ]];
-return image.cropAndResize(image4, boxes, [0], cropSize);
+return image.cropAndResize(image5, boxes, [0], cropSize);
 }
 function enlargeBox(box6, factor = 1.5) {
 const center = getBoxCenter(box6);
@@ -59899,7 +59899,7 @@ var BlazeFaceModel = class {
 async getBoundingBoxes(inputImage, userConfig) {
 var _a, _b, _c, _d;
 if (!inputImage || inputImage["isDisposedInternal"] || inputImage.shape.length !== 4 || inputImage.shape[1] < 1 || inputImage.shape[2] < 1)
-return null;
+return { boxes: [] };
 const [batch, boxes, scores] = tidy(() => {
 const resizedImage = image.resizeBilinear(inputImage, [this.inputSize, this.inputSize]);
 const normalizedImage = sub(div(resizedImage, 127.5), 0.5);
@@ -59929,11 +59929,9 @@ var BlazeFaceModel = class {
 const confidence = scoresData[nms[i]];
 if (confidence > (((_d = this.config.face.detector) == null ? void 0 : _d.minConfidence) || 0)) {
 const boundingBox = slice(boxes, [nms[i], 0], [1, -1]);
-const localBox = createBox(boundingBox);
-dispose(boundingBox);
-const anchor = this.anchorsData[nms[i]];
 const landmarks = tidy(() => reshape(squeeze(slice(batch, [nms[i], keypointsCount - 1], [1, -1])), [keypointsCount, -1]));
-annotatedBoxes.push({ box: localBox, landmarks, anchor, confidence });
+annotatedBoxes.push({ box: createBox(boundingBox), landmarks, anchor: this.anchorsData[nms[i]], confidence });
+dispose(boundingBox);
 }
 }
 dispose(batch);
@ -63421,12 +63419,12 @@ var Pipeline = class {
const angle = computeRotation(box6.landmarks[indexOfMouth], box6.landmarks[indexOfForehead]);
const faceCenter = getBoxCenter({ startPoint: box6.startPoint, endPoint: box6.endPoint });
const faceCenterNormalized = [faceCenter[0] / input2.shape[2], faceCenter[1] / input2.shape[1]];
const rotatedImage = image.rotateWithOffset(input2, angle, 0, faceCenterNormalized);
const rotated = image.rotateWithOffset(input2, angle, 0, faceCenterNormalized);
const rotationMatrix = buildRotationMatrix(-angle, faceCenter);
const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotatedImage, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotatedImage, [this.boxSize, this.boxSize]);
const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotated, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotated, [this.boxSize, this.boxSize]);
const face5 = div(cut, 255);
dispose(cut);
dispose(rotatedImage);
dispose(rotated);
return [angle, rotationMatrix, face5];
}
async augmentIris(rawCoords, face5) {
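correctFaceRotation allocates two intermediates, the rotated frame and the crop, keeps only the normalized face tensor, and disposes the rest; the rotatedImage-to-rotated rename is cosmetic. A sketch of that keep-one, dispose-the-rest flow under assumed sizes:

    import * as tf from '@tensorflow/tfjs';

    // rotate around a normalized center, crop a normalized [y1, x1, y2, x2] region,
    // normalize to [0..1], and dispose every intermediate so only the result survives
    function rotateAndCrop(input: tf.Tensor4D, angle: number, center: [number, number],
                           crop: [number, number, number, number]): tf.Tensor4D {
      const rotated = tf.image.rotateWithOffset(input, angle, 0, center);
      const cut = tf.image.cropAndResize(rotated, [crop], [0], [256, 256]);
      const face = tf.div(cut, 255) as tf.Tensor4D; // the only tensor returned to the caller
      tf.dispose(cut);
      tf.dispose(rotated);
      return face;
    }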
@ -63510,11 +63508,11 @@ var Pipeline = class {
[angle, rotationMatrix, face5] = this.correctFaceRotation(config3, box6, input2);
} else {
rotationMatrix = IDENTITY_MATRIX;
const clonedImage = input2.clone();
const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, clonedImage, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, clonedImage, [this.boxSize, this.boxSize]);
const cloned = input2.clone();
const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, cloned, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, cloned, [this.boxSize, this.boxSize]);
face5 = div(cut, 255);
dispose(cut);
dispose(clonedImage);
dispose(cloned);
}
if (!config3.face.mesh.enabled) {
results.push({
@ -63543,6 +63541,7 @@ var Pipeline = class {
const mesh = this.transformRawCoords(rawCoords, box6, angle, rotationMatrix);
box6 = { ...enlargeBox(calculateLandmarksBoundingBox(mesh), 1.5), confidence: box6.confidence };
if (config3.face.detector.rotation && config3.face.mesh.enabled && config3.face.description.enabled && env2.kernels.includes("rotatewithoffset")) {
dispose(face5);
[angle, rotationMatrix, face5] = this.correctFaceRotation(config3, box6, input2);
}
results.push({
@ -63609,8 +63608,6 @@ async function predict(input2, config3) {
annotations: annotations3,
tensor: prediction.image
});
if (prediction.coords)
dispose(prediction.coords);
}
return results;
}
@ -63690,7 +63687,7 @@ function match(embedding2, db, threshold3 = 0) {
return best;
}
function enhance(input2) {
const image4 = tidy(() => {
const image5 = tidy(() => {
const tensor2 = input2.image || input2.tensor || input2;
if (!(tensor2 instanceof Tensor))
return null;
@ -63701,9 +63698,9 @@ function enhance(input2) {
const norm2 = mul(crop, 255);
return norm2;
});
return image4;
return image5;
}
async function predict2(image4, config3, idx, count3) {
async function predict2(image5, config3, idx, count3) {
var _a, _b, _c;
if (!model2)
return null;
@ -63714,7 +63711,7 @@ async function predict2(image4, config3, idx, count3) {
skipped = 0;
return new Promise(async (resolve) => {
var _a2, _b2;
const enhanced = enhance(image4);
const enhanced = enhance(image5);
let resT;
const obj = {
age: 0,
@ -63734,6 +63731,7 @@ async function predict2(image4, config3, idx, count3) {
}
const argmax2 = argMax(resT.find((t) => t.shape[1] === 100), 1);
const age = (await argmax2.data())[0];
dispose(argmax2);
const all6 = await resT.find((t) => t.shape[1] === 100).data();
obj.age = Math.round(all6[age - 1] > all6[age + 1] ? 10 * age - 100 * all6[age - 1] : 10 * age + 100 * all6[age + 1]) / 10;
const desc = resT.find((t) => t.shape[1] === 1024);
@ -63766,7 +63764,7 @@ async function load4(config3) {
log("cached model:", model3.modelUrl);
return model3;
}
async function predict3(image4, config3, idx, count3) {
async function predict3(image5, config3, idx, count3) {
var _a;
if (!model3)
return null;
@ -63777,7 +63775,7 @@ async function predict3(image4, config3, idx, count3) {
skipped2 = 0;
return new Promise(async (resolve) => {
var _a2, _b;
const resize = image.resizeBilinear(image4, [model3.inputs[0].shape[2], model3.inputs[0].shape[1]], false);
const resize = image.resizeBilinear(image5, [model3.inputs[0].shape[2], model3.inputs[0].shape[1]], false);
const [red, green, blue] = split(resize, 3, 3);
dispose(resize);
const redNorm = mul(red, rgb[0]);
@ -64164,16 +64162,16 @@ function getBoxCenter2(box6) {
box6.startPoint[1] + (box6.endPoint[1] - box6.startPoint[1]) / 2
];
}
function cutBoxFromImageAndResize2(box6, image4, cropSize) {
const h = image4.shape[1];
const w = image4.shape[2];
function cutBoxFromImageAndResize2(box6, image5, cropSize) {
const h = image5.shape[1];
const w = image5.shape[2];
const boxes = [[
box6.startPoint[1] / h,
box6.startPoint[0] / w,
box6.endPoint[1] / h,
box6.endPoint[0] / w
]];
return image.cropAndResize(image4, boxes, [0], cropSize);
return image.cropAndResize(image5, boxes, [0], cropSize);
}
function scaleBoxCoordinates2(box6, factor) {
const startPoint = [box6.startPoint[0] * factor[0], box6.startPoint[1] * factor[1]];
@ -67200,9 +67198,9 @@ var HandDetector = class {
async estimateHandBounds(input2, config3) {
const inputHeight = input2.shape[1];
const inputWidth = input2.shape[2];
const image4 = tidy(() => sub(div(image.resizeBilinear(input2, [this.inputSize, this.inputSize]), 127.5), 1));
const predictions = await this.getBoxes(image4, config3);
dispose(image4);
const image5 = tidy(() => sub(div(image.resizeBilinear(input2, [this.inputSize, this.inputSize]), 127.5), 1));
const predictions = await this.getBoxes(image5, config3);
dispose(image5);
const hands = [];
if (!predictions || predictions.length === 0)
return hands;
@ -67344,11 +67342,11 @@ var HandPipeline = class {
Math.trunc(coord[2])
]);
}
async estimateHands(image4, config3) {
async estimateHands(image5, config3) {
let useFreshBox = false;
let boxes;
if (this.skipped === 0 || this.skipped > config3.hand.skipFrames || !config3.hand.landmarks || !config3.skipFrame) {
boxes = await this.handDetector.estimateHandBounds(image4, config3);
boxes = await this.handDetector.estimateHandBounds(image5, config3);
this.skipped = 0;
}
if (config3.skipFrame)
@ -67367,8 +67365,8 @@ var HandPipeline = class {
if (config3.hand.landmarks) {
const angle = config3.hand.rotation ? computeRotation2(currentBox.palmLandmarks[palmLandmarksPalmBase], currentBox.palmLandmarks[palmLandmarksMiddleFingerBase]) : 0;
const palmCenter = getBoxCenter2(currentBox);
const palmCenterNormalized = [palmCenter[0] / image4.shape[2], palmCenter[1] / image4.shape[1]];
const rotatedImage = config3.hand.rotation && env2.kernels.includes("rotatewithoffset") ? image.rotateWithOffset(image4, angle, 0, palmCenterNormalized) : image4.clone();
const palmCenterNormalized = [palmCenter[0] / image5.shape[2], palmCenter[1] / image5.shape[1]];
const rotatedImage = config3.hand.rotation && env2.kernels.includes("rotatewithoffset") ? image.rotateWithOffset(image5, angle, 0, palmCenterNormalized) : image5.clone();
const rotationMatrix = buildRotationMatrix2(-angle, palmCenter);
const newBox = useFreshBox ? this.getBoxForPalmLandmarks(currentBox.palmLandmarks, rotationMatrix) : currentBox;
const croppedInput = cutBoxFromImageAndResize2(newBox, rotatedImage, [this.inputSize, this.inputSize]);
@ -67970,13 +67968,13 @@ async function load7(config3) {
log("cached model:", model5["modelUrl"]);
return model5;
}
async function predict6(image4, config3) {
async function predict6(image5, config3) {
if (!model5)
return [];
if (!config3.body.enabled)
return [];
const imgSize = { width: image4.shape[2] || 0, height: image4.shape[1] || 0 };
const resize = image.resizeBilinear(image4, [model5["width"], model5["height"]], false);
const imgSize = { width: image5.shape[2] || 0, height: image5.shape[1] || 0 };
const resize = image.resizeBilinear(image5, [model5["width"], model5["height"]], false);
const normalize = div(resize, [255]);
dispose(resize);
const resT = await model5.predict(normalize);
@ -68052,7 +68050,7 @@ function max2d(inputs, minScore) {
return [0, 0, newScore];
});
}
async function predict7(image4, config3) {
async function predict7(image5, config3) {
var _a;
if (skipped3 < (((_a = config3.body) == null ? void 0 : _a.skipFrames) || 0) && config3.skipFrame && Object.keys(keypoints).length > 0) {
skipped3++;
@ -68064,7 +68062,7 @@ async function predict7(image4, config3) {
const tensor2 = tidy(() => {
if (!model6.inputs[0].shape)
return null;
const resize = image.resizeBilinear(image4, [model6.inputs[0].shape[2], model6.inputs[0].shape[1]], false);
const resize = image.resizeBilinear(image5, [model6.inputs[0].shape[2], model6.inputs[0].shape[1]], false);
const enhance2 = mul(resize, 2);
const norm2 = enhance2.sub(1);
return norm2;
@ -68090,8 +68088,8 @@ async function predict7(image4, config3) {
y2 / model6.inputs[0].shape[1]
],
position: [
Math.round(image4.shape[2] * x2 / model6.inputs[0].shape[2]),
Math.round(image4.shape[1] * y2 / model6.inputs[0].shape[1])
Math.round(image5.shape[2] * x2 / model6.inputs[0].shape[2]),
Math.round(image5.shape[1] * y2 / model6.inputs[0].shape[1])
]
});
}
@ -68138,7 +68136,7 @@ async function load9(config3) {
log("cached model:", model7["modelUrl"]);
return model7;
}
async function parseSinglePose(res, config3, image4) {
async function parseSinglePose(res, config3, image5) {
keypoints2.length = 0;
const kpt3 = res[0][0];
for (let id = 0; id < kpt3.length; id++) {
@ -68152,8 +68150,8 @@ async function parseSinglePose(res, config3, image4) {
kpt3[id][0]
],
position: [
Math.round((image4.shape[2] || 0) * kpt3[id][1]),
Math.round((image4.shape[1] || 0) * kpt3[id][0])
Math.round((image5.shape[2] || 0) * kpt3[id][1]),
Math.round((image5.shape[1] || 0) * kpt3[id][0])
]
});
}
@ -68179,7 +68177,7 @@ async function parseSinglePose(res, config3, image4) {
persons2.push({ id: 0, score: score2, box: box5, boxRaw: boxRaw2, keypoints: keypoints2 });
return persons2;
}
async function parseMultiPose(res, config3, image4) {
async function parseMultiPose(res, config3, image5) {
const persons2 = [];
for (let p2 = 0; p2 < res[0].length; p2++) {
const kpt3 = res[0][p2];
@ -68198,8 +68196,8 @@ async function parseMultiPose(res, config3, image4) {
kpt3[3 * i + 0]
],
position: [
Math.trunc(kpt3[3 * i + 1] * (image4.shape[2] || 0)),
Math.trunc(kpt3[3 * i + 0] * (image4.shape[1] || 0))
Math.trunc(kpt3[3 * i + 1] * (image5.shape[2] || 0)),
Math.trunc(kpt3[3 * i + 0] * (image5.shape[1] || 0))
]
});
}
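MoveNet emits keypoints as normalized [y, x, score] triplets; parseSinglePose and parseMultiPose only scale them by the input tensor's width and height to produce pixel positions. A minimal sketch of that conversion, using the same shape assumptions:

    import * as tf from '@tensorflow/tfjs';

    type Keypoint = { score: number; positionRaw: [number, number]; position: [number, number] };

    // kpt rows are [y, x, score] in normalized coordinates, as in the hunks above
    function toPixelKeypoints(kpt: number[][], input: tf.Tensor4D): Keypoint[] {
      const height = input.shape[1] || 0;
      const width = input.shape[2] || 0;
      return kpt.map((k) => ({
        score: Math.round(100 * k[2]) / 100,
        positionRaw: [k[1], k[0]],                                       // normalized [x, y]
        position: [Math.round(width * k[1]), Math.round(height * k[0])], // pixel [x, y]
      }));
    }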
@ -68210,17 +68208,17 @@ async function parseMultiPose(res, config3, image4) {
score: score2,
boxRaw: boxRaw2,
box: [
Math.trunc(boxRaw2[0] * (image4.shape[2] || 0)),
Math.trunc(boxRaw2[1] * (image4.shape[1] || 0)),
Math.trunc(boxRaw2[2] * (image4.shape[2] || 0)),
Math.trunc(boxRaw2[3] * (image4.shape[1] || 0))
Math.trunc(boxRaw2[0] * (image5.shape[2] || 0)),
Math.trunc(boxRaw2[1] * (image5.shape[1] || 0)),
Math.trunc(boxRaw2[2] * (image5.shape[2] || 0)),
Math.trunc(boxRaw2[3] * (image5.shape[1] || 0))
],
keypoints: keypoints2
});
}
return persons2;
}
async function predict8(image4, config3) {
async function predict8(image5, config3) {
if (skipped4 < (config3.body.skipFrames || 0) && config3.skipFrame && Object.keys(keypoints2).length > 0) {
skipped4++;
return [{ id: 0, score: score2, box: box5, boxRaw: boxRaw2, keypoints: keypoints2 }];
@ -68233,7 +68231,7 @@ async function predict8(image4, config3) {
let inputSize = model7.inputs[0].shape[2];
if (inputSize === -1)
inputSize = 256;
const resize = image.resizeBilinear(image4, [inputSize, inputSize], false);
const resize = image.resizeBilinear(image5, [inputSize, inputSize], false);
const cast6 = cast(resize, "int32");
return cast6;
});
@ -68246,9 +68244,9 @@ async function predict8(image4, config3) {
const res = await resT.array();
let persons2;
if (resT.shape[2] === 17)
persons2 = await parseSinglePose(res, config3, image4);
persons2 = await parseSinglePose(res, config3, image5);
else if (resT.shape[2] === 56)
persons2 = await parseMultiPose(res, config3, image4);
persons2 = await parseMultiPose(res, config3, image5);
dispose(resT);
resolve(persons2);
});
@ -68419,7 +68417,7 @@ async function process2(res, inputSize, outputShape, config3) {
results = results.filter((_val, idx) => nmsIdx.includes(idx)).sort((a, b) => b.score - a.score);
return results;
}
async function predict9(image4, config3) {
async function predict9(image5, config3) {
if (skipped5 < (config3.object.skipFrames || 0) && config3.skipFrame && last3.length > 0) {
skipped5++;
return last3;
@ -68428,8 +68426,8 @@ async function predict9(image4, config3) {
if (!env2.kernels.includes("mod") || !env2.kernels.includes("sparsetodense"))
return last3;
return new Promise(async (resolve) => {
const outputSize = [image4.shape[2], image4.shape[1]];
const resize = image.resizeBilinear(image4, [model8.inputSize, model8.inputSize], false);
const outputSize = [image5.shape[2], image5.shape[1]];
const resize = image.resizeBilinear(image5, [model8.inputSize, model8.inputSize], false);
const norm2 = div(resize, 255);
const transpose5 = norm2.transpose([0, 3, 1, 2]);
dispose(norm2);
@ -68474,6 +68472,7 @@ async function process3(res, inputSize, outputShape, config3) {
dispose(squeezeT);
const stackT = stack([arr[1], arr[0], arr[3], arr[2]], 1);
const boxesT = squeeze(stackT);
dispose(stackT);
const scoresT = squeeze(arr[4]);
const classesT = squeeze(arr[5]);
arr.forEach((t) => dispose(t));
@ -68677,8 +68676,8 @@ function GLImageFilter(params) {
gl.uniform1f(_currentProgram.uniform.flipY, flipY ? -1 : 1);
gl.drawArrays(gl.TRIANGLES, 0, 6);
};
this.apply = function(image4) {
_resize(image4.width, image4.height);
this.apply = function(image5) {
_resize(image5.width, image5.height);
_drawCount = 0;
if (!_sourceTexture)
_sourceTexture = gl.createTexture();
@ -68687,7 +68686,7 @@ function GLImageFilter(params) {
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image4);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image5);
if (_filterChain.length === 0) {
_draw();
return _canvas;
@ -69238,11 +69237,28 @@ var maxSize = 2048;
var inCanvas;
var outCanvas;
var fx;
function canvas(width, height) {
let c;
if (env2.browser) {
if (typeof OffscreenCanvas !== "undefined") {
c = new OffscreenCanvas(width, height);
} else {
c = document.createElement("canvas");
c.width = width;
c.height = height;
}
} else {
c = typeof env2.Canvas !== "undefined" ? new env2.Canvas(width, height) : null;
}
if (!c)
throw new Error("Human: Cannot create canvas");
return c;
}
function process4(input2, config3) {
let tensor2;
if (!input2)
throw new Error("Human: Input is missing");
if (!(input2 instanceof Tensor) && !(typeof Image !== "undefined" && input2 instanceof Image) && !(typeof ImageData !== "undefined" && input2 instanceof ImageData) && !(typeof ImageBitmap !== "undefined" && input2 instanceof ImageBitmap) && !(typeof HTMLImageElement !== "undefined" && input2 instanceof HTMLImageElement) && !(typeof HTMLMediaElement !== "undefined" && input2 instanceof HTMLMediaElement) && !(typeof HTMLVideoElement !== "undefined" && input2 instanceof HTMLVideoElement) && !(typeof HTMLCanvasElement !== "undefined" && input2 instanceof HTMLCanvasElement) && !(typeof OffscreenCanvas !== "undefined" && input2 instanceof OffscreenCanvas)) {
if (!(input2 instanceof Tensor) && !(typeof Image !== "undefined" && input2 instanceof Image) && !(typeof env2.Canvas !== "undefined" && input2 instanceof env2.Canvas) && !(typeof ImageData !== "undefined" && input2 instanceof ImageData) && !(typeof ImageBitmap !== "undefined" && input2 instanceof ImageBitmap) && !(typeof HTMLImageElement !== "undefined" && input2 instanceof HTMLImageElement) && !(typeof HTMLMediaElement !== "undefined" && input2 instanceof HTMLMediaElement) && !(typeof HTMLVideoElement !== "undefined" && input2 instanceof HTMLVideoElement) && !(typeof HTMLCanvasElement !== "undefined" && input2 instanceof HTMLCanvasElement) && !(typeof OffscreenCanvas !== "undefined" && input2 instanceof OffscreenCanvas)) {
throw new Error("Human: Input type is not recognized");
}
if (input2 instanceof Tensor) {
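The new canvas() helper centralizes creation: OffscreenCanvas in browsers when available, a DOM canvas otherwise, and an injected env.Canvas constructor under Node (for example from the canvas npm package). A hedged usage sketch; the Env shape below is an assumption modeled on the bundle's env2 object:

    // assumed environment descriptor, mirroring the bundle's env2 object
    interface Env { browser: boolean; Canvas?: new (w: number, h: number) => unknown; }

    function createPlatformCanvas(env: Env, width: number, height: number): HTMLCanvasElement | OffscreenCanvas {
      if (env.browser) {
        if (typeof OffscreenCanvas !== 'undefined') return new OffscreenCanvas(width, height);
        const c = document.createElement('canvas');
        c.width = width;
        c.height = height;
        return c;
      }
      // Node: rely on an injected Canvas implementation (e.g. the `canvas` package)
      if (env.Canvas) return new env.Canvas(width, height) as HTMLCanvasElement;
      throw new Error('Cannot create canvas');
    }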
@ -69275,15 +69291,10 @@ function process4(input2, config3) {
targetHeight = originalHeight * ((config3.filter.width || 0) / originalWidth);
if (!targetWidth || !targetHeight)
throw new Error("Human: Input cannot determine dimension");
if (!inCanvas || (inCanvas == null ? void 0 : inCanvas.width) !== targetWidth || (inCanvas == null ? void 0 : inCanvas.height) !== targetHeight) {
inCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
if ((inCanvas == null ? void 0 : inCanvas.width) !== targetWidth)
inCanvas.width = targetWidth;
if ((inCanvas == null ? void 0 : inCanvas.height) !== targetHeight)
inCanvas.height = targetHeight;
}
if (!inCanvas || (inCanvas == null ? void 0 : inCanvas.width) !== targetWidth || (inCanvas == null ? void 0 : inCanvas.height) !== targetHeight)
inCanvas = canvas(targetWidth, targetHeight);
const ctx = inCanvas.getContext("2d");
if (input2 instanceof ImageData) {
if (typeof ImageData !== "undefined" && input2 instanceof ImageData) {
ctx.putImageData(input2, 0, 0);
} else {
if (config3.filter.flip && typeof ctx.translate !== "undefined") {
@ -69297,7 +69308,7 @@ function process4(input2, config3) {
}
if (config3.filter.enabled) {
if (!fx || !outCanvas || inCanvas.width !== outCanvas.width || (inCanvas == null ? void 0 : inCanvas.height) !== (outCanvas == null ? void 0 : outCanvas.height)) {
outCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(inCanvas == null ? void 0 : inCanvas.width, inCanvas == null ? void 0 : inCanvas.height) : document.createElement("canvas");
outCanvas = canvas(inCanvas == null ? void 0 : inCanvas.width, inCanvas == null ? void 0 : inCanvas.height);
if ((outCanvas == null ? void 0 : outCanvas.width) !== (inCanvas == null ? void 0 : inCanvas.width))
outCanvas.width = inCanvas == null ? void 0 : inCanvas.width;
if ((outCanvas == null ? void 0 : outCanvas.height) !== (inCanvas == null ? void 0 : inCanvas.height))
@ -69345,53 +69356,65 @@ function process4(input2, config3) {
if (outCanvas.data) {
const shape = [outCanvas.height, outCanvas.width, 3];
pixels = tensor3d(outCanvas.data, shape, "int32");
} else if (outCanvas instanceof ImageData) {
} else if (typeof ImageData !== "undefined" && outCanvas instanceof ImageData) {
pixels = browser_exports ? browser_exports.fromPixels(outCanvas) : null;
} else if (config3.backend === "webgl" || config3.backend === "humangl") {
const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
const tempCanvas = canvas(targetWidth, targetHeight);
tempCanvas.width = targetWidth;
tempCanvas.height = targetHeight;
const tempCtx = tempCanvas.getContext("2d");
tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
pixels = browser_exports ? browser_exports.fromPixels(tempCanvas) : null;
pixels = browser_exports && env2.browser ? browser_exports.fromPixels(tempCanvas) : null;
} else {
const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
const tempCanvas = canvas(targetWidth, targetHeight);
tempCanvas.width = targetWidth;
tempCanvas.height = targetHeight;
const tempCtx = tempCanvas.getContext("2d");
tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
const data = tempCtx == null ? void 0 : tempCtx.getImageData(0, 0, targetWidth, targetHeight);
pixels = browser_exports ? browser_exports.fromPixels(data) : null;
tempCtx.drawImage(outCanvas, 0, 0);
const data = tempCtx.getImageData(0, 0, targetWidth, targetHeight);
if (browser_exports && env2.browser) {
pixels = browser_exports.fromPixels(data);
} else {
pixels = tidy(() => {
const imageData = tensor(Array.from(data.data), [targetWidth, targetHeight, 4]);
const channels = split(imageData, 4, 2);
const rgb2 = stack([channels[0], channels[1], channels[2]], 2);
const expand = reshape(rgb2, [imageData.shape[0], imageData.shape[1], 3]);
return expand;
});
}
}
if (pixels) {
const casted = cast(pixels, "float32");
tensor2 = expandDims(casted, 0);
dispose(pixels);
dispose(casted);
} else {
tensor2 = zeros([1, targetWidth, targetHeight, 3]);
throw new Error("Human: Cannot create tensor from input");
}
}
}
const canvas2 = config3.filter.return ? outCanvas : null;
return { tensor: tensor2, canvas: canvas2 };
return { tensor: tensor2, canvas: config3.filter.return ? outCanvas : null };
}
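When tf.browser.fromPixels is unavailable (Node without a DOM), the new fallback above builds the tensor by hand: read RGBA bytes from the 2D context, split off the alpha channel, and restack R/G/B. A standalone sketch of that path; it uses the conventional [height, width, 4] shape, which is an assumption rather than a copy of the bundled code:

    import * as tf from '@tensorflow/tfjs';

    // convert RGBA ImageData into an RGB [h, w, 3] tensor without fromPixels
    function imageDataToRgbTensor(data: ImageData): tf.Tensor3D {
      return tf.tidy(() => { // tidy frees every intermediate below
        const rgba = tf.tensor(Array.from(data.data), [data.height, data.width, 4]);
        const [r, g, b] = tf.split(rgba, 4, 2); // 4th (alpha) slice is dropped by tidy
        return tf.stack([r.squeeze([2]), g.squeeze([2]), b.squeeze([2])], 2) as tf.Tensor3D;
      });
    }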
var lastInputSum = 0;
var lastCacheDiff = 1;
async function skip(instance, input2) {
if (instance.config.cacheSensitivity === 0)
async function skip(config3, input2) {
if (config3.cacheSensitivity === 0)
return false;
const resizeFact = 32;
if (!input2.shape[1] || !input2.shape[2])
return false;
const reduced = image.resizeBilinear(input2, [Math.trunc(input2.shape[1] / resizeFact), Math.trunc(input2.shape[2] / resizeFact)]);
const reducedData = await reduced.data();
dispose(reduced);
let sum6 = 0;
for (let i = 0; i < reducedData.length / 3; i++)
sum6 += reducedData[3 * i + 2];
reduced.dispose();
const diff = 100 * (Math.max(sum6, lastInputSum) / Math.min(sum6, lastInputSum) - 1);
lastInputSum = sum6;
const skipFrame = diff < Math.max(instance.config.cacheSensitivity, lastCacheDiff);
lastCacheDiff = diff > 10 * instance.config.cacheSensitivity ? 0 : diff;
const skipFrame = diff < Math.max(config3.cacheSensitivity, lastCacheDiff);
lastCacheDiff = diff > 10 * config3.cacheSensitivity ? 0 : diff;
return skipFrame;
}
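skip() now takes the config directly and frees the downscaled frame as soon as its data is on the CPU. The heuristic itself: shrink the frame 32x, sum one channel, and compare the relative change against cacheSensitivity; small changes mark the frame as skippable. A self-contained sketch:

    import * as tf from '@tensorflow/tfjs';

    let lastInputSum = 0;
    let lastCacheDiff = 1;

    // returns true when the frame changed so little that cached results can be reused
    async function skipFrame(cacheSensitivity: number, input: tf.Tensor4D): Promise<boolean> {
      if (cacheSensitivity === 0 || !input.shape[1] || !input.shape[2]) return false;
      const factor = 32;
      const reduced = tf.image.resizeBilinear(input, [Math.trunc(input.shape[1] / factor), Math.trunc(input.shape[2] / factor)]);
      const data = await reduced.data();
      tf.dispose(reduced); // freed as soon as the data is on the CPU, as in the fix
      let sum = 0;
      for (let i = 0; i < data.length / 3; i++) sum += data[3 * i + 2]; // sample one channel
      const diff = 100 * (Math.max(sum, lastInputSum) / Math.min(sum, lastInputSum) - 1);
      lastInputSum = sum;
      const skip = diff < Math.max(cacheSensitivity, lastCacheDiff);
      lastCacheDiff = diff > 10 * cacheSensitivity ? 0 : diff; // large jumps reset the reference
      return skip;
    }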
@ -69423,6 +69446,7 @@ async function predict11(input2) {
dispose(resizeInput);
dispose(norm2);
const squeeze2 = squeeze(res, 0);
dispose(res);
let resizeOutput;
if (squeeze2.shape[2] === 2) {
const softmax6 = squeeze2.softmax();
@ -69440,16 +69464,18 @@ async function predict11(input2) {
} else {
resizeOutput = image.resizeBilinear(squeeze2, [width, height]);
}
if (typeof document === "undefined")
return resizeOutput.data();
dispose(squeeze2);
if (env2.node) {
const data = await resizeOutput.data();
dispose(resizeOutput);
return data;
}
const overlay = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(width, height) : document.createElement("canvas");
overlay.width = width;
overlay.height = height;
if (browser_exports)
await browser_exports.toPixels(resizeOutput, overlay);
dispose(resizeOutput);
dispose(squeeze2);
dispose(res);
const alphaCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(width, height) : document.createElement("canvas");
alphaCanvas.width = width;
alphaCanvas.height = height;
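The segmentation path now disposes res, squeeze2, and resizeOutput on every branch, including the Node branch that returns raw data. A sketch of the branch-safe disposal pattern; the isNode flag and names are assumptions:

    import * as tf from '@tensorflow/tfjs';

    // read back a mask tensor and guarantee it is disposed on both branches
    async function readMask(mask: tf.Tensor3D, isNode: boolean, overlay?: HTMLCanvasElement) {
      if (isNode) {
        const data = await mask.data(); // raw data for server-side callers
        tf.dispose(mask);
        return data;
      }
      if (overlay) await tf.browser.toPixels(mask, overlay); // draw into a canvas in the browser
      tf.dispose(mask);
      return overlay;
    }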
@ -69902,7 +69928,7 @@ var draw_exports = {};
__export(draw_exports, {
all: () => all5,
body: () => body2,
canvas: () => canvas,
canvas: () => canvas2,
face: () => face2,
gesture: () => gesture,
hand: () => hand2,
@ -70375,7 +70401,7 @@ async function person(inCanvas2, result, drawOptions) {
}
}
}
async function canvas(inCanvas2, outCanvas2) {
async function canvas2(inCanvas2, outCanvas2) {
if (!inCanvas2 || !outCanvas2)
return;
getCanvasContext(outCanvas2);
@ -71455,29 +71481,34 @@ async function warmupBitmap(instance) {
async function warmupCanvas(instance) {
return new Promise((resolve) => {
let src;
let size = 0;
switch (instance.config.warmup) {
case "face":
size = 256;
src = "data:image/jpeg;base64," + face3;
break;
case "full":
case "body":
size = 1200;
src = "data:image/jpeg;base64," + body3;
break;
default:
src = null;
}
const img = new Image();
let img;
if (typeof Image !== "undefined")
img = new Image();
else if (env2.Image)
img = new env2.Image();
img.onload = async () => {
const canvas2 = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(size, size) : document.createElement("canvas");
canvas2.width = img.naturalWidth;
canvas2.height = img.naturalHeight;
const ctx = canvas2.getContext("2d");
ctx == null ? void 0 : ctx.drawImage(img, 0, 0);
const res = await instance.detect(canvas2, instance.config);
resolve(res);
const canvas3 = canvas(img.naturalWidth, img.naturalHeight);
if (!canvas3) {
log("Warmup: Canvas not found");
resolve({});
} else {
const ctx = canvas3.getContext("2d");
ctx.drawImage(img, 0, 0);
const tensor2 = await instance.image(canvas3);
const res = await instance.detect(tensor2.tensor, instance.config);
resolve(res);
}
};
if (src)
img.src = src;
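Warmup now routes the decoded image through the shared canvas() helper and through instance.image() before detect(), so it exercises the same tensor path used at runtime and also works under Node with an injected Image. A hedged sketch; the HumanLike surface is an assumption based on the calls above:

    // assumed minimal surface of the Human instance used during warmup
    interface HumanLike {
      image(input: unknown): Promise<{ tensor: unknown }>;
      detect(input: unknown, config?: unknown): Promise<unknown>;
      config: unknown;
    }

    async function warmupWithCanvas(instance: HumanLike, img: HTMLImageElement) {
      const c = document.createElement('canvas'); // stands in for the bundle's canvas() helper
      c.width = img.naturalWidth;
      c.height = img.naturalHeight;
      c.getContext('2d')?.drawImage(img, 0, 0);
      const processed = await instance.image(c); // same preprocessing path as detect()
      return instance.detect(processed.tensor, instance.config);
    }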
@ -71516,7 +71547,7 @@ async function warmup(instance, userConfig) {
let res;
if (typeof createImageBitmap === "function")
res = await warmupBitmap(instance);
else if (typeof Image !== "undefined")
else if (typeof Image !== "undefined" || env2.Canvas !== void 0)
res = await warmupCanvas(instance);
else
res = await warmupNode(instance);
@ -71608,7 +71639,7 @@ var Human = class {
return similarity(embedding1, embedding2);
}
segmentation(input2, background) {
return process5(input2, background, this.config);
return input2 ? process5(input2, background, this.config) : null;
}
enhance(input2) {
return enhance(input2);
@ -71666,32 +71697,32 @@ var Human = class {
await check(this);
await this.load();
timeStamp = now();
this.process = process4(input2, this.config);
const inputTensor = this.process.tensor;
let img = process4(input2, this.config);
this.process = img;
this.performance.image = Math.trunc(now() - timeStamp);
this.analyze("Get Image:");
if (this.config.segmentation.enabled && this.process && inputTensor) {
if (this.config.segmentation.enabled && this.process && img.tensor && img.canvas) {
this.analyze("Start Segmentation:");
this.state = "run:segmentation";
timeStamp = now();
await predict11(this.process);
await predict11(img);
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.segmentation = elapsedTime;
if (this.process.canvas) {
dispose(inputTensor);
this.process = process4(this.process.canvas, this.config);
if (img.canvas) {
dispose(img.tensor);
img = process4(img.canvas, this.config);
}
this.analyze("End Segmentation:");
}
if (!this.process || !inputTensor) {
if (!img.tensor) {
log("could not convert input to tensor");
resolve({ error: "could not convert input to tensor" });
return;
}
this.emit("image");
timeStamp = now();
this.config.skipFrame = await skip(this, inputTensor);
this.config.skipFrame = await skip(this.config, img.tensor);
if (!this.performance.frames)
this.performance.frames = 0;
if (!this.performance.cached)
@ -71706,13 +71737,13 @@ var Human = class {
let handRes = [];
let objectRes = [];
if (this.config.async) {
faceRes = this.config.face.enabled ? detectFace(this, inputTensor) : [];
faceRes = this.config.face.enabled ? detectFace(this, img.tensor) : [];
if (this.performance.face)
delete this.performance.face;
} else {
this.state = "run:face";
timeStamp = now();
faceRes = this.config.face.enabled ? await detectFace(this, inputTensor) : [];
faceRes = this.config.face.enabled ? await detectFace(this, img.tensor) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.face = elapsedTime;
@ -71720,26 +71751,26 @@ var Human = class {
this.analyze("Start Body:");
if (this.config.async) {
if ((_a = this.config.body.modelPath) == null ? void 0 : _a.includes("posenet"))
bodyRes = this.config.body.enabled ? predict4(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict4(img.tensor, this.config) : [];
else if ((_b = this.config.body.modelPath) == null ? void 0 : _b.includes("blazepose"))
bodyRes = this.config.body.enabled ? predict6(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict6(img.tensor, this.config) : [];
else if ((_c = this.config.body.modelPath) == null ? void 0 : _c.includes("efficientpose"))
bodyRes = this.config.body.enabled ? predict7(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict7(img.tensor, this.config) : [];
else if ((_d = this.config.body.modelPath) == null ? void 0 : _d.includes("movenet"))
bodyRes = this.config.body.enabled ? predict8(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict8(img.tensor, this.config) : [];
if (this.performance.body)
delete this.performance.body;
} else {
this.state = "run:body";
timeStamp = now();
if ((_e = this.config.body.modelPath) == null ? void 0 : _e.includes("posenet"))
bodyRes = this.config.body.enabled ? await predict4(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict4(img.tensor, this.config) : [];
else if ((_f = this.config.body.modelPath) == null ? void 0 : _f.includes("blazepose"))
bodyRes = this.config.body.enabled ? await predict6(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict6(img.tensor, this.config) : [];
else if ((_g = this.config.body.modelPath) == null ? void 0 : _g.includes("efficientpose"))
bodyRes = this.config.body.enabled ? await predict7(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict7(img.tensor, this.config) : [];
else if ((_h = this.config.body.modelPath) == null ? void 0 : _h.includes("movenet"))
bodyRes = this.config.body.enabled ? await predict8(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict8(img.tensor, this.config) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.body = elapsedTime;
@ -71747,13 +71778,13 @@ var Human = class {
this.analyze("End Body:");
this.analyze("Start Hand:");
if (this.config.async) {
handRes = this.config.hand.enabled ? predict5(inputTensor, this.config) : [];
handRes = this.config.hand.enabled ? predict5(img.tensor, this.config) : [];
if (this.performance.hand)
delete this.performance.hand;
} else {
this.state = "run:hand";
timeStamp = now();
handRes = this.config.hand.enabled ? await predict5(inputTensor, this.config) : [];
handRes = this.config.hand.enabled ? await predict5(img.tensor, this.config) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.hand = elapsedTime;
@ -71762,18 +71793,18 @@ var Human = class {
this.analyze("Start Object:");
if (this.config.async) {
if ((_i = this.config.object.modelPath) == null ? void 0 : _i.includes("nanodet"))
objectRes = this.config.object.enabled ? predict9(inputTensor, this.config) : [];
objectRes = this.config.object.enabled ? predict9(img.tensor, this.config) : [];
else if ((_j = this.config.object.modelPath) == null ? void 0 : _j.includes("centernet"))
objectRes = this.config.object.enabled ? predict10(inputTensor, this.config) : [];
objectRes = this.config.object.enabled ? predict10(img.tensor, this.config) : [];
if (this.performance.object)
delete this.performance.object;
} else {
this.state = "run:object";
timeStamp = now();
if ((_k = this.config.object.modelPath) == null ? void 0 : _k.includes("nanodet"))
objectRes = this.config.object.enabled ? await predict9(inputTensor, this.config) : [];
objectRes = this.config.object.enabled ? await predict9(img.tensor, this.config) : [];
else if ((_l = this.config.object.modelPath) == null ? void 0 : _l.includes("centernet"))
objectRes = this.config.object.enabled ? await predict10(inputTensor, this.config) : [];
objectRes = this.config.object.enabled ? await predict10(img.tensor, this.config) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.object = elapsedTime;
@ -71806,7 +71837,7 @@ var Human = class {
return join2(faceRes, bodyRes, handRes, gestureRes, shape);
}
};
dispose(inputTensor);
dispose(img.tensor);
this.emit("detect");
resolve(this.result);
});
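The detect() loop now threads a single img = { tensor, canvas } pair through segmentation and every detector, and the closing dispose(img.tensor) is the counterpart of the allocation in process4(); this single-owner lifecycle is the central leak fix of the commit. A condensed sketch:

    import * as tf from '@tensorflow/tfjs';

    type Processed = { tensor: tf.Tensor4D | null; canvas: unknown };

    // one detect pass: allocate the input tensor once, share it, dispose it once
    async function detectOnce(
      process: (input: unknown) => Processed,
      detectors: Array<(t: tf.Tensor4D) => Promise<unknown>>,
      input: unknown,
    ) {
      const img = process(input); // allocates img.tensor
      if (!img.tensor) throw new Error('could not convert input to tensor');
      const results = await Promise.all(detectors.map((d) => d(img.tensor as tf.Tensor4D)));
      tf.dispose(img.tensor); // single owner frees it after all detectors finish
      return results;
    }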
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@ -255,16 +255,16 @@ function getBoxCenter(box6) {
box6.startPoint[1] + (box6.endPoint[1] - box6.startPoint[1]) / 2
];
}
function cutBoxFromImageAndResize(box6, image19, cropSize) {
const h = image19.shape[1];
const w = image19.shape[2];
function cutBoxFromImageAndResize(box6, image20, cropSize) {
const h = image20.shape[1];
const w = image20.shape[2];
const boxes = [[
box6.startPoint[1] / h,
box6.startPoint[0] / w,
box6.endPoint[1] / h,
box6.endPoint[0] / w
]];
return tf.image.cropAndResize(image19, boxes, [0], cropSize);
return tf.image.cropAndResize(image20, boxes, [0], cropSize);
}
function enlargeBox(box6, factor = 1.5) {
const center = getBoxCenter(box6);
@ -408,7 +408,7 @@ var BlazeFaceModel = class {
async getBoundingBoxes(inputImage, userConfig) {
var _a, _b, _c, _d;
if (!inputImage || inputImage["isDisposedInternal"] || inputImage.shape.length !== 4 || inputImage.shape[1] < 1 || inputImage.shape[2] < 1)
return null;
return { boxes: [] };
const [batch, boxes, scores] = tf2.tidy(() => {
const resizedImage = tf2.image.resizeBilinear(inputImage, [this.inputSize, this.inputSize]);
const normalizedImage = tf2.sub(tf2.div(resizedImage, 127.5), 0.5);
@ -438,11 +438,9 @@ var BlazeFaceModel = class {
const confidence = scoresData[nms[i]];
if (confidence > (((_d = this.config.face.detector) == null ? void 0 : _d.minConfidence) || 0)) {
const boundingBox = tf2.slice(boxes, [nms[i], 0], [1, -1]);
const localBox = createBox(boundingBox);
tf2.dispose(boundingBox);
const anchor = this.anchorsData[nms[i]];
const landmarks = tf2.tidy(() => tf2.reshape(tf2.squeeze(tf2.slice(batch, [nms[i], keypointsCount - 1], [1, -1])), [keypointsCount, -1]));
annotatedBoxes.push({ box: localBox, landmarks, anchor, confidence });
annotatedBoxes.push({ box: createBox(boundingBox), landmarks, anchor: this.anchorsData[nms[i]], confidence });
tf2.dispose(boundingBox);
}
}
tf2.dispose(batch);
@ -3934,12 +3932,12 @@ var Pipeline = class {
const angle = computeRotation(box6.landmarks[indexOfMouth], box6.landmarks[indexOfForehead]);
const faceCenter = getBoxCenter({ startPoint: box6.startPoint, endPoint: box6.endPoint });
const faceCenterNormalized = [faceCenter[0] / input.shape[2], faceCenter[1] / input.shape[1]];
const rotatedImage = tf4.image.rotateWithOffset(input, angle, 0, faceCenterNormalized);
const rotated = tf4.image.rotateWithOffset(input, angle, 0, faceCenterNormalized);
const rotationMatrix = buildRotationMatrix(-angle, faceCenter);
const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotatedImage, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotatedImage, [this.boxSize, this.boxSize]);
const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotated, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotated, [this.boxSize, this.boxSize]);
const face5 = tf4.div(cut, 255);
tf4.dispose(cut);
tf4.dispose(rotatedImage);
tf4.dispose(rotated);
return [angle, rotationMatrix, face5];
}
async augmentIris(rawCoords, face5) {
@ -4023,11 +4021,11 @@ var Pipeline = class {
[angle, rotationMatrix, face5] = this.correctFaceRotation(config3, box6, input);
} else {
rotationMatrix = IDENTITY_MATRIX;
const clonedImage = input.clone();
const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, clonedImage, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, clonedImage, [this.boxSize, this.boxSize]);
const cloned = input.clone();
const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, cloned, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, cloned, [this.boxSize, this.boxSize]);
face5 = tf4.div(cut, 255);
tf4.dispose(cut);
tf4.dispose(clonedImage);
tf4.dispose(cloned);
}
if (!config3.face.mesh.enabled) {
results.push({
@ -4056,6 +4054,7 @@ var Pipeline = class {
const mesh = this.transformRawCoords(rawCoords, box6, angle, rotationMatrix);
box6 = { ...enlargeBox(calculateLandmarksBoundingBox(mesh), 1.5), confidence: box6.confidence };
if (config3.face.detector.rotation && config3.face.mesh.enabled && config3.face.description.enabled && env2.kernels.includes("rotatewithoffset")) {
tf4.dispose(face5);
[angle, rotationMatrix, face5] = this.correctFaceRotation(config3, box6, input);
}
results.push({
@ -4122,8 +4121,6 @@ async function predict(input, config3) {
annotations: annotations3,
tensor: prediction.image
});
if (prediction.coords)
tf5.dispose(prediction.coords);
}
return results;
}
@ -4204,20 +4201,20 @@ function match(embedding, db, threshold = 0) {
return best;
}
function enhance(input) {
const image19 = tf6.tidy(() => {
const tensor2 = input.image || input.tensor || input;
if (!(tensor2 instanceof tf6.Tensor))
const image20 = tf6.tidy(() => {
const tensor3 = input.image || input.tensor || input;
if (!(tensor3 instanceof tf6.Tensor))
return null;
const box6 = [[0.05, 0.15, 0.85, 0.85]];
if (!model.inputs[0].shape)
return null;
const crop = tensor2.shape.length === 3 ? tf6.image.cropAndResize(tf6.expandDims(tensor2, 0), box6, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) : tf6.image.cropAndResize(tensor2, box6, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
const crop = tensor3.shape.length === 3 ? tf6.image.cropAndResize(tf6.expandDims(tensor3, 0), box6, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) : tf6.image.cropAndResize(tensor3, box6, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
const norm = tf6.mul(crop, 255);
return norm;
});
return image19;
return image20;
}
async function predict2(image19, config3, idx, count2) {
async function predict2(image20, config3, idx, count2) {
var _a, _b, _c;
if (!model)
return null;
@ -4228,7 +4225,7 @@ async function predict2(image19, config3, idx, count2) {
skipped = 0;
return new Promise(async (resolve) => {
var _a2, _b2;
const enhanced = enhance(image19);
const enhanced = enhance(image20);
let resT;
const obj = {
age: 0,
@ -4248,6 +4245,7 @@ async function predict2(image19, config3, idx, count2) {
}
const argmax = tf6.argMax(resT.find((t) => t.shape[1] === 100), 1);
const age = (await argmax.data())[0];
tf6.dispose(argmax);
const all2 = await resT.find((t) => t.shape[1] === 100).data();
obj.age = Math.round(all2[age - 1] > all2[age + 1] ? 10 * age - 100 * all2[age - 1] : 10 * age + 100 * all2[age + 1]) / 10;
const desc = resT.find((t) => t.shape[1] === 1024);
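The age head emits 100 bins; the code takes the argmax (now disposed immediately after its data is read) and nudges the decade estimate toward whichever neighboring bin carries more mass. A sketch of that decoding, with the interpolation expression taken from the hunk above:

    import * as tf from '@tensorflow/tfjs';

    // decode an age estimate from a [1, 100] bin tensor
    async function decodeAge(bins: tf.Tensor2D): Promise<number> {
      const argmax = tf.argMax(bins, 1);
      const age = (await argmax.data())[0];
      tf.dispose(argmax); // intermediate freed immediately, as in the fix
      const all = await bins.data();
      // lean toward whichever neighbor bin carries more mass
      return Math.round(all[age - 1] > all[age + 1] ? 10 * age - 100 * all[age - 1] : 10 * age + 100 * all[age + 1]) / 10;
    }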
@ -4281,7 +4279,7 @@ async function load4(config3) {
log("cached model:", model2.modelUrl);
return model2;
}
async function predict3(image19, config3, idx, count2) {
async function predict3(image20, config3, idx, count2) {
var _a;
if (!model2)
return null;
@ -4292,7 +4290,7 @@ async function predict3(image19, config3, idx, count2) {
skipped2 = 0;
return new Promise(async (resolve) => {
var _a2, _b;
const resize = tf7.image.resizeBilinear(image19, [model2.inputs[0].shape[2], model2.inputs[0].shape[1]], false);
const resize = tf7.image.resizeBilinear(image20, [model2.inputs[0].shape[2], model2.inputs[0].shape[1]], false);
const [red, green, blue] = tf7.split(resize, 3, 3);
tf7.dispose(resize);
const redNorm = tf7.mul(red, rgb[0]);
@ -4648,7 +4646,7 @@ async function predict4(input, config3) {
results3d[1] = results3d[1].sigmoid();
return results3d;
});
const buffers = await Promise.all(res.map((tensor2) => tensor2.buffer()));
const buffers = await Promise.all(res.map((tensor3) => tensor3.buffer()));
for (const t of res)
tf8.dispose(t);
const decoded = await decode(buffers[0], buffers[1], buffers[2], buffers[3], config3.body.maxDetected, config3.body.minConfidence);
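predict3 resizes the crop, splits it into R/G/B planes, frees the resized tensor, and weights each channel via the module's rgb constants before recombining, effectively a learned grayscale conversion. A sketch of that preprocessing; the weights below are the standard luma coefficients, used only as illustrative stand-ins:

    import * as tf from '@tensorflow/tfjs';

    // weighted-grayscale preprocessing: resize, split channels, recombine
    function weightedGray(face: tf.Tensor4D, size: [number, number],
                          weights: [number, number, number] = [0.2989, 0.587, 0.114]): tf.Tensor4D {
      return tf.tidy(() => { // tidy disposes resize and the per-channel intermediates
        const resize = tf.image.resizeBilinear(face, size, false);
        const [red, green, blue] = tf.split(resize, 3, 3);
        return tf.addN([tf.mul(red, weights[0]), tf.mul(green, weights[1]), tf.mul(blue, weights[2])]) as tf.Tensor4D;
      });
    }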
@ -4689,16 +4687,16 @@ function getBoxCenter2(box6) {
box6.startPoint[1] + (box6.endPoint[1] - box6.startPoint[1]) / 2
];
}
function cutBoxFromImageAndResize2(box6, image19, cropSize) {
const h = image19.shape[1];
const w = image19.shape[2];
function cutBoxFromImageAndResize2(box6, image20, cropSize) {
const h = image20.shape[1];
const w = image20.shape[2];
const boxes = [[
box6.startPoint[1] / h,
box6.startPoint[0] / w,
box6.endPoint[1] / h,
box6.endPoint[0] / w
]];
return tf9.image.cropAndResize(image19, boxes, [0], cropSize);
return tf9.image.cropAndResize(image20, boxes, [0], cropSize);
}
function scaleBoxCoordinates2(box6, factor) {
const startPoint = [box6.startPoint[0] * factor[0], box6.startPoint[1] * factor[1]];
@ -7718,16 +7716,16 @@ var HandDetector = class {
const palmLandmarks = tf10.tidy(() => tf10.reshape(this.normalizeLandmarks(tf10.slice(t.predictions, [index, 5], [1, 14]), index), [-1, 2]));
hands.push({ box: palmBox, palmLandmarks, confidence: scores[index] });
}
for (const tensor2 of Object.keys(t))
tf10.dispose(t[tensor2]);
for (const tensor3 of Object.keys(t))
tf10.dispose(t[tensor3]);
return hands;
}
async estimateHandBounds(input, config3) {
const inputHeight = input.shape[1];
const inputWidth = input.shape[2];
const image19 = tf10.tidy(() => tf10.sub(tf10.div(tf10.image.resizeBilinear(input, [this.inputSize, this.inputSize]), 127.5), 1));
const predictions = await this.getBoxes(image19, config3);
tf10.dispose(image19);
const image20 = tf10.tidy(() => tf10.sub(tf10.div(tf10.image.resizeBilinear(input, [this.inputSize, this.inputSize]), 127.5), 1));
const predictions = await this.getBoxes(image20, config3);
tf10.dispose(image20);
const hands = [];
if (!predictions || predictions.length === 0)
return hands;
@ -7872,11 +7870,11 @@ var HandPipeline = class {
Math.trunc(coord[2])
]);
}
async estimateHands(image19, config3) {
async estimateHands(image20, config3) {
let useFreshBox = false;
let boxes;
if (this.skipped === 0 || this.skipped > config3.hand.skipFrames || !config3.hand.landmarks || !config3.skipFrame) {
boxes = await this.handDetector.estimateHandBounds(image19, config3);
boxes = await this.handDetector.estimateHandBounds(image20, config3);
this.skipped = 0;
}
if (config3.skipFrame)
@ -7895,8 +7893,8 @@ var HandPipeline = class {
if (config3.hand.landmarks) {
const angle = config3.hand.rotation ? computeRotation2(currentBox.palmLandmarks[palmLandmarksPalmBase], currentBox.palmLandmarks[palmLandmarksMiddleFingerBase]) : 0;
const palmCenter = getBoxCenter2(currentBox);
const palmCenterNormalized = [palmCenter[0] / image19.shape[2], palmCenter[1] / image19.shape[1]];
const rotatedImage = config3.hand.rotation && env2.kernels.includes("rotatewithoffset") ? tf11.image.rotateWithOffset(image19, angle, 0, palmCenterNormalized) : image19.clone();
const palmCenterNormalized = [palmCenter[0] / image20.shape[2], palmCenter[1] / image20.shape[1]];
const rotatedImage = config3.hand.rotation && env2.kernels.includes("rotatewithoffset") ? tf11.image.rotateWithOffset(image20, angle, 0, palmCenterNormalized) : image20.clone();
const rotationMatrix = buildRotationMatrix2(-angle, palmCenter);
const newBox = useFreshBox ? this.getBoxForPalmLandmarks(currentBox.palmLandmarks, rotationMatrix) : currentBox;
const croppedInput = cutBoxFromImageAndResize2(newBox, rotatedImage, [this.inputSize, this.inputSize]);
@ -8501,13 +8499,13 @@ async function load7(config3) {
log("cached model:", model4["modelUrl"]);
return model4;
}
async function predict6(image19, config3) {
async function predict6(image20, config3) {
if (!model4)
return [];
if (!config3.body.enabled)
return [];
const imgSize = { width: image19.shape[2] || 0, height: image19.shape[1] || 0 };
const resize = tf13.image.resizeBilinear(image19, [model4["width"], model4["height"]], false);
const imgSize = { width: image20.shape[2] || 0, height: image20.shape[1] || 0 };
const resize = tf13.image.resizeBilinear(image20, [model4["width"], model4["height"]], false);
const normalize = tf13.div(resize, [255]);
tf13.dispose(resize);
const resT = await model4.predict(normalize);
@ -8584,7 +8582,7 @@ function max2d(inputs, minScore) {
return [0, 0, newScore];
});
}
async function predict7(image19, config3) {
async function predict7(image20, config3) {
var _a;
if (skipped3 < (((_a = config3.body) == null ? void 0 : _a.skipFrames) || 0) && config3.skipFrame && Object.keys(keypoints).length > 0) {
skipped3++;
@ -8593,26 +8591,26 @@ async function predict7(image19, config3) {
skipped3 = 0;
return new Promise(async (resolve) => {
var _a2;
const tensor2 = tf14.tidy(() => {
const tensor3 = tf14.tidy(() => {
if (!model5.inputs[0].shape)
return null;
const resize = tf14.image.resizeBilinear(image19, [model5.inputs[0].shape[2], model5.inputs[0].shape[1]], false);
const resize = tf14.image.resizeBilinear(image20, [model5.inputs[0].shape[2], model5.inputs[0].shape[1]], false);
const enhance2 = tf14.mul(resize, 2);
const norm = enhance2.sub(1);
return norm;
});
let resT;
if (config3.body.enabled)
resT = await model5.predict(tensor2);
tf14.dispose(tensor2);
resT = await model5.predict(tensor3);
tf14.dispose(tensor3);
if (resT) {
keypoints.length = 0;
const squeeze7 = resT.squeeze();
tf14.dispose(resT);
const stack2 = squeeze7.unstack(2);
const stack3 = squeeze7.unstack(2);
tf14.dispose(squeeze7);
for (let id = 0; id < stack2.length; id++) {
const [x2, y2, partScore] = max2d(stack2[id], config3.body.minConfidence);
for (let id = 0; id < stack3.length; id++) {
const [x2, y2, partScore] = max2d(stack3[id], config3.body.minConfidence);
if (score > (((_a2 = config3.body) == null ? void 0 : _a2.minConfidence) || 0)) {
keypoints.push({
score: Math.round(100 * partScore) / 100,
@ -8622,13 +8620,13 @@ async function predict7(image19, config3) {
y2 / model5.inputs[0].shape[1]
],
position: [
Math.round(image19.shape[2] * x2 / model5.inputs[0].shape[2]),
Math.round(image19.shape[1] * y2 / model5.inputs[0].shape[1])
Math.round(image20.shape[2] * x2 / model5.inputs[0].shape[2]),
Math.round(image20.shape[1] * y2 / model5.inputs[0].shape[1])
]
});
}
}
stack2.forEach((s) => tf14.dispose(s));
stack3.forEach((s) => tf14.dispose(s));
}
score = keypoints.reduce((prev, curr) => curr.score > prev ? curr.score : prev, 0);
const x = keypoints.map((a) => a.position[0]);
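The EfficientPose output is a stack of per-joint heatmaps: squeeze the batch, unstack along the channel axis, take each map's 2D peak via max2d, then dispose every unstacked slice. A sketch of per-channel peak extraction; the [h, w, c] layout is an assumption consistent with the code:

    import * as tf from '@tensorflow/tfjs';

    // find the [x, y, score] peak of each heatmap channel in an [h, w, c] tensor
    function heatmapPeaks(heatmaps: tf.Tensor3D): Array<[number, number, number]> {
      const channels = heatmaps.unstack(2); // one [h, w] map per joint
      const peaks = channels.map((map) => {
        const [, w] = map.shape;
        const data = map.dataSync();
        let best = 0;
        for (let i = 1; i < data.length; i++) if (data[i] > data[best]) best = i;
        return [best % w, Math.trunc(best / w), data[best]] as [number, number, number];
      });
      channels.forEach((c) => tf.dispose(c)); // free every slice, as the fix does
      return peaks;
    }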
@ -8671,7 +8669,7 @@ async function load9(config3) {
log("cached model:", model6["modelUrl"]);
return model6;
}
async function parseSinglePose(res, config3, image19) {
async function parseSinglePose(res, config3, image20) {
keypoints2.length = 0;
const kpt3 = res[0][0];
for (let id = 0; id < kpt3.length; id++) {
@ -8685,8 +8683,8 @@ async function parseSinglePose(res, config3, image19) {
kpt3[id][0]
],
position: [
Math.round((image19.shape[2] || 0) * kpt3[id][1]),
Math.round((image19.shape[1] || 0) * kpt3[id][0])
Math.round((image20.shape[2] || 0) * kpt3[id][1]),
Math.round((image20.shape[1] || 0) * kpt3[id][0])
]
});
}
@ -8712,7 +8710,7 @@ async function parseSinglePose(res, config3, image19) {
persons2.push({ id: 0, score: score2, box: box5, boxRaw: boxRaw2, keypoints: keypoints2 });
return persons2;
}
async function parseMultiPose(res, config3, image19) {
async function parseMultiPose(res, config3, image20) {
const persons2 = [];
for (let p = 0; p < res[0].length; p++) {
const kpt3 = res[0][p];
@ -8731,8 +8729,8 @@ async function parseMultiPose(res, config3, image19) {
kpt3[3 * i + 0]
],
position: [
Math.trunc(kpt3[3 * i + 1] * (image19.shape[2] || 0)),
Math.trunc(kpt3[3 * i + 0] * (image19.shape[1] || 0))
Math.trunc(kpt3[3 * i + 1] * (image20.shape[2] || 0)),
Math.trunc(kpt3[3 * i + 0] * (image20.shape[1] || 0))
]
});
}
@ -8743,45 +8741,45 @@ async function parseMultiPose(res, config3, image19) {
score: score2,
boxRaw: boxRaw2,
box: [
Math.trunc(boxRaw2[0] * (image19.shape[2] || 0)),
Math.trunc(boxRaw2[1] * (image19.shape[1] || 0)),
Math.trunc(boxRaw2[2] * (image19.shape[2] || 0)),
Math.trunc(boxRaw2[3] * (image19.shape[1] || 0))
Math.trunc(boxRaw2[0] * (image20.shape[2] || 0)),
Math.trunc(boxRaw2[1] * (image20.shape[1] || 0)),
Math.trunc(boxRaw2[2] * (image20.shape[2] || 0)),
Math.trunc(boxRaw2[3] * (image20.shape[1] || 0))
],
keypoints: keypoints2
});
}
return persons2;
}
async function predict8(image19, config3) {
async function predict8(image20, config3) {
if (skipped4 < (config3.body.skipFrames || 0) && config3.skipFrame && Object.keys(keypoints2).length > 0) {
skipped4++;
return [{ id: 0, score: score2, box: box5, boxRaw: boxRaw2, keypoints: keypoints2 }];
}
skipped4 = 0;
return new Promise(async (resolve) => {
const tensor2 = tf15.tidy(() => {
const tensor3 = tf15.tidy(() => {
if (!model6.inputs[0].shape)
return null;
let inputSize = model6.inputs[0].shape[2];
if (inputSize === -1)
inputSize = 256;
const resize = tf15.image.resizeBilinear(image19, [inputSize, inputSize], false);
const resize = tf15.image.resizeBilinear(image20, [inputSize, inputSize], false);
const cast4 = tf15.cast(resize, "int32");
return cast4;
});
let resT;
if (config3.body.enabled)
resT = await model6.predict(tensor2);
tf15.dispose(tensor2);
resT = await model6.predict(tensor3);
tf15.dispose(tensor3);
if (!resT)
resolve([]);
const res = await resT.array();
let persons2;
if (resT.shape[2] === 17)
persons2 = await parseSinglePose(res, config3, image19);
persons2 = await parseSinglePose(res, config3, image20);
else if (resT.shape[2] === 56)
persons2 = await parseMultiPose(res, config3, image19);
persons2 = await parseMultiPose(res, config3, image20);
tf15.dispose(resT);
resolve(persons2);
});
|
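Reviewer note: across these hunks the per-frame temporaries are renamed (image19 to image20, tensor2 to tensor3) and, more to the point of this commit, the preprocessed input tensor is now explicitly disposed right after model.predict() so it cannot outlive the frame. A minimal standalone sketch of that pattern (illustrative only; assumes a loaded tf.js model, names here are not from this codebase):

const tf = require('@tensorflow/tfjs-node');
async function runOnce(model, image) {
  // build the preprocessed input inside tidy() so intermediates are freed automatically
  const input = tf.tidy(() => tf.cast(tf.image.resizeBilinear(image, [256, 256], false), 'int32'));
  const result = model.predict(input);
  tf.dispose(input); // free the input as soon as the model has consumed it
  const data = await result.array();
  tf.dispose(result); // free the output after reading values back to JS
  return data;
}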
@@ -8955,7 +8953,7 @@ async function process2(res, inputSize, outputShape, config3) {
results = results.filter((_val, idx) => nmsIdx.includes(idx)).sort((a, b) => b.score - a.score);
return results;
}
async function predict9(image19, config3) {
async function predict9(image20, config3) {
if (skipped5 < (config3.object.skipFrames || 0) && config3.skipFrame && last3.length > 0) {
skipped5++;
return last3;
@@ -8964,8 +8962,8 @@ async function predict9(image19, config3) {
if (!env2.kernels.includes("mod") || !env2.kernels.includes("sparsetodense"))
return last3;
return new Promise(async (resolve) => {
const outputSize = [image19.shape[2], image19.shape[1]];
const resize = tf16.image.resizeBilinear(image19, [model7.inputSize, model7.inputSize], false);
const outputSize = [image20.shape[2], image20.shape[1]];
const resize = tf16.image.resizeBilinear(image20, [model7.inputSize, model7.inputSize], false);
const norm = tf16.div(resize, 255);
const transpose = norm.transpose([0, 3, 1, 2]);
tf16.dispose(norm);
@@ -9011,6 +9009,7 @@ async function process3(res, inputSize, outputShape, config3) {
tf17.dispose(squeezeT);
const stackT = tf17.stack([arr[1], arr[0], arr[3], arr[2]], 1);
const boxesT = tf17.squeeze(stackT);
tf17.dispose(stackT);
const scoresT = tf17.squeeze(arr[4]);
const classesT = tf17.squeeze(arr[5]);
arr.forEach((t) => tf17.dispose(t));
@@ -9220,8 +9219,8 @@ function GLImageFilter(params) {
gl.uniform1f(_currentProgram.uniform.flipY, flipY ? -1 : 1);
gl.drawArrays(gl.TRIANGLES, 0, 6);
};
this.apply = function(image19) {
_resize(image19.width, image19.height);
this.apply = function(image20) {
_resize(image20.width, image20.height);
_drawCount = 0;
if (!_sourceTexture)
_sourceTexture = gl.createTexture();
@@ -9230,7 +9229,7 @@ function GLImageFilter(params) {
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image19);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image20);
if (_filterChain.length === 0) {
_draw();
return _canvas;
@@ -9781,16 +9780,33 @@ var maxSize = 2048;
var inCanvas;
var outCanvas;
var fx;
function canvas(width, height) {
let c;
if (env2.browser) {
if (typeof OffscreenCanvas !== "undefined") {
c = new OffscreenCanvas(width, height);
} else {
c = document.createElement("canvas");
c.width = width;
c.height = height;
}
} else {
c = typeof env2.Canvas !== "undefined" ? new env2.Canvas(width, height) : null;
}
if (!c)
throw new Error("Human: Cannot create canvas");
return c;
}
function process4(input, config3) {
let tensor2;
let tensor3;
if (!input)
throw new Error("Human: Input is missing");
if (!(input instanceof tf18.Tensor) && !(typeof Image !== "undefined" && input instanceof Image) && !(typeof ImageData !== "undefined" && input instanceof ImageData) && !(typeof ImageBitmap !== "undefined" && input instanceof ImageBitmap) && !(typeof HTMLImageElement !== "undefined" && input instanceof HTMLImageElement) && !(typeof HTMLMediaElement !== "undefined" && input instanceof HTMLMediaElement) && !(typeof HTMLVideoElement !== "undefined" && input instanceof HTMLVideoElement) && !(typeof HTMLCanvasElement !== "undefined" && input instanceof HTMLCanvasElement) && !(typeof OffscreenCanvas !== "undefined" && input instanceof OffscreenCanvas)) {
if (!(input instanceof tf18.Tensor) && !(typeof Image !== "undefined" && input instanceof Image) && !(typeof env2.Canvas !== "undefined" && input instanceof env2.Canvas) && !(typeof ImageData !== "undefined" && input instanceof ImageData) && !(typeof ImageBitmap !== "undefined" && input instanceof ImageBitmap) && !(typeof HTMLImageElement !== "undefined" && input instanceof HTMLImageElement) && !(typeof HTMLMediaElement !== "undefined" && input instanceof HTMLMediaElement) && !(typeof HTMLVideoElement !== "undefined" && input instanceof HTMLVideoElement) && !(typeof HTMLCanvasElement !== "undefined" && input instanceof HTMLCanvasElement) && !(typeof OffscreenCanvas !== "undefined" && input instanceof OffscreenCanvas)) {
throw new Error("Human: Input type is not recognized");
}
if (input instanceof tf18.Tensor) {
if (input.shape && input.shape.length === 4 && input.shape[0] === 1 && input.shape[3] === 3)
tensor2 = tf18.clone(input);
tensor3 = tf18.clone(input);
else
throw new Error(`Human: Input tensor shape must be [1, height, width, 3] and instead was ${input.shape}`);
} else {
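Reviewer note: the new canvas(width, height) helper above replaces the ad-hoc `typeof OffscreenCanvas` checks scattered through this file with one environment-aware constructor: OffscreenCanvas or a DOM canvas in the browser, env.Canvas (e.g. a node-canvas constructor supplied by the host) in Node. A usage sketch (illustrative only):

const scratch = canvas(640, 480); // OffscreenCanvas, <canvas>, or env2.Canvas depending on environment
const ctx2d = scratch.getContext('2d');
ctx2d.fillRect(0, 0, scratch.width, scratch.height); // same 2d API in all three cases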
@@ -9818,15 +9834,10 @@ function process4(input, config3) {
targetHeight = originalHeight * ((config3.filter.width || 0) / originalWidth);
if (!targetWidth || !targetHeight)
throw new Error("Human: Input cannot determine dimension");
if (!inCanvas || (inCanvas == null ? void 0 : inCanvas.width) !== targetWidth || (inCanvas == null ? void 0 : inCanvas.height) !== targetHeight) {
inCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
if ((inCanvas == null ? void 0 : inCanvas.width) !== targetWidth)
inCanvas.width = targetWidth;
if ((inCanvas == null ? void 0 : inCanvas.height) !== targetHeight)
inCanvas.height = targetHeight;
}
if (!inCanvas || (inCanvas == null ? void 0 : inCanvas.width) !== targetWidth || (inCanvas == null ? void 0 : inCanvas.height) !== targetHeight)
inCanvas = canvas(targetWidth, targetHeight);
const ctx = inCanvas.getContext("2d");
if (input instanceof ImageData) {
if (typeof ImageData !== "undefined" && input instanceof ImageData) {
ctx.putImageData(input, 0, 0);
} else {
if (config3.filter.flip && typeof ctx.translate !== "undefined") {
@@ -9840,7 +9851,7 @@ function process4(input, config3) {
}
if (config3.filter.enabled) {
if (!fx || !outCanvas || inCanvas.width !== outCanvas.width || (inCanvas == null ? void 0 : inCanvas.height) !== (outCanvas == null ? void 0 : outCanvas.height)) {
outCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(inCanvas == null ? void 0 : inCanvas.width, inCanvas == null ? void 0 : inCanvas.height) : document.createElement("canvas");
outCanvas = canvas(inCanvas == null ? void 0 : inCanvas.width, inCanvas == null ? void 0 : inCanvas.height);
if ((outCanvas == null ? void 0 : outCanvas.width) !== (inCanvas == null ? void 0 : inCanvas.width))
outCanvas.width = inCanvas == null ? void 0 : inCanvas.width;
if ((outCanvas == null ? void 0 : outCanvas.height) !== (inCanvas == null ? void 0 : inCanvas.height))
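Reviewer note: inCanvas and outCanvas are module-level caches, and both sites now reallocate only when the target dimensions actually change; reusing the same canvases across frames avoids per-frame allocations that the GC reclaims slowly, especially for GPU-backed canvases. The guard in miniature (illustrative, using the canvas() helper introduced above):

let cached = null; // module-level, survives across frames
function scratchCanvas(w, h) {
  if (!cached || cached.width !== w || cached.height !== h) cached = canvas(w, h); // reallocate only on resize
  return cached;
}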
@@ -9883,58 +9894,70 @@ function process4(input, config3) {
if (fx)
fx = null;
}
if (!tensor2) {
if (!tensor3) {
let pixels;
if (outCanvas.data) {
const shape = [outCanvas.height, outCanvas.width, 3];
pixels = tf18.tensor3d(outCanvas.data, shape, "int32");
} else if (outCanvas instanceof ImageData) {
} else if (typeof ImageData !== "undefined" && outCanvas instanceof ImageData) {
pixels = tf18.browser ? tf18.browser.fromPixels(outCanvas) : null;
} else if (config3.backend === "webgl" || config3.backend === "humangl") {
const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
const tempCanvas = canvas(targetWidth, targetHeight);
tempCanvas.width = targetWidth;
tempCanvas.height = targetHeight;
const tempCtx = tempCanvas.getContext("2d");
tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
pixels = tf18.browser ? tf18.browser.fromPixels(tempCanvas) : null;
pixels = tf18.browser && env2.browser ? tf18.browser.fromPixels(tempCanvas) : null;
} else {
const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
const tempCanvas = canvas(targetWidth, targetHeight);
tempCanvas.width = targetWidth;
tempCanvas.height = targetHeight;
const tempCtx = tempCanvas.getContext("2d");
tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
const data = tempCtx == null ? void 0 : tempCtx.getImageData(0, 0, targetWidth, targetHeight);
pixels = tf18.browser ? tf18.browser.fromPixels(data) : null;
tempCtx.drawImage(outCanvas, 0, 0);
const data = tempCtx.getImageData(0, 0, targetWidth, targetHeight);
if (tf18.browser && env2.browser) {
pixels = tf18.browser.fromPixels(data);
} else {
pixels = tf18.tidy(() => {
const imageData = tf18.tensor(Array.from(data.data), [targetWidth, targetHeight, 4]);
const channels = tf18.split(imageData, 4, 2);
const rgb2 = tf18.stack([channels[0], channels[1], channels[2]], 2);
const expand = tf18.reshape(rgb2, [imageData.shape[0], imageData.shape[1], 3]);
return expand;
});
}
}
if (pixels) {
const casted = tf18.cast(pixels, "float32");
tensor2 = tf18.expandDims(casted, 0);
tensor3 = tf18.expandDims(casted, 0);
tf18.dispose(pixels);
tf18.dispose(casted);
} else {
tensor3 = tf18.zeros([1, targetWidth, targetHeight, 3]);
throw new Error("Human: Cannot create tensor from input");
}
}
}
const canvas2 = config3.filter.return ? outCanvas : null;
return { tensor: tensor2, canvas: canvas2 };
return { tensor: tensor3, canvas: config3.filter.return ? outCanvas : null };
}
var lastInputSum = 0;
var lastCacheDiff = 1;
async function skip(instance, input) {
if (instance.config.cacheSensitivity === 0)
async function skip(config3, input) {
if (config3.cacheSensitivity === 0)
return false;
const resizeFact = 32;
if (!input.shape[1] || !input.shape[2])
return false;
const reduced = tf18.image.resizeBilinear(input, [Math.trunc(input.shape[1] / resizeFact), Math.trunc(input.shape[2] / resizeFact)]);
const reducedData = await reduced.data();
tf18.dispose(reduced);
let sum = 0;
for (let i = 0; i < reducedData.length / 3; i++)
sum += reducedData[3 * i + 2];
reduced.dispose();
const diff = 100 * (Math.max(sum, lastInputSum) / Math.min(sum, lastInputSum) - 1);
lastInputSum = sum;
const skipFrame = diff < Math.max(instance.config.cacheSensitivity, lastCacheDiff);
lastCacheDiff = diff > 10 * instance.config.cacheSensitivity ? 0 : diff;
const skipFrame = diff < Math.max(config3.cacheSensitivity, lastCacheDiff);
lastCacheDiff = diff > 10 * config3.cacheSensitivity ? 0 : diff;
return skipFrame;
}
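Reviewer note: skip() now takes the config object directly instead of the whole instance, and disposes the downscaled tensor immediately after its pixel data has been read (the old reduced.dispose() ran only after the summing loop). The heuristic itself: shrink the frame 32x, sum one color channel as a cheap fingerprint, and treat a relative change below config.cacheSensitivity as "same frame". A simplified standalone sketch (illustrative; assumes a [1, height, width, 3] input tensor and omits the lastCacheDiff hysteresis of the real code):

const tf = require('@tensorflow/tfjs-node');
let lastSum = 0;
async function frameChanged(input, sensitivity) {
  const reduced = tf.image.resizeBilinear(input, [Math.trunc(input.shape[1] / 32), Math.trunc(input.shape[2] / 32)]);
  const data = await reduced.data();
  tf.dispose(reduced); // free as soon as the readback completes
  let sum = 0;
  for (let i = 0; i < data.length / 3; i++) sum += data[3 * i + 2]; // one channel is fingerprint enough
  const diff = 100 * (Math.max(sum, lastSum) / Math.min(sum, lastSum) - 1);
  lastSum = sum;
  return diff >= sensitivity;
}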
@@ -9966,6 +9989,7 @@ async function predict11(input) {
tf19.dispose(resizeInput);
tf19.dispose(norm);
const squeeze7 = tf19.squeeze(res, 0);
tf19.dispose(res);
let resizeOutput;
if (squeeze7.shape[2] === 2) {
const softmax = squeeze7.softmax();
@@ -9983,16 +10007,18 @@ async function predict11(input) {
} else {
resizeOutput = tf19.image.resizeBilinear(squeeze7, [width, height]);
}
if (typeof document === "undefined")
return resizeOutput.data();
tf19.dispose(squeeze7);
if (env2.node) {
const data = await resizeOutput.data();
tf19.dispose(resizeOutput);
return data;
}
const overlay = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(width, height) : document.createElement("canvas");
overlay.width = width;
overlay.height = height;
if (tf19.browser)
await tf19.browser.toPixels(resizeOutput, overlay);
tf19.dispose(resizeOutput);
tf19.dispose(squeeze7);
tf19.dispose(res);
const alphaCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(width, height) : document.createElement("canvas");
alphaCanvas.width = width;
alphaCanvas.height = height;
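Reviewer note: the old Node path returned resizeOutput.data() directly, stranding resizeOutput (and upstream squeeze7/res) forever; the new env2.node branch awaits the readback, disposes the tensor, then returns plain data. The general shape of that fix (illustrative, assuming a tf.js import as in the sketches above):

async function readAndFree(t) {
  const data = await t.data(); // copy values out of tensor memory
  tf.dispose(t);               // release the tensor before returning
  return data;
}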
@@ -10297,7 +10323,7 @@ var detectFace = async (parent, input) => {
delete faces[i].annotations.rightEyeIris;
}
const irisSize = ((_e = faces[i].annotations) == null ? void 0 : _e.leftEyeIris) && ((_f = faces[i].annotations) == null ? void 0 : _f.rightEyeIris) ? Math.max(Math.abs(faces[i].annotations.leftEyeIris[3][0] - faces[i].annotations.leftEyeIris[1][0]), Math.abs(faces[i].annotations.rightEyeIris[4][1] - faces[i].annotations.rightEyeIris[2][1])) / input.shape[2] : 0;
const tensor2 = parent.config.face.detector.return ? tf20.squeeze(faces[i].tensor) : null;
const tensor3 = parent.config.face.detector.return ? tf20.squeeze(faces[i].tensor) : null;
tf20.dispose(faces[i].tensor);
if (faces[i].tensor)
delete faces[i].tensor;
@@ -10311,7 +10337,7 @@ var detectFace = async (parent, input) => {
emotion: emotionRes,
iris: irisSize !== 0 ? Math.trunc(500 / irisSize / 11.7) / 100 : 0,
rotation,
tensor: tensor2
tensor: tensor3
});
parent.analyze("End Face");
}
@@ -10446,7 +10472,7 @@ var draw_exports = {};
__export(draw_exports, {
all: () => all,
body: () => body2,
canvas: () => canvas,
canvas: () => canvas2,
face: () => face2,
gesture: () => gesture,
hand: () => hand2,
@@ -10919,7 +10945,7 @@ async function person(inCanvas2, result, drawOptions) {
}
}
}
async function canvas(inCanvas2, outCanvas2) {
async function canvas2(inCanvas2, outCanvas2) {
if (!inCanvas2 || !outCanvas2)
return;
getCanvasContext(outCanvas2);
@@ -12002,29 +12028,34 @@ async function warmupBitmap(instance) {
async function warmupCanvas(instance) {
return new Promise((resolve) => {
let src;
let size = 0;
switch (instance.config.warmup) {
case "face":
size = 256;
src = "data:image/jpeg;base64," + face3;
break;
case "full":
case "body":
size = 1200;
src = "data:image/jpeg;base64," + body3;
break;
default:
src = null;
}
const img = new Image();
let img;
if (typeof Image !== "undefined")
img = new Image();
else if (env2.Image)
img = new env2.Image();
img.onload = async () => {
const canvas2 = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(size, size) : document.createElement("canvas");
canvas2.width = img.naturalWidth;
canvas2.height = img.naturalHeight;
const ctx = canvas2.getContext("2d");
ctx == null ? void 0 : ctx.drawImage(img, 0, 0);
const res = await instance.detect(canvas2, instance.config);
resolve(res);
const canvas3 = canvas(img.naturalWidth, img.naturalHeight);
if (!canvas3) {
log("Warmup: Canvas not found");
resolve({});
} else {
const ctx = canvas3.getContext("2d");
ctx.drawImage(img, 0, 0);
const tensor3 = await instance.image(canvas3);
const res = await instance.detect(tensor3.tensor, instance.config);
resolve(res);
}
};
if (src)
img.src = src;
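Reviewer note: warmupCanvas() no longer assumes a DOM Image; it falls back to env.Image (e.g. the node-canvas Image class) and routes drawing through the shared canvas() helper, then builds the tensor via instance.image() so the warmup input goes through the same processing and disposal path as a real frame. The Node-side equivalent in isolation might look like this (illustrative; assumes the node-canvas package, which is not bundled here):

const { Image, createCanvas } = require('canvas'); // node-canvas
const img = new Image();
img.onload = () => {
  const c = createCanvas(img.width, img.height);
  c.getContext('2d').drawImage(img, 0, 0);
  // c can now be handed to code expecting a canvas-like object
};
img.src = 'data:image/jpeg;base64,...'; // payload elided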
@@ -12063,7 +12094,7 @@ async function warmup(instance, userConfig) {
let res;
if (typeof createImageBitmap === "function")
res = await warmupBitmap(instance);
else if (typeof Image !== "undefined")
else if (typeof Image !== "undefined" || env2.Canvas !== void 0)
res = await warmupCanvas(instance);
else
res = await warmupNode(instance);
@@ -12155,7 +12186,7 @@ var Human = class {
return similarity(embedding1, embedding2);
}
segmentation(input, background) {
return process5(input, background, this.config);
return input ? process5(input, background, this.config) : null;
}
enhance(input) {
return enhance(input);
@@ -12213,32 +12244,32 @@ var Human = class {
await check(this);
await this.load();
timeStamp = now();
this.process = process4(input, this.config);
const inputTensor = this.process.tensor;
let img = process4(input, this.config);
this.process = img;
this.performance.image = Math.trunc(now() - timeStamp);
this.analyze("Get Image:");
if (this.config.segmentation.enabled && this.process && inputTensor) {
if (this.config.segmentation.enabled && this.process && img.tensor && img.canvas) {
this.analyze("Start Segmentation:");
this.state = "run:segmentation";
timeStamp = now();
await predict11(this.process);
await predict11(img);
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.segmentation = elapsedTime;
if (this.process.canvas) {
tf24.dispose(inputTensor);
this.process = process4(this.process.canvas, this.config);
if (img.canvas) {
tf24.dispose(img.tensor);
img = process4(img.canvas, this.config);
}
this.analyze("End Segmentation:");
}
if (!this.process || !inputTensor) {
if (!img.tensor) {
log("could not convert input to tensor");
resolve({ error: "could not convert input to tensor" });
return;
}
this.emit("image");
timeStamp = now();
this.config.skipFrame = await skip(this, inputTensor);
this.config.skipFrame = await skip(this.config, img.tensor);
if (!this.performance.frames)
this.performance.frames = 0;
if (!this.performance.cached)
@@ -12253,13 +12284,13 @@ var Human = class {
let handRes = [];
let objectRes = [];
if (this.config.async) {
faceRes = this.config.face.enabled ? detectFace(this, inputTensor) : [];
faceRes = this.config.face.enabled ? detectFace(this, img.tensor) : [];
if (this.performance.face)
delete this.performance.face;
} else {
this.state = "run:face";
timeStamp = now();
faceRes = this.config.face.enabled ? await detectFace(this, inputTensor) : [];
faceRes = this.config.face.enabled ? await detectFace(this, img.tensor) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.face = elapsedTime;
@@ -12267,26 +12298,26 @@ var Human = class {
this.analyze("Start Body:");
if (this.config.async) {
if ((_a = this.config.body.modelPath) == null ? void 0 : _a.includes("posenet"))
bodyRes = this.config.body.enabled ? predict4(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict4(img.tensor, this.config) : [];
else if ((_b = this.config.body.modelPath) == null ? void 0 : _b.includes("blazepose"))
bodyRes = this.config.body.enabled ? predict6(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict6(img.tensor, this.config) : [];
else if ((_c = this.config.body.modelPath) == null ? void 0 : _c.includes("efficientpose"))
bodyRes = this.config.body.enabled ? predict7(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict7(img.tensor, this.config) : [];
else if ((_d = this.config.body.modelPath) == null ? void 0 : _d.includes("movenet"))
bodyRes = this.config.body.enabled ? predict8(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict8(img.tensor, this.config) : [];
if (this.performance.body)
delete this.performance.body;
} else {
this.state = "run:body";
timeStamp = now();
if ((_e = this.config.body.modelPath) == null ? void 0 : _e.includes("posenet"))
bodyRes = this.config.body.enabled ? await predict4(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict4(img.tensor, this.config) : [];
else if ((_f = this.config.body.modelPath) == null ? void 0 : _f.includes("blazepose"))
bodyRes = this.config.body.enabled ? await predict6(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict6(img.tensor, this.config) : [];
else if ((_g = this.config.body.modelPath) == null ? void 0 : _g.includes("efficientpose"))
bodyRes = this.config.body.enabled ? await predict7(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict7(img.tensor, this.config) : [];
else if ((_h = this.config.body.modelPath) == null ? void 0 : _h.includes("movenet"))
bodyRes = this.config.body.enabled ? await predict8(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict8(img.tensor, this.config) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.body = elapsedTime;
@@ -12294,13 +12325,13 @@ var Human = class {
this.analyze("End Body:");
this.analyze("Start Hand:");
if (this.config.async) {
handRes = this.config.hand.enabled ? predict5(inputTensor, this.config) : [];
handRes = this.config.hand.enabled ? predict5(img.tensor, this.config) : [];
if (this.performance.hand)
delete this.performance.hand;
} else {
this.state = "run:hand";
timeStamp = now();
handRes = this.config.hand.enabled ? await predict5(inputTensor, this.config) : [];
handRes = this.config.hand.enabled ? await predict5(img.tensor, this.config) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.hand = elapsedTime;
@@ -12309,18 +12340,18 @@ var Human = class {
this.analyze("Start Object:");
if (this.config.async) {
if ((_i = this.config.object.modelPath) == null ? void 0 : _i.includes("nanodet"))
objectRes = this.config.object.enabled ? predict9(inputTensor, this.config) : [];
objectRes = this.config.object.enabled ? predict9(img.tensor, this.config) : [];
else if ((_j = this.config.object.modelPath) == null ? void 0 : _j.includes("centernet"))
objectRes = this.config.object.enabled ? predict10(inputTensor, this.config) : [];
objectRes = this.config.object.enabled ? predict10(img.tensor, this.config) : [];
if (this.performance.object)
delete this.performance.object;
} else {
this.state = "run:object";
timeStamp = now();
if ((_k = this.config.object.modelPath) == null ? void 0 : _k.includes("nanodet"))
objectRes = this.config.object.enabled ? await predict9(inputTensor, this.config) : [];
objectRes = this.config.object.enabled ? await predict9(img.tensor, this.config) : [];
else if ((_l = this.config.object.modelPath) == null ? void 0 : _l.includes("centernet"))
objectRes = this.config.object.enabled ? await predict10(inputTensor, this.config) : [];
objectRes = this.config.object.enabled ? await predict10(img.tensor, this.config) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.object = elapsedTime;
@@ -12353,7 +12384,7 @@ var Human = class {
return join2(faceRes, bodyRes, handRes, gestureRes, shape);
}
};
tf24.dispose(inputTensor);
tf24.dispose(img.tensor);
this.emit("detect");
resolve(this.result);
});
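Reviewer note: taken together, the Human.detect() hunks above establish a single-owner lifecycle for the frame tensor: process4() produces one { tensor, canvas } pair held in the local img, every detector (face, body, hand, object) borrows img.tensor, and tf24.dispose(img.tensor) releases it exactly once at the end of the frame. The ownership pattern in miniature (illustrative; the helper names here are hypothetical, not from this codebase):

async function detectFrame(input) {
  const img = processInput(input);                 // hypothetical: returns { tensor, canvas }
  const faces = await runFaceModels(img.tensor);   // borrow only
  const bodies = await runBodyModels(img.tensor);  // borrow only
  tf.dispose(img.tensor);                          // single release point per frame
  return { faces, bodies };
}

The hunks below apply the same commit to a second bundled output of the library (module aliases tf, tf2, tf4, ... instead of tfjs_esm_exports), so the notes above apply there unchanged.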
@@ -256,16 +256,16 @@ function getBoxCenter(box6) {
box6.startPoint[1] + (box6.endPoint[1] - box6.startPoint[1]) / 2
];
}
function cutBoxFromImageAndResize(box6, image19, cropSize) {
const h = image19.shape[1];
const w = image19.shape[2];
function cutBoxFromImageAndResize(box6, image20, cropSize) {
const h = image20.shape[1];
const w = image20.shape[2];
const boxes = [[
box6.startPoint[1] / h,
box6.startPoint[0] / w,
box6.endPoint[1] / h,
box6.endPoint[0] / w
]];
return tf.image.cropAndResize(image19, boxes, [0], cropSize);
return tf.image.cropAndResize(image20, boxes, [0], cropSize);
}
function enlargeBox(box6, factor = 1.5) {
const center = getBoxCenter(box6);
@@ -409,7 +409,7 @@ var BlazeFaceModel = class {
async getBoundingBoxes(inputImage, userConfig) {
var _a, _b, _c, _d;
if (!inputImage || inputImage["isDisposedInternal"] || inputImage.shape.length !== 4 || inputImage.shape[1] < 1 || inputImage.shape[2] < 1)
return null;
return { boxes: [] };
const [batch, boxes, scores] = tf2.tidy(() => {
const resizedImage = tf2.image.resizeBilinear(inputImage, [this.inputSize, this.inputSize]);
const normalizedImage = tf2.sub(tf2.div(resizedImage, 127.5), 0.5);
@@ -439,11 +439,9 @@ var BlazeFaceModel = class {
const confidence = scoresData[nms[i]];
if (confidence > (((_d = this.config.face.detector) == null ? void 0 : _d.minConfidence) || 0)) {
const boundingBox = tf2.slice(boxes, [nms[i], 0], [1, -1]);
const localBox = createBox(boundingBox);
tf2.dispose(boundingBox);
const anchor = this.anchorsData[nms[i]];
const landmarks = tf2.tidy(() => tf2.reshape(tf2.squeeze(tf2.slice(batch, [nms[i], keypointsCount - 1], [1, -1])), [keypointsCount, -1]));
annotatedBoxes.push({ box: localBox, landmarks, anchor, confidence });
annotatedBoxes.push({ box: createBox(boundingBox), landmarks, anchor: this.anchorsData[nms[i]], confidence });
tf2.dispose(boundingBox);
}
}
tf2.dispose(batch);
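Reviewer note: two small fixes in this BlazeFace hunk pair -- getBoundingBoxes() now returns an empty { boxes: [] } sentinel instead of null so callers can iterate unconditionally, and the sliced boundingBox tensor is disposed only after createBox() has derived its own representation from it. Dispose-after-derive in miniature (illustrative):

const slice = tf.slice(boxes, [i, 0], [1, -1]); // short-lived view of one detection
const box = createBox(slice);                   // builds its own representation from the slice
tf.dispose(slice);                              // then the slice can be freed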
@@ -3935,12 +3933,12 @@ var Pipeline = class {
const angle = computeRotation(box6.landmarks[indexOfMouth], box6.landmarks[indexOfForehead]);
const faceCenter = getBoxCenter({ startPoint: box6.startPoint, endPoint: box6.endPoint });
const faceCenterNormalized = [faceCenter[0] / input.shape[2], faceCenter[1] / input.shape[1]];
const rotatedImage = tf4.image.rotateWithOffset(input, angle, 0, faceCenterNormalized);
const rotated = tf4.image.rotateWithOffset(input, angle, 0, faceCenterNormalized);
const rotationMatrix = buildRotationMatrix(-angle, faceCenter);
const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotatedImage, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotatedImage, [this.boxSize, this.boxSize]);
const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotated, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotated, [this.boxSize, this.boxSize]);
const face5 = tf4.div(cut, 255);
tf4.dispose(cut);
tf4.dispose(rotatedImage);
tf4.dispose(rotated);
return [angle, rotationMatrix, face5];
}
async augmentIris(rawCoords, face5) {
@@ -4024,11 +4022,11 @@ var Pipeline = class {
[angle, rotationMatrix, face5] = this.correctFaceRotation(config3, box6, input);
} else {
rotationMatrix = IDENTITY_MATRIX;
const clonedImage = input.clone();
const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, clonedImage, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, clonedImage, [this.boxSize, this.boxSize]);
const cloned = input.clone();
const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, cloned, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, cloned, [this.boxSize, this.boxSize]);
face5 = tf4.div(cut, 255);
tf4.dispose(cut);
tf4.dispose(clonedImage);
tf4.dispose(cloned);
}
if (!config3.face.mesh.enabled) {
results.push({
@@ -4057,6 +4055,7 @@ var Pipeline = class {
const mesh = this.transformRawCoords(rawCoords, box6, angle, rotationMatrix);
box6 = { ...enlargeBox(calculateLandmarksBoundingBox(mesh), 1.5), confidence: box6.confidence };
if (config3.face.detector.rotation && config3.face.mesh.enabled && config3.face.description.enabled && env2.kernels.includes("rotatewithoffset")) {
tf4.dispose(face5);
[angle, rotationMatrix, face5] = this.correctFaceRotation(config3, box6, input);
}
results.push({
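Reviewer note: the one-line addition tf4.dispose(face5) fixes a classic reassignment leak -- correctFaceRotation() returns a fresh crop into the same face5 variable, and without the explicit dispose the previous crop tensor would be orphaned with no remaining reference. In miniature (illustrative; helper names are hypothetical):

let crop = makeCrop(input);      // hypothetical helper returning a tensor
if (needsSecondPass) {
  tf.dispose(crop);              // free the old tensor before the reference is overwritten
  crop = makeRotatedCrop(input); // hypothetical
}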
@@ -4123,8 +4122,6 @@ async function predict(input, config3) {
annotations: annotations3,
tensor: prediction.image
});
if (prediction.coords)
tf5.dispose(prediction.coords);
}
return results;
}
@@ -4205,20 +4202,20 @@ function match(embedding, db, threshold = 0) {
return best;
}
function enhance(input) {
const image19 = tf6.tidy(() => {
const tensor2 = input.image || input.tensor || input;
if (!(tensor2 instanceof tf6.Tensor))
const image20 = tf6.tidy(() => {
const tensor3 = input.image || input.tensor || input;
if (!(tensor3 instanceof tf6.Tensor))
return null;
const box6 = [[0.05, 0.15, 0.85, 0.85]];
if (!model.inputs[0].shape)
return null;
const crop = tensor2.shape.length === 3 ? tf6.image.cropAndResize(tf6.expandDims(tensor2, 0), box6, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) : tf6.image.cropAndResize(tensor2, box6, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
const crop = tensor3.shape.length === 3 ? tf6.image.cropAndResize(tf6.expandDims(tensor3, 0), box6, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) : tf6.image.cropAndResize(tensor3, box6, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
const norm = tf6.mul(crop, 255);
return norm;
});
return image19;
return image20;
}
async function predict2(image19, config3, idx, count2) {
async function predict2(image20, config3, idx, count2) {
var _a, _b, _c;
if (!model)
return null;
@@ -4229,7 +4226,7 @@ async function predict2(image19, config3, idx, count2) {
skipped = 0;
return new Promise(async (resolve) => {
var _a2, _b2;
const enhanced = enhance(image19);
const enhanced = enhance(image20);
let resT;
const obj = {
age: 0,
@@ -4249,6 +4246,7 @@ async function predict2(image19, config3, idx, count2) {
}
const argmax = tf6.argMax(resT.find((t) => t.shape[1] === 100), 1);
const age = (await argmax.data())[0];
tf6.dispose(argmax);
const all2 = await resT.find((t) => t.shape[1] === 100).data();
obj.age = Math.round(all2[age - 1] > all2[age + 1] ? 10 * age - 100 * all2[age - 1] : 10 * age + 100 * all2[age + 1]) / 10;
const desc = resT.find((t) => t.shape[1] === 1024);
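Reviewer note: tf6.dispose(argmax) plugs a leak where an op result was used once and dropped -- argMax() allocates a real tensor even though only a single scalar is read back from it. In miniature (illustrative):

const probs = tf.tensor1d([0.1, 0.7, 0.2]);
const idxT = tf.argMax(probs);      // allocates a rank-0 tensor
const idx = idxT.dataSync()[0];     // read the scalar out
tf.dispose([probs, idxT]);          // then release both tensors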
@@ -4282,7 +4280,7 @@ async function load4(config3) {
log("cached model:", model2.modelUrl);
return model2;
}
async function predict3(image19, config3, idx, count2) {
async function predict3(image20, config3, idx, count2) {
var _a;
if (!model2)
return null;
@@ -4293,7 +4291,7 @@ async function predict3(image19, config3, idx, count2) {
skipped2 = 0;
return new Promise(async (resolve) => {
var _a2, _b;
const resize = tf7.image.resizeBilinear(image19, [model2.inputs[0].shape[2], model2.inputs[0].shape[1]], false);
const resize = tf7.image.resizeBilinear(image20, [model2.inputs[0].shape[2], model2.inputs[0].shape[1]], false);
const [red, green, blue] = tf7.split(resize, 3, 3);
tf7.dispose(resize);
const redNorm = tf7.mul(red, rgb[0]);
@@ -4649,7 +4647,7 @@ async function predict4(input, config3) {
results3d[1] = results3d[1].sigmoid();
return results3d;
});
const buffers = await Promise.all(res.map((tensor2) => tensor2.buffer()));
const buffers = await Promise.all(res.map((tensor3) => tensor3.buffer()));
for (const t of res)
tf8.dispose(t);
const decoded = await decode(buffers[0], buffers[1], buffers[2], buffers[3], config3.body.maxDetected, config3.body.minConfidence);
@@ -4690,16 +4688,16 @@ function getBoxCenter2(box6) {
box6.startPoint[1] + (box6.endPoint[1] - box6.startPoint[1]) / 2
];
}
function cutBoxFromImageAndResize2(box6, image19, cropSize) {
const h = image19.shape[1];
const w = image19.shape[2];
function cutBoxFromImageAndResize2(box6, image20, cropSize) {
const h = image20.shape[1];
const w = image20.shape[2];
const boxes = [[
box6.startPoint[1] / h,
box6.startPoint[0] / w,
box6.endPoint[1] / h,
box6.endPoint[0] / w
]];
return tf9.image.cropAndResize(image19, boxes, [0], cropSize);
return tf9.image.cropAndResize(image20, boxes, [0], cropSize);
}
function scaleBoxCoordinates2(box6, factor) {
const startPoint = [box6.startPoint[0] * factor[0], box6.startPoint[1] * factor[1]];
@@ -7719,16 +7717,16 @@ var HandDetector = class {
const palmLandmarks = tf10.tidy(() => tf10.reshape(this.normalizeLandmarks(tf10.slice(t.predictions, [index, 5], [1, 14]), index), [-1, 2]));
hands.push({ box: palmBox, palmLandmarks, confidence: scores[index] });
}
for (const tensor2 of Object.keys(t))
tf10.dispose(t[tensor2]);
for (const tensor3 of Object.keys(t))
tf10.dispose(t[tensor3]);
return hands;
}
async estimateHandBounds(input, config3) {
const inputHeight = input.shape[1];
const inputWidth = input.shape[2];
const image19 = tf10.tidy(() => tf10.sub(tf10.div(tf10.image.resizeBilinear(input, [this.inputSize, this.inputSize]), 127.5), 1));
const predictions = await this.getBoxes(image19, config3);
tf10.dispose(image19);
const image20 = tf10.tidy(() => tf10.sub(tf10.div(tf10.image.resizeBilinear(input, [this.inputSize, this.inputSize]), 127.5), 1));
const predictions = await this.getBoxes(image20, config3);
tf10.dispose(image20);
const hands = [];
if (!predictions || predictions.length === 0)
return hands;
@@ -7873,11 +7871,11 @@ var HandPipeline = class {
Math.trunc(coord[2])
]);
}
async estimateHands(image19, config3) {
async estimateHands(image20, config3) {
let useFreshBox = false;
let boxes;
if (this.skipped === 0 || this.skipped > config3.hand.skipFrames || !config3.hand.landmarks || !config3.skipFrame) {
boxes = await this.handDetector.estimateHandBounds(image19, config3);
boxes = await this.handDetector.estimateHandBounds(image20, config3);
this.skipped = 0;
}
if (config3.skipFrame)
@@ -7896,8 +7894,8 @@ var HandPipeline = class {
if (config3.hand.landmarks) {
const angle = config3.hand.rotation ? computeRotation2(currentBox.palmLandmarks[palmLandmarksPalmBase], currentBox.palmLandmarks[palmLandmarksMiddleFingerBase]) : 0;
const palmCenter = getBoxCenter2(currentBox);
const palmCenterNormalized = [palmCenter[0] / image19.shape[2], palmCenter[1] / image19.shape[1]];
const rotatedImage = config3.hand.rotation && env2.kernels.includes("rotatewithoffset") ? tf11.image.rotateWithOffset(image19, angle, 0, palmCenterNormalized) : image19.clone();
const palmCenterNormalized = [palmCenter[0] / image20.shape[2], palmCenter[1] / image20.shape[1]];
const rotatedImage = config3.hand.rotation && env2.kernels.includes("rotatewithoffset") ? tf11.image.rotateWithOffset(image20, angle, 0, palmCenterNormalized) : image20.clone();
const rotationMatrix = buildRotationMatrix2(-angle, palmCenter);
const newBox = useFreshBox ? this.getBoxForPalmLandmarks(currentBox.palmLandmarks, rotationMatrix) : currentBox;
const croppedInput = cutBoxFromImageAndResize2(newBox, rotatedImage, [this.inputSize, this.inputSize]);
@@ -8502,13 +8500,13 @@ async function load7(config3) {
log("cached model:", model4["modelUrl"]);
return model4;
}
async function predict6(image19, config3) {
async function predict6(image20, config3) {
if (!model4)
return [];
if (!config3.body.enabled)
return [];
const imgSize = { width: image19.shape[2] || 0, height: image19.shape[1] || 0 };
const resize = tf13.image.resizeBilinear(image19, [model4["width"], model4["height"]], false);
const imgSize = { width: image20.shape[2] || 0, height: image20.shape[1] || 0 };
const resize = tf13.image.resizeBilinear(image20, [model4["width"], model4["height"]], false);
const normalize = tf13.div(resize, [255]);
tf13.dispose(resize);
const resT = await model4.predict(normalize);
@@ -8585,7 +8583,7 @@ function max2d(inputs, minScore) {
return [0, 0, newScore];
});
}
async function predict7(image19, config3) {
async function predict7(image20, config3) {
var _a;
if (skipped3 < (((_a = config3.body) == null ? void 0 : _a.skipFrames) || 0) && config3.skipFrame && Object.keys(keypoints).length > 0) {
skipped3++;
@@ -8594,26 +8592,26 @@ async function predict7(image19, config3) {
skipped3 = 0;
return new Promise(async (resolve) => {
var _a2;
const tensor2 = tf14.tidy(() => {
const tensor3 = tf14.tidy(() => {
if (!model5.inputs[0].shape)
return null;
const resize = tf14.image.resizeBilinear(image19, [model5.inputs[0].shape[2], model5.inputs[0].shape[1]], false);
const resize = tf14.image.resizeBilinear(image20, [model5.inputs[0].shape[2], model5.inputs[0].shape[1]], false);
const enhance2 = tf14.mul(resize, 2);
const norm = enhance2.sub(1);
return norm;
});
let resT;
if (config3.body.enabled)
resT = await model5.predict(tensor2);
tf14.dispose(tensor2);
resT = await model5.predict(tensor3);
tf14.dispose(tensor3);
if (resT) {
keypoints.length = 0;
const squeeze7 = resT.squeeze();
tf14.dispose(resT);
const stack2 = squeeze7.unstack(2);
const stack3 = squeeze7.unstack(2);
tf14.dispose(squeeze7);
for (let id = 0; id < stack2.length; id++) {
const [x2, y2, partScore] = max2d(stack2[id], config3.body.minConfidence);
for (let id = 0; id < stack3.length; id++) {
const [x2, y2, partScore] = max2d(stack3[id], config3.body.minConfidence);
if (score > (((_a2 = config3.body) == null ? void 0 : _a2.minConfidence) || 0)) {
keypoints.push({
score: Math.round(100 * partScore) / 100,
@@ -8623,13 +8621,13 @@ async function predict7(image19, config3) {
y2 / model5.inputs[0].shape[1]
],
position: [
Math.round(image19.shape[2] * x2 / model5.inputs[0].shape[2]),
Math.round(image19.shape[1] * y2 / model5.inputs[0].shape[1])
Math.round(image20.shape[2] * x2 / model5.inputs[0].shape[2]),
Math.round(image20.shape[1] * y2 / model5.inputs[0].shape[1])
]
});
}
}
stack2.forEach((s) => tf14.dispose(s));
stack3.forEach((s) => tf14.dispose(s));
}
score = keypoints.reduce((prev, curr) => curr.score > prev ? curr.score : prev, 0);
const x = keypoints.map((a) => a.position[0]);
@@ -8672,7 +8670,7 @@ async function load9(config3) {
log("cached model:", model6["modelUrl"]);
return model6;
}
async function parseSinglePose(res, config3, image19) {
async function parseSinglePose(res, config3, image20) {
keypoints2.length = 0;
const kpt3 = res[0][0];
for (let id = 0; id < kpt3.length; id++) {
@@ -8686,8 +8684,8 @@ async function parseSinglePose(res, config3, image19) {
kpt3[id][0]
],
position: [
Math.round((image19.shape[2] || 0) * kpt3[id][1]),
Math.round((image19.shape[1] || 0) * kpt3[id][0])
Math.round((image20.shape[2] || 0) * kpt3[id][1]),
Math.round((image20.shape[1] || 0) * kpt3[id][0])
]
});
}
@@ -8713,7 +8711,7 @@ async function parseSinglePose(res, config3, image19) {
persons2.push({ id: 0, score: score2, box: box5, boxRaw: boxRaw2, keypoints: keypoints2 });
return persons2;
}
async function parseMultiPose(res, config3, image19) {
async function parseMultiPose(res, config3, image20) {
const persons2 = [];
for (let p = 0; p < res[0].length; p++) {
const kpt3 = res[0][p];
@@ -8732,8 +8730,8 @@ async function parseMultiPose(res, config3, image19) {
kpt3[3 * i + 0]
],
position: [
Math.trunc(kpt3[3 * i + 1] * (image19.shape[2] || 0)),
Math.trunc(kpt3[3 * i + 0] * (image19.shape[1] || 0))
Math.trunc(kpt3[3 * i + 1] * (image20.shape[2] || 0)),
Math.trunc(kpt3[3 * i + 0] * (image20.shape[1] || 0))
]
});
}
@@ -8744,45 +8742,45 @@ async function parseMultiPose(res, config3, image19) {
score: score2,
boxRaw: boxRaw2,
box: [
Math.trunc(boxRaw2[0] * (image19.shape[2] || 0)),
Math.trunc(boxRaw2[1] * (image19.shape[1] || 0)),
Math.trunc(boxRaw2[2] * (image19.shape[2] || 0)),
Math.trunc(boxRaw2[3] * (image19.shape[1] || 0))
Math.trunc(boxRaw2[0] * (image20.shape[2] || 0)),
Math.trunc(boxRaw2[1] * (image20.shape[1] || 0)),
Math.trunc(boxRaw2[2] * (image20.shape[2] || 0)),
Math.trunc(boxRaw2[3] * (image20.shape[1] || 0))
],
keypoints: keypoints2
});
}
return persons2;
}
async function predict8(image19, config3) {
async function predict8(image20, config3) {
if (skipped4 < (config3.body.skipFrames || 0) && config3.skipFrame && Object.keys(keypoints2).length > 0) {
skipped4++;
return [{ id: 0, score: score2, box: box5, boxRaw: boxRaw2, keypoints: keypoints2 }];
}
skipped4 = 0;
return new Promise(async (resolve) => {
const tensor2 = tf15.tidy(() => {
const tensor3 = tf15.tidy(() => {
if (!model6.inputs[0].shape)
return null;
let inputSize = model6.inputs[0].shape[2];
if (inputSize === -1)
inputSize = 256;
const resize = tf15.image.resizeBilinear(image19, [inputSize, inputSize], false);
const resize = tf15.image.resizeBilinear(image20, [inputSize, inputSize], false);
const cast4 = tf15.cast(resize, "int32");
return cast4;
});
let resT;
if (config3.body.enabled)
resT = await model6.predict(tensor2);
tf15.dispose(tensor2);
resT = await model6.predict(tensor3);
tf15.dispose(tensor3);
if (!resT)
resolve([]);
const res = await resT.array();
let persons2;
if (resT.shape[2] === 17)
persons2 = await parseSinglePose(res, config3, image19);
persons2 = await parseSinglePose(res, config3, image20);
else if (resT.shape[2] === 56)
persons2 = await parseMultiPose(res, config3, image19);
persons2 = await parseMultiPose(res, config3, image20);
tf15.dispose(resT);
resolve(persons2);
});
@@ -8956,7 +8954,7 @@ async function process2(res, inputSize, outputShape, config3) {
results = results.filter((_val, idx) => nmsIdx.includes(idx)).sort((a, b) => b.score - a.score);
return results;
}
async function predict9(image19, config3) {
async function predict9(image20, config3) {
if (skipped5 < (config3.object.skipFrames || 0) && config3.skipFrame && last3.length > 0) {
skipped5++;
return last3;
@@ -8965,8 +8963,8 @@ async function predict9(image19, config3) {
if (!env2.kernels.includes("mod") || !env2.kernels.includes("sparsetodense"))
return last3;
return new Promise(async (resolve) => {
const outputSize = [image19.shape[2], image19.shape[1]];
const resize = tf16.image.resizeBilinear(image19, [model7.inputSize, model7.inputSize], false);
const outputSize = [image20.shape[2], image20.shape[1]];
const resize = tf16.image.resizeBilinear(image20, [model7.inputSize, model7.inputSize], false);
const norm = tf16.div(resize, 255);
const transpose = norm.transpose([0, 3, 1, 2]);
tf16.dispose(norm);
@@ -9012,6 +9010,7 @@ async function process3(res, inputSize, outputShape, config3) {
tf17.dispose(squeezeT);
const stackT = tf17.stack([arr[1], arr[0], arr[3], arr[2]], 1);
const boxesT = tf17.squeeze(stackT);
tf17.dispose(stackT);
const scoresT = tf17.squeeze(arr[4]);
const classesT = tf17.squeeze(arr[5]);
arr.forEach((t) => tf17.dispose(t));
@@ -9221,8 +9220,8 @@ function GLImageFilter(params) {
gl.uniform1f(_currentProgram.uniform.flipY, flipY ? -1 : 1);
gl.drawArrays(gl.TRIANGLES, 0, 6);
};
this.apply = function(image19) {
_resize(image19.width, image19.height);
this.apply = function(image20) {
_resize(image20.width, image20.height);
_drawCount = 0;
if (!_sourceTexture)
_sourceTexture = gl.createTexture();
@@ -9231,7 +9230,7 @@ function GLImageFilter(params) {
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image19);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image20);
if (_filterChain.length === 0) {
_draw();
return _canvas;
@@ -9782,16 +9781,33 @@ var maxSize = 2048;
var inCanvas;
var outCanvas;
var fx;
function canvas(width, height) {
let c;
if (env2.browser) {
if (typeof OffscreenCanvas !== "undefined") {
c = new OffscreenCanvas(width, height);
} else {
c = document.createElement("canvas");
c.width = width;
c.height = height;
}
} else {
c = typeof env2.Canvas !== "undefined" ? new env2.Canvas(width, height) : null;
}
if (!c)
throw new Error("Human: Cannot create canvas");
return c;
}
function process4(input, config3) {
let tensor2;
let tensor3;
if (!input)
throw new Error("Human: Input is missing");
if (!(input instanceof tf18.Tensor) && !(typeof Image !== "undefined" && input instanceof Image) && !(typeof ImageData !== "undefined" && input instanceof ImageData) && !(typeof ImageBitmap !== "undefined" && input instanceof ImageBitmap) && !(typeof HTMLImageElement !== "undefined" && input instanceof HTMLImageElement) && !(typeof HTMLMediaElement !== "undefined" && input instanceof HTMLMediaElement) && !(typeof HTMLVideoElement !== "undefined" && input instanceof HTMLVideoElement) && !(typeof HTMLCanvasElement !== "undefined" && input instanceof HTMLCanvasElement) && !(typeof OffscreenCanvas !== "undefined" && input instanceof OffscreenCanvas)) {
if (!(input instanceof tf18.Tensor) && !(typeof Image !== "undefined" && input instanceof Image) && !(typeof env2.Canvas !== "undefined" && input instanceof env2.Canvas) && !(typeof ImageData !== "undefined" && input instanceof ImageData) && !(typeof ImageBitmap !== "undefined" && input instanceof ImageBitmap) && !(typeof HTMLImageElement !== "undefined" && input instanceof HTMLImageElement) && !(typeof HTMLMediaElement !== "undefined" && input instanceof HTMLMediaElement) && !(typeof HTMLVideoElement !== "undefined" && input instanceof HTMLVideoElement) && !(typeof HTMLCanvasElement !== "undefined" && input instanceof HTMLCanvasElement) && !(typeof OffscreenCanvas !== "undefined" && input instanceof OffscreenCanvas)) {
throw new Error("Human: Input type is not recognized");
}
if (input instanceof tf18.Tensor) {
if (input.shape && input.shape.length === 4 && input.shape[0] === 1 && input.shape[3] === 3)
tensor2 = tf18.clone(input);
tensor3 = tf18.clone(input);
else
throw new Error(`Human: Input tensor shape must be [1, height, width, 3] and instead was ${input.shape}`);
} else {
@@ -9819,15 +9835,10 @@ function process4(input, config3) {
targetHeight = originalHeight * ((config3.filter.width || 0) / originalWidth);
if (!targetWidth || !targetHeight)
throw new Error("Human: Input cannot determine dimension");
if (!inCanvas || (inCanvas == null ? void 0 : inCanvas.width) !== targetWidth || (inCanvas == null ? void 0 : inCanvas.height) !== targetHeight) {
inCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
if ((inCanvas == null ? void 0 : inCanvas.width) !== targetWidth)
inCanvas.width = targetWidth;
if ((inCanvas == null ? void 0 : inCanvas.height) !== targetHeight)
inCanvas.height = targetHeight;
}
if (!inCanvas || (inCanvas == null ? void 0 : inCanvas.width) !== targetWidth || (inCanvas == null ? void 0 : inCanvas.height) !== targetHeight)
inCanvas = canvas(targetWidth, targetHeight);
const ctx = inCanvas.getContext("2d");
if (input instanceof ImageData) {
if (typeof ImageData !== "undefined" && input instanceof ImageData) {
ctx.putImageData(input, 0, 0);
} else {
if (config3.filter.flip && typeof ctx.translate !== "undefined") {
@@ -9841,7 +9852,7 @@ function process4(input, config3) {
}
if (config3.filter.enabled) {
if (!fx || !outCanvas || inCanvas.width !== outCanvas.width || (inCanvas == null ? void 0 : inCanvas.height) !== (outCanvas == null ? void 0 : outCanvas.height)) {
outCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(inCanvas == null ? void 0 : inCanvas.width, inCanvas == null ? void 0 : inCanvas.height) : document.createElement("canvas");
outCanvas = canvas(inCanvas == null ? void 0 : inCanvas.width, inCanvas == null ? void 0 : inCanvas.height);
if ((outCanvas == null ? void 0 : outCanvas.width) !== (inCanvas == null ? void 0 : inCanvas.width))
outCanvas.width = inCanvas == null ? void 0 : inCanvas.width;
if ((outCanvas == null ? void 0 : outCanvas.height) !== (inCanvas == null ? void 0 : inCanvas.height))
@@ -9884,58 +9895,70 @@ function process4(input, config3) {
if (fx)
fx = null;
}
if (!tensor2) {
if (!tensor3) {
let pixels;
if (outCanvas.data) {
const shape = [outCanvas.height, outCanvas.width, 3];
pixels = tf18.tensor3d(outCanvas.data, shape, "int32");
} else if (outCanvas instanceof ImageData) {
} else if (typeof ImageData !== "undefined" && outCanvas instanceof ImageData) {
pixels = tf18.browser ? tf18.browser.fromPixels(outCanvas) : null;
} else if (config3.backend === "webgl" || config3.backend === "humangl") {
const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
const tempCanvas = canvas(targetWidth, targetHeight);
tempCanvas.width = targetWidth;
tempCanvas.height = targetHeight;
const tempCtx = tempCanvas.getContext("2d");
tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
pixels = tf18.browser ? tf18.browser.fromPixels(tempCanvas) : null;
pixels = tf18.browser && env2.browser ? tf18.browser.fromPixels(tempCanvas) : null;
} else {
const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
const tempCanvas = canvas(targetWidth, targetHeight);
tempCanvas.width = targetWidth;
tempCanvas.height = targetHeight;
const tempCtx = tempCanvas.getContext("2d");
tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
const data = tempCtx == null ? void 0 : tempCtx.getImageData(0, 0, targetWidth, targetHeight);
pixels = tf18.browser ? tf18.browser.fromPixels(data) : null;
tempCtx.drawImage(outCanvas, 0, 0);
const data = tempCtx.getImageData(0, 0, targetWidth, targetHeight);
if (tf18.browser && env2.browser) {
pixels = tf18.browser.fromPixels(data);
} else {
pixels = tf18.tidy(() => {
const imageData = tf18.tensor(Array.from(data.data), [targetWidth, targetHeight, 4]);
const channels = tf18.split(imageData, 4, 2);
const rgb2 = tf18.stack([channels[0], channels[1], channels[2]], 2);
const expand = tf18.reshape(rgb2, [imageData.shape[0], imageData.shape[1], 3]);
return expand;
});
}
}
if (pixels) {
const casted = tf18.cast(pixels, "float32");
tensor2 = tf18.expandDims(casted, 0);
tensor3 = tf18.expandDims(casted, 0);
tf18.dispose(pixels);
tf18.dispose(casted);
} else {
tensor3 = tf18.zeros([1, targetWidth, targetHeight, 3]);
throw new Error("Human: Cannot create tensor from input");
}
}
}
const canvas2 = config3.filter.return ? outCanvas : null;
return { tensor: tensor2, canvas: canvas2 };
return { tensor: tensor3, canvas: config3.filter.return ? outCanvas : null };
}
var lastInputSum = 0;
var lastCacheDiff = 1;
async function skip(instance, input) {
if (instance.config.cacheSensitivity === 0)
async function skip(config3, input) {
if (config3.cacheSensitivity === 0)
return false;
const resizeFact = 32;
if (!input.shape[1] || !input.shape[2])
return false;
const reduced = tf18.image.resizeBilinear(input, [Math.trunc(input.shape[1] / resizeFact), Math.trunc(input.shape[2] / resizeFact)]);
const reducedData = await reduced.data();
tf18.dispose(reduced);
let sum = 0;
for (let i = 0; i < reducedData.length / 3; i++)
sum += reducedData[3 * i + 2];
reduced.dispose();
const diff = 100 * (Math.max(sum, lastInputSum) / Math.min(sum, lastInputSum) - 1);
lastInputSum = sum;
const skipFrame = diff < Math.max(instance.config.cacheSensitivity, lastCacheDiff);
lastCacheDiff = diff > 10 * instance.config.cacheSensitivity ? 0 : diff;
const skipFrame = diff < Math.max(config3.cacheSensitivity, lastCacheDiff);
lastCacheDiff = diff > 10 * config3.cacheSensitivity ? 0 : diff;
return skipFrame;
}
@ -9967,6 +9990,7 @@ async function predict11(input) {
|
|||
tf19.dispose(resizeInput);
|
||||
tf19.dispose(norm);
|
||||
const squeeze7 = tf19.squeeze(res, 0);
|
||||
tf19.dispose(res);
|
||||
let resizeOutput;
|
||||
if (squeeze7.shape[2] === 2) {
|
||||
const softmax = squeeze7.softmax();
|
||||
|
@ -9984,16 +10008,18 @@ async function predict11(input) {
|
|||
} else {
|
||||
resizeOutput = tf19.image.resizeBilinear(squeeze7, [width, height]);
|
||||
}
|
||||
if (typeof document === "undefined")
|
||||
return resizeOutput.data();
|
||||
tf19.dispose(squeeze7);
|
||||
if (env2.node) {
|
||||
const data = await resizeOutput.data();
|
||||
tf19.dispose(resizeOutput);
|
||||
return data;
|
||||
}
|
||||
const overlay = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(width, height) : document.createElement("canvas");
|
||||
overlay.width = width;
|
||||
overlay.height = height;
|
||||
if (tf19.browser)
|
||||
await tf19.browser.toPixels(resizeOutput, overlay);
|
||||
tf19.dispose(resizeOutput);
|
||||
tf19.dispose(squeeze7);
|
||||
tf19.dispose(res);
|
||||
const alphaCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(width, height) : document.createElement("canvas");
|
||||
alphaCanvas.width = width;
|
||||
alphaCanvas.height = height;
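
The predict11 hunks above replace the early `typeof document` return, which leaked squeeze7 (and res on the node path), with an env2.node branch that reads the data back and then disposes resizeOutput, squeeze7, and res in order. A minimal sketch of that dispose-as-you-go pattern, assuming @tensorflow/tfjs (the op choice here is illustrative):

import * as tf from '@tensorflow/tfjs';

// Each intermediate is released as soon as its successor exists, so nothing
// leaks regardless of which branch returns first.
async function readbackPipeline(input: tf.Tensor): Promise<Float32Array> {
  const squeezed = tf.squeeze(input, [0]);
  const resized = tf.image.resizeBilinear(squeezed as tf.Tensor3D, [256, 256]);
  tf.dispose(squeezed);                // no longer needed once resized exists
  const data = await resized.data() as Float32Array;
  tf.dispose(resized);                 // read back first, then release
  return data;
}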

@@ -10298,7 +10324,7 @@ var detectFace = async (parent, input) => {
delete faces[i].annotations.rightEyeIris;
}
const irisSize = ((_e = faces[i].annotations) == null ? void 0 : _e.leftEyeIris) && ((_f = faces[i].annotations) == null ? void 0 : _f.rightEyeIris) ? Math.max(Math.abs(faces[i].annotations.leftEyeIris[3][0] - faces[i].annotations.leftEyeIris[1][0]), Math.abs(faces[i].annotations.rightEyeIris[4][1] - faces[i].annotations.rightEyeIris[2][1])) / input.shape[2] : 0;
const tensor2 = parent.config.face.detector.return ? tf20.squeeze(faces[i].tensor) : null;
const tensor3 = parent.config.face.detector.return ? tf20.squeeze(faces[i].tensor) : null;
tf20.dispose(faces[i].tensor);
if (faces[i].tensor)
delete faces[i].tensor;

@@ -10312,7 +10338,7 @@ var detectFace = async (parent, input) => {
emotion: emotionRes,
iris: irisSize !== 0 ? Math.trunc(500 / irisSize / 11.7) / 100 : 0,
rotation,
tensor: tensor2
tensor: tensor3
});
parent.analyze("End Face");
}

@@ -10447,7 +10473,7 @@ var draw_exports = {};
__export(draw_exports, {
all: () => all,
body: () => body2,
canvas: () => canvas,
canvas: () => canvas2,
face: () => face2,
gesture: () => gesture,
hand: () => hand2,

@@ -10920,7 +10946,7 @@ async function person(inCanvas2, result, drawOptions) {
}
}
}
async function canvas(inCanvas2, outCanvas2) {
async function canvas2(inCanvas2, outCanvas2) {
if (!inCanvas2 || !outCanvas2)
return;
getCanvasContext(outCanvas2);

@@ -12003,29 +12029,34 @@ async function warmupBitmap(instance) {
async function warmupCanvas(instance) {
return new Promise((resolve) => {
let src;
let size = 0;
switch (instance.config.warmup) {
case "face":
size = 256;
src = "data:image/jpeg;base64," + face3;
break;
case "full":
case "body":
size = 1200;
src = "data:image/jpeg;base64," + body3;
break;
default:
src = null;
}
const img = new Image();
let img;
if (typeof Image !== "undefined")
img = new Image();
else if (env2.Image)
img = new env2.Image();
img.onload = async () => {
const canvas2 = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(size, size) : document.createElement("canvas");
canvas2.width = img.naturalWidth;
canvas2.height = img.naturalHeight;
const ctx = canvas2.getContext("2d");
ctx == null ? void 0 : ctx.drawImage(img, 0, 0);
const res = await instance.detect(canvas2, instance.config);
resolve(res);
const canvas3 = canvas(img.naturalWidth, img.naturalHeight);
if (!canvas3) {
log("Warmup: Canvas not found");
resolve({});
} else {
const ctx = canvas3.getContext("2d");
ctx.drawImage(img, 0, 0);
const tensor3 = await instance.image(canvas3);
const res = await instance.detect(tensor3.tensor, instance.config);
resolve(res);
}
};
if (src)
img.src = src;
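
warmupCanvas above now works without a DOM Image by falling back to an Image class registered on env2 (in node that would typically come from a canvas polyfill such as node-canvas). A sketch of that selection logic, assuming an env shape like the bundle's (EnvLike and createImage are illustrative names):

// Minimal sketch: prefer the platform Image, else a polyfill registered on env.
type EnvLike = { Image?: new () => HTMLImageElement };

function createImage(env: EnvLike): HTMLImageElement {
  if (typeof Image !== 'undefined') return new Image(); // browser path
  if (env.Image) return new env.Image();                // node path via polyfill
  throw new Error('Human: no Image implementation available');
}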

@@ -12064,7 +12095,7 @@ async function warmup(instance, userConfig) {
let res;
if (typeof createImageBitmap === "function")
res = await warmupBitmap(instance);
else if (typeof Image !== "undefined")
else if (typeof Image !== "undefined" || env2.Canvas !== void 0)
res = await warmupCanvas(instance);
else
res = await warmupNode(instance);

@@ -12156,7 +12187,7 @@ var Human = class {
return similarity(embedding1, embedding2);
}
segmentation(input, background) {
return process5(input, background, this.config);
return input ? process5(input, background, this.config) : null;
}
enhance(input) {
return enhance(input);

@@ -12214,32 +12245,32 @@ var Human = class {
await check(this);
await this.load();
timeStamp = now();
this.process = process4(input, this.config);
const inputTensor = this.process.tensor;
let img = process4(input, this.config);
this.process = img;
this.performance.image = Math.trunc(now() - timeStamp);
this.analyze("Get Image:");
if (this.config.segmentation.enabled && this.process && inputTensor) {
if (this.config.segmentation.enabled && this.process && img.tensor && img.canvas) {
this.analyze("Start Segmentation:");
this.state = "run:segmentation";
timeStamp = now();
await predict11(this.process);
await predict11(img);
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.segmentation = elapsedTime;
if (this.process.canvas) {
tf24.dispose(inputTensor);
this.process = process4(this.process.canvas, this.config);
if (img.canvas) {
tf24.dispose(img.tensor);
img = process4(img.canvas, this.config);
}
this.analyze("End Segmentation:");
}
if (!this.process || !inputTensor) {
if (!img.tensor) {
log("could not convert input to tensor");
resolve({ error: "could not convert input to tensor" });
return;
}
this.emit("image");
timeStamp = now();
this.config.skipFrame = await skip(this, inputTensor);
this.config.skipFrame = await skip(this.config, img.tensor);
if (!this.performance.frames)
this.performance.frames = 0;
if (!this.performance.cached)

@@ -12254,13 +12285,13 @@ var Human = class {
let handRes = [];
let objectRes = [];
if (this.config.async) {
faceRes = this.config.face.enabled ? detectFace(this, inputTensor) : [];
faceRes = this.config.face.enabled ? detectFace(this, img.tensor) : [];
if (this.performance.face)
delete this.performance.face;
} else {
this.state = "run:face";
timeStamp = now();
faceRes = this.config.face.enabled ? await detectFace(this, inputTensor) : [];
faceRes = this.config.face.enabled ? await detectFace(this, img.tensor) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.face = elapsedTime;

@@ -12268,26 +12299,26 @@ var Human = class {
this.analyze("Start Body:");
if (this.config.async) {
if ((_a = this.config.body.modelPath) == null ? void 0 : _a.includes("posenet"))
bodyRes = this.config.body.enabled ? predict4(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict4(img.tensor, this.config) : [];
else if ((_b = this.config.body.modelPath) == null ? void 0 : _b.includes("blazepose"))
bodyRes = this.config.body.enabled ? predict6(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict6(img.tensor, this.config) : [];
else if ((_c = this.config.body.modelPath) == null ? void 0 : _c.includes("efficientpose"))
bodyRes = this.config.body.enabled ? predict7(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict7(img.tensor, this.config) : [];
else if ((_d = this.config.body.modelPath) == null ? void 0 : _d.includes("movenet"))
bodyRes = this.config.body.enabled ? predict8(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? predict8(img.tensor, this.config) : [];
if (this.performance.body)
delete this.performance.body;
} else {
this.state = "run:body";
timeStamp = now();
if ((_e = this.config.body.modelPath) == null ? void 0 : _e.includes("posenet"))
bodyRes = this.config.body.enabled ? await predict4(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict4(img.tensor, this.config) : [];
else if ((_f = this.config.body.modelPath) == null ? void 0 : _f.includes("blazepose"))
bodyRes = this.config.body.enabled ? await predict6(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict6(img.tensor, this.config) : [];
else if ((_g = this.config.body.modelPath) == null ? void 0 : _g.includes("efficientpose"))
bodyRes = this.config.body.enabled ? await predict7(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict7(img.tensor, this.config) : [];
else if ((_h = this.config.body.modelPath) == null ? void 0 : _h.includes("movenet"))
bodyRes = this.config.body.enabled ? await predict8(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict8(img.tensor, this.config) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.body = elapsedTime;

@@ -12295,13 +12326,13 @@ var Human = class {
this.analyze("End Body:");
this.analyze("Start Hand:");
if (this.config.async) {
handRes = this.config.hand.enabled ? predict5(inputTensor, this.config) : [];
handRes = this.config.hand.enabled ? predict5(img.tensor, this.config) : [];
if (this.performance.hand)
delete this.performance.hand;
} else {
this.state = "run:hand";
timeStamp = now();
handRes = this.config.hand.enabled ? await predict5(inputTensor, this.config) : [];
handRes = this.config.hand.enabled ? await predict5(img.tensor, this.config) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.hand = elapsedTime;

@@ -12310,18 +12341,18 @@ var Human = class {
this.analyze("Start Object:");
if (this.config.async) {
if ((_i = this.config.object.modelPath) == null ? void 0 : _i.includes("nanodet"))
objectRes = this.config.object.enabled ? predict9(inputTensor, this.config) : [];
objectRes = this.config.object.enabled ? predict9(img.tensor, this.config) : [];
else if ((_j = this.config.object.modelPath) == null ? void 0 : _j.includes("centernet"))
objectRes = this.config.object.enabled ? predict10(inputTensor, this.config) : [];
objectRes = this.config.object.enabled ? predict10(img.tensor, this.config) : [];
if (this.performance.object)
delete this.performance.object;
} else {
this.state = "run:object";
timeStamp = now();
if ((_k = this.config.object.modelPath) == null ? void 0 : _k.includes("nanodet"))
objectRes = this.config.object.enabled ? await predict9(inputTensor, this.config) : [];
objectRes = this.config.object.enabled ? await predict9(img.tensor, this.config) : [];
else if ((_l = this.config.object.modelPath) == null ? void 0 : _l.includes("centernet"))
objectRes = this.config.object.enabled ? await predict10(inputTensor, this.config) : [];
objectRes = this.config.object.enabled ? await predict10(img.tensor, this.config) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.object = elapsedTime;

@@ -12354,7 +12385,7 @@ var Human = class {
return join2(faceRes, bodyRes, handRes, gestureRes, shape);
}
};
tf24.dispose(inputTensor);
tf24.dispose(img.tensor);
this.emit("detect");
resolve(this.result);
});
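
Across the detect() hunks above, the separate inputTensor alias is gone: the { tensor, canvas } pair returned by process4 travels as a single img value, is re-created after segmentation, and is disposed exactly once at the end. A compact sketch of that single-owner lifecycle, assuming @tensorflow/tfjs (processFrame here is a hypothetical stand-in for process4, not the bundle's API):

import * as tf from '@tensorflow/tfjs';

type ProcessedImage = { tensor: tf.Tensor | null, canvas: HTMLCanvasElement | null };

async function detectOnce(input: tf.Tensor4D, processFrame: (t: tf.Tensor4D) => ProcessedImage) {
  const img = processFrame(input);
  if (!img.tensor) return { error: 'could not convert input to tensor' };
  try {
    // ... run detectors against img.tensor here ...
    return { ok: true };
  } finally {
    tf.dispose(img.tensor); // single disposal point, hit on every exit path
  }
}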

@@ -255,16 +255,16 @@ function getBoxCenter(box6) {
box6.startPoint[1] + (box6.endPoint[1] - box6.startPoint[1]) / 2
];
}
function cutBoxFromImageAndResize(box6, image19, cropSize) {
const h = image19.shape[1];
const w = image19.shape[2];
function cutBoxFromImageAndResize(box6, image20, cropSize) {
const h = image20.shape[1];
const w = image20.shape[2];
const boxes = [[
box6.startPoint[1] / h,
box6.startPoint[0] / w,
box6.endPoint[1] / h,
box6.endPoint[0] / w
]];
return tf.image.cropAndResize(image19, boxes, [0], cropSize);
return tf.image.cropAndResize(image20, boxes, [0], cropSize);
}
function enlargeBox(box6, factor = 1.5) {
const center = getBoxCenter(box6);

@@ -408,7 +408,7 @@ var BlazeFaceModel = class {
async getBoundingBoxes(inputImage, userConfig) {
var _a, _b, _c, _d;
if (!inputImage || inputImage["isDisposedInternal"] || inputImage.shape.length !== 4 || inputImage.shape[1] < 1 || inputImage.shape[2] < 1)
return null;
return { boxes: [] };
const [batch, boxes, scores] = tf2.tidy(() => {
const resizedImage = tf2.image.resizeBilinear(inputImage, [this.inputSize, this.inputSize]);
const normalizedImage = tf2.sub(tf2.div(resizedImage, 127.5), 0.5);

@@ -438,11 +438,9 @@ var BlazeFaceModel = class {
const confidence = scoresData[nms[i]];
if (confidence > (((_d = this.config.face.detector) == null ? void 0 : _d.minConfidence) || 0)) {
const boundingBox = tf2.slice(boxes, [nms[i], 0], [1, -1]);
const localBox = createBox(boundingBox);
tf2.dispose(boundingBox);
const anchor = this.anchorsData[nms[i]];
const landmarks = tf2.tidy(() => tf2.reshape(tf2.squeeze(tf2.slice(batch, [nms[i], keypointsCount - 1], [1, -1])), [keypointsCount, -1]));
annotatedBoxes.push({ box: localBox, landmarks, anchor, confidence });
annotatedBoxes.push({ box: createBox(boundingBox), landmarks, anchor: this.anchorsData[nms[i]], confidence });
tf2.dispose(boundingBox);
}
}
tf2.dispose(batch);

@@ -3934,12 +3932,12 @@ var Pipeline = class {
const angle = computeRotation(box6.landmarks[indexOfMouth], box6.landmarks[indexOfForehead]);
const faceCenter = getBoxCenter({ startPoint: box6.startPoint, endPoint: box6.endPoint });
const faceCenterNormalized = [faceCenter[0] / input.shape[2], faceCenter[1] / input.shape[1]];
const rotatedImage = tf4.image.rotateWithOffset(input, angle, 0, faceCenterNormalized);
const rotated = tf4.image.rotateWithOffset(input, angle, 0, faceCenterNormalized);
const rotationMatrix = buildRotationMatrix(-angle, faceCenter);
const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotatedImage, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotatedImage, [this.boxSize, this.boxSize]);
const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotated, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, rotated, [this.boxSize, this.boxSize]);
const face5 = tf4.div(cut, 255);
tf4.dispose(cut);
tf4.dispose(rotatedImage);
tf4.dispose(rotated);
return [angle, rotationMatrix, face5];
}
async augmentIris(rawCoords, face5) {

@@ -4023,11 +4021,11 @@ var Pipeline = class {
[angle, rotationMatrix, face5] = this.correctFaceRotation(config3, box6, input);
} else {
rotationMatrix = IDENTITY_MATRIX;
const clonedImage = input.clone();
const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, clonedImage, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, clonedImage, [this.boxSize, this.boxSize]);
const cloned = input.clone();
const cut = config3.face.mesh.enabled ? cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, cloned, [this.meshSize, this.meshSize]) : cutBoxFromImageAndResize({ startPoint: box6.startPoint, endPoint: box6.endPoint }, cloned, [this.boxSize, this.boxSize]);
face5 = tf4.div(cut, 255);
tf4.dispose(cut);
tf4.dispose(clonedImage);
tf4.dispose(cloned);
}
if (!config3.face.mesh.enabled) {
results.push({

@@ -4056,6 +4054,7 @@ var Pipeline = class {
const mesh = this.transformRawCoords(rawCoords, box6, angle, rotationMatrix);
box6 = { ...enlargeBox(calculateLandmarksBoundingBox(mesh), 1.5), confidence: box6.confidence };
if (config3.face.detector.rotation && config3.face.mesh.enabled && config3.face.description.enabled && env2.kernels.includes("rotatewithoffset")) {
tf4.dispose(face5);
[angle, rotationMatrix, face5] = this.correctFaceRotation(config3, box6, input);
}
results.push({

@@ -4122,8 +4121,6 @@ async function predict(input, config3) {
annotations: annotations3,
tensor: prediction.image
});
if (prediction.coords)
tf5.dispose(prediction.coords);
}
return results;
}

@@ -4204,20 +4201,20 @@ function match(embedding, db, threshold = 0) {
return best;
}
function enhance(input) {
const image19 = tf6.tidy(() => {
const tensor2 = input.image || input.tensor || input;
if (!(tensor2 instanceof tf6.Tensor))
const image20 = tf6.tidy(() => {
const tensor3 = input.image || input.tensor || input;
if (!(tensor3 instanceof tf6.Tensor))
return null;
const box6 = [[0.05, 0.15, 0.85, 0.85]];
if (!model.inputs[0].shape)
return null;
const crop = tensor2.shape.length === 3 ? tf6.image.cropAndResize(tf6.expandDims(tensor2, 0), box6, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) : tf6.image.cropAndResize(tensor2, box6, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
const crop = tensor3.shape.length === 3 ? tf6.image.cropAndResize(tf6.expandDims(tensor3, 0), box6, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) : tf6.image.cropAndResize(tensor3, box6, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
const norm = tf6.mul(crop, 255);
return norm;
});
return image19;
return image20;
}
async function predict2(image19, config3, idx, count2) {
async function predict2(image20, config3, idx, count2) {
var _a, _b, _c;
if (!model)
return null;

@@ -4228,7 +4225,7 @@ async function predict2(image19, config3, idx, count2) {
skipped = 0;
return new Promise(async (resolve) => {
var _a2, _b2;
const enhanced = enhance(image19);
const enhanced = enhance(image20);
let resT;
const obj = {
age: 0,

@@ -4248,6 +4245,7 @@ async function predict2(image19, config3, idx, count2) {
}
const argmax = tf6.argMax(resT.find((t) => t.shape[1] === 100), 1);
const age = (await argmax.data())[0];
tf6.dispose(argmax);
const all2 = await resT.find((t) => t.shape[1] === 100).data();
obj.age = Math.round(all2[age - 1] > all2[age + 1] ? 10 * age - 100 * all2[age - 1] : 10 * age + 100 * all2[age + 1]) / 10;
const desc = resT.find((t) => t.shape[1] === 1024);
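
The obj.age line above converts a 100-bin classifier output into a fractional age: the argmax bin fixes the decade-scaled base (10 * age) and the heavier of the two neighbor bins pulls the estimate toward itself in proportion to its probability. A worked restatement of that arithmetic (the function name is illustrative):

// With argmax bin 30, p[29] = 0.2 and p[31] = 0.1, the left neighbor wins:
// round(10 * 30 - 100 * 0.2) / 10 = 280 / 10 = 28.0 years.
function interpolateAge(probs: Float32Array, bin: number): number {
  const raw = probs[bin - 1] > probs[bin + 1]
    ? 10 * bin - 100 * probs[bin - 1]   // pull toward the younger bin
    : 10 * bin + 100 * probs[bin + 1];  // pull toward the older bin
  return Math.round(raw) / 10;
}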

@@ -4281,7 +4279,7 @@ async function load4(config3) {
log("cached model:", model2.modelUrl);
return model2;
}
async function predict3(image19, config3, idx, count2) {
async function predict3(image20, config3, idx, count2) {
var _a;
if (!model2)
return null;

@@ -4292,7 +4290,7 @@ async function predict3(image19, config3, idx, count2) {
skipped2 = 0;
return new Promise(async (resolve) => {
var _a2, _b;
const resize = tf7.image.resizeBilinear(image19, [model2.inputs[0].shape[2], model2.inputs[0].shape[1]], false);
const resize = tf7.image.resizeBilinear(image20, [model2.inputs[0].shape[2], model2.inputs[0].shape[1]], false);
const [red, green, blue] = tf7.split(resize, 3, 3);
tf7.dispose(resize);
const redNorm = tf7.mul(red, rgb[0]);

@@ -4648,7 +4646,7 @@ async function predict4(input, config3) {
results3d[1] = results3d[1].sigmoid();
return results3d;
});
const buffers = await Promise.all(res.map((tensor2) => tensor2.buffer()));
const buffers = await Promise.all(res.map((tensor3) => tensor3.buffer()));
for (const t of res)
tf8.dispose(t);
const decoded = await decode(buffers[0], buffers[1], buffers[2], buffers[3], config3.body.maxDetected, config3.body.minConfidence);

@@ -4689,16 +4687,16 @@ function getBoxCenter2(box6) {
box6.startPoint[1] + (box6.endPoint[1] - box6.startPoint[1]) / 2
];
}
function cutBoxFromImageAndResize2(box6, image19, cropSize) {
const h = image19.shape[1];
const w = image19.shape[2];
function cutBoxFromImageAndResize2(box6, image20, cropSize) {
const h = image20.shape[1];
const w = image20.shape[2];
const boxes = [[
box6.startPoint[1] / h,
box6.startPoint[0] / w,
box6.endPoint[1] / h,
box6.endPoint[0] / w
]];
return tf9.image.cropAndResize(image19, boxes, [0], cropSize);
return tf9.image.cropAndResize(image20, boxes, [0], cropSize);
}
function scaleBoxCoordinates2(box6, factor) {
const startPoint = [box6.startPoint[0] * factor[0], box6.startPoint[1] * factor[1]];

@@ -7718,16 +7716,16 @@ var HandDetector = class {
const palmLandmarks = tf10.tidy(() => tf10.reshape(this.normalizeLandmarks(tf10.slice(t.predictions, [index, 5], [1, 14]), index), [-1, 2]));
hands.push({ box: palmBox, palmLandmarks, confidence: scores[index] });
}
for (const tensor2 of Object.keys(t))
tf10.dispose(t[tensor2]);
for (const tensor3 of Object.keys(t))
tf10.dispose(t[tensor3]);
return hands;
}
async estimateHandBounds(input, config3) {
const inputHeight = input.shape[1];
const inputWidth = input.shape[2];
const image19 = tf10.tidy(() => tf10.sub(tf10.div(tf10.image.resizeBilinear(input, [this.inputSize, this.inputSize]), 127.5), 1));
const predictions = await this.getBoxes(image19, config3);
tf10.dispose(image19);
const image20 = tf10.tidy(() => tf10.sub(tf10.div(tf10.image.resizeBilinear(input, [this.inputSize, this.inputSize]), 127.5), 1));
const predictions = await this.getBoxes(image20, config3);
tf10.dispose(image20);
const hands = [];
if (!predictions || predictions.length === 0)
return hands;

@@ -7872,11 +7870,11 @@ var HandPipeline = class {
Math.trunc(coord[2])
]);
}
async estimateHands(image19, config3) {
async estimateHands(image20, config3) {
let useFreshBox = false;
let boxes;
if (this.skipped === 0 || this.skipped > config3.hand.skipFrames || !config3.hand.landmarks || !config3.skipFrame) {
boxes = await this.handDetector.estimateHandBounds(image19, config3);
boxes = await this.handDetector.estimateHandBounds(image20, config3);
this.skipped = 0;
}
if (config3.skipFrame)

@@ -7895,8 +7893,8 @@ var HandPipeline = class {
if (config3.hand.landmarks) {
const angle = config3.hand.rotation ? computeRotation2(currentBox.palmLandmarks[palmLandmarksPalmBase], currentBox.palmLandmarks[palmLandmarksMiddleFingerBase]) : 0;
const palmCenter = getBoxCenter2(currentBox);
const palmCenterNormalized = [palmCenter[0] / image19.shape[2], palmCenter[1] / image19.shape[1]];
const rotatedImage = config3.hand.rotation && env2.kernels.includes("rotatewithoffset") ? tf11.image.rotateWithOffset(image19, angle, 0, palmCenterNormalized) : image19.clone();
const palmCenterNormalized = [palmCenter[0] / image20.shape[2], palmCenter[1] / image20.shape[1]];
const rotatedImage = config3.hand.rotation && env2.kernels.includes("rotatewithoffset") ? tf11.image.rotateWithOffset(image20, angle, 0, palmCenterNormalized) : image20.clone();
const rotationMatrix = buildRotationMatrix2(-angle, palmCenter);
const newBox = useFreshBox ? this.getBoxForPalmLandmarks(currentBox.palmLandmarks, rotationMatrix) : currentBox;
const croppedInput = cutBoxFromImageAndResize2(newBox, rotatedImage, [this.inputSize, this.inputSize]);

@@ -8501,13 +8499,13 @@ async function load7(config3) {
log("cached model:", model4["modelUrl"]);
return model4;
}
async function predict6(image19, config3) {
async function predict6(image20, config3) {
if (!model4)
return [];
if (!config3.body.enabled)
return [];
const imgSize = { width: image19.shape[2] || 0, height: image19.shape[1] || 0 };
const resize = tf13.image.resizeBilinear(image19, [model4["width"], model4["height"]], false);
const imgSize = { width: image20.shape[2] || 0, height: image20.shape[1] || 0 };
const resize = tf13.image.resizeBilinear(image20, [model4["width"], model4["height"]], false);
const normalize = tf13.div(resize, [255]);
tf13.dispose(resize);
const resT = await model4.predict(normalize);

@@ -8584,7 +8582,7 @@ function max2d(inputs, minScore) {
return [0, 0, newScore];
});
}
async function predict7(image19, config3) {
async function predict7(image20, config3) {
var _a;
if (skipped3 < (((_a = config3.body) == null ? void 0 : _a.skipFrames) || 0) && config3.skipFrame && Object.keys(keypoints).length > 0) {
skipped3++;

@@ -8593,26 +8591,26 @@ async function predict7(image19, config3) {
skipped3 = 0;
return new Promise(async (resolve) => {
var _a2;
const tensor2 = tf14.tidy(() => {
const tensor3 = tf14.tidy(() => {
if (!model5.inputs[0].shape)
return null;
const resize = tf14.image.resizeBilinear(image19, [model5.inputs[0].shape[2], model5.inputs[0].shape[1]], false);
const resize = tf14.image.resizeBilinear(image20, [model5.inputs[0].shape[2], model5.inputs[0].shape[1]], false);
const enhance2 = tf14.mul(resize, 2);
const norm = enhance2.sub(1);
return norm;
});
let resT;
if (config3.body.enabled)
resT = await model5.predict(tensor2);
tf14.dispose(tensor2);
resT = await model5.predict(tensor3);
tf14.dispose(tensor3);
if (resT) {
keypoints.length = 0;
const squeeze7 = resT.squeeze();
tf14.dispose(resT);
const stack2 = squeeze7.unstack(2);
const stack3 = squeeze7.unstack(2);
tf14.dispose(squeeze7);
for (let id = 0; id < stack2.length; id++) {
const [x2, y2, partScore] = max2d(stack2[id], config3.body.minConfidence);
for (let id = 0; id < stack3.length; id++) {
const [x2, y2, partScore] = max2d(stack3[id], config3.body.minConfidence);
if (score > (((_a2 = config3.body) == null ? void 0 : _a2.minConfidence) || 0)) {
keypoints.push({
score: Math.round(100 * partScore) / 100,

@@ -8622,13 +8620,13 @@ async function predict7(image19, config3) {
y2 / model5.inputs[0].shape[1]
],
position: [
Math.round(image19.shape[2] * x2 / model5.inputs[0].shape[2]),
Math.round(image19.shape[1] * y2 / model5.inputs[0].shape[1])
Math.round(image20.shape[2] * x2 / model5.inputs[0].shape[2]),
Math.round(image20.shape[1] * y2 / model5.inputs[0].shape[1])
]
});
}
}
stack2.forEach((s) => tf14.dispose(s));
stack3.forEach((s) => tf14.dispose(s));
}
score = keypoints.reduce((prev, curr) => curr.score > prev ? curr.score : prev, 0);
const x = keypoints.map((a) => a.position[0]);

@@ -8671,7 +8669,7 @@ async function load9(config3) {
log("cached model:", model6["modelUrl"]);
return model6;
}
async function parseSinglePose(res, config3, image19) {
async function parseSinglePose(res, config3, image20) {
keypoints2.length = 0;
const kpt3 = res[0][0];
for (let id = 0; id < kpt3.length; id++) {

@@ -8685,8 +8683,8 @@ async function parseSinglePose(res, config3, image19) {
kpt3[id][0]
],
position: [
Math.round((image19.shape[2] || 0) * kpt3[id][1]),
Math.round((image19.shape[1] || 0) * kpt3[id][0])
Math.round((image20.shape[2] || 0) * kpt3[id][1]),
Math.round((image20.shape[1] || 0) * kpt3[id][0])
]
});
}

@@ -8712,7 +8710,7 @@ async function parseSinglePose(res, config3, image19) {
persons2.push({ id: 0, score: score2, box: box5, boxRaw: boxRaw2, keypoints: keypoints2 });
return persons2;
}
async function parseMultiPose(res, config3, image19) {
async function parseMultiPose(res, config3, image20) {
const persons2 = [];
for (let p = 0; p < res[0].length; p++) {
const kpt3 = res[0][p];

@@ -8731,8 +8729,8 @@ async function parseMultiPose(res, config3, image19) {
kpt3[3 * i + 0]
],
position: [
Math.trunc(kpt3[3 * i + 1] * (image19.shape[2] || 0)),
Math.trunc(kpt3[3 * i + 0] * (image19.shape[1] || 0))
Math.trunc(kpt3[3 * i + 1] * (image20.shape[2] || 0)),
Math.trunc(kpt3[3 * i + 0] * (image20.shape[1] || 0))
]
});
}

@@ -8743,45 +8741,45 @@ async function parseMultiPose(res, config3, image19) {
score: score2,
boxRaw: boxRaw2,
box: [
Math.trunc(boxRaw2[0] * (image19.shape[2] || 0)),
Math.trunc(boxRaw2[1] * (image19.shape[1] || 0)),
Math.trunc(boxRaw2[2] * (image19.shape[2] || 0)),
Math.trunc(boxRaw2[3] * (image19.shape[1] || 0))
Math.trunc(boxRaw2[0] * (image20.shape[2] || 0)),
Math.trunc(boxRaw2[1] * (image20.shape[1] || 0)),
Math.trunc(boxRaw2[2] * (image20.shape[2] || 0)),
Math.trunc(boxRaw2[3] * (image20.shape[1] || 0))
],
keypoints: keypoints2
});
}
return persons2;
}
async function predict8(image19, config3) {
async function predict8(image20, config3) {
if (skipped4 < (config3.body.skipFrames || 0) && config3.skipFrame && Object.keys(keypoints2).length > 0) {
skipped4++;
return [{ id: 0, score: score2, box: box5, boxRaw: boxRaw2, keypoints: keypoints2 }];
}
skipped4 = 0;
return new Promise(async (resolve) => {
const tensor2 = tf15.tidy(() => {
const tensor3 = tf15.tidy(() => {
if (!model6.inputs[0].shape)
return null;
let inputSize = model6.inputs[0].shape[2];
if (inputSize === -1)
inputSize = 256;
const resize = tf15.image.resizeBilinear(image19, [inputSize, inputSize], false);
const resize = tf15.image.resizeBilinear(image20, [inputSize, inputSize], false);
const cast4 = tf15.cast(resize, "int32");
return cast4;
});
let resT;
if (config3.body.enabled)
resT = await model6.predict(tensor2);
tf15.dispose(tensor2);
resT = await model6.predict(tensor3);
tf15.dispose(tensor3);
if (!resT)
resolve([]);
const res = await resT.array();
let persons2;
if (resT.shape[2] === 17)
persons2 = await parseSinglePose(res, config3, image19);
persons2 = await parseSinglePose(res, config3, image20);
else if (resT.shape[2] === 56)
persons2 = await parseMultiPose(res, config3, image19);
persons2 = await parseMultiPose(res, config3, image20);
tf15.dispose(resT);
resolve(persons2);
});

@@ -8955,7 +8953,7 @@ async function process2(res, inputSize, outputShape, config3) {
results = results.filter((_val, idx) => nmsIdx.includes(idx)).sort((a, b) => b.score - a.score);
return results;
}
async function predict9(image19, config3) {
async function predict9(image20, config3) {
if (skipped5 < (config3.object.skipFrames || 0) && config3.skipFrame && last3.length > 0) {
skipped5++;
return last3;

@@ -8964,8 +8962,8 @@ async function predict9(image19, config3) {
if (!env2.kernels.includes("mod") || !env2.kernels.includes("sparsetodense"))
return last3;
return new Promise(async (resolve) => {
const outputSize = [image19.shape[2], image19.shape[1]];
const resize = tf16.image.resizeBilinear(image19, [model7.inputSize, model7.inputSize], false);
const outputSize = [image20.shape[2], image20.shape[1]];
const resize = tf16.image.resizeBilinear(image20, [model7.inputSize, model7.inputSize], false);
const norm = tf16.div(resize, 255);
const transpose = norm.transpose([0, 3, 1, 2]);
tf16.dispose(norm);

@@ -9011,6 +9009,7 @@ async function process3(res, inputSize, outputShape, config3) {
tf17.dispose(squeezeT);
const stackT = tf17.stack([arr[1], arr[0], arr[3], arr[2]], 1);
const boxesT = tf17.squeeze(stackT);
tf17.dispose(stackT);
const scoresT = tf17.squeeze(arr[4]);
const classesT = tf17.squeeze(arr[5]);
arr.forEach((t) => tf17.dispose(t));

@@ -9220,8 +9219,8 @@ function GLImageFilter(params) {
gl.uniform1f(_currentProgram.uniform.flipY, flipY ? -1 : 1);
gl.drawArrays(gl.TRIANGLES, 0, 6);
};
this.apply = function(image19) {
_resize(image19.width, image19.height);
this.apply = function(image20) {
_resize(image20.width, image20.height);
_drawCount = 0;
if (!_sourceTexture)
_sourceTexture = gl.createTexture();

@@ -9230,7 +9229,7 @@ function GLImageFilter(params) {
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image19);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image20);
if (_filterChain.length === 0) {
_draw();
return _canvas;

@@ -9781,16 +9780,33 @@ var maxSize = 2048;
var inCanvas;
var outCanvas;
var fx;
function canvas(width, height) {
let c;
if (env2.browser) {
if (typeof OffscreenCanvas !== "undefined") {
c = new OffscreenCanvas(width, height);
} else {
c = document.createElement("canvas");
c.width = width;
c.height = height;
}
} else {
c = typeof env2.Canvas !== "undefined" ? new env2.Canvas(width, height) : null;
}
if (!c)
throw new Error("Human: Cannot create canvas");
return c;
}
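
This new canvas() factory is what the scattered OffscreenCanvas/document.createElement call sites below collapse into: browsers get an OffscreenCanvas when available, node gets whatever Canvas class was registered on env2, and failure throws instead of silently returning null. A typed restatement under those same assumptions (the EnvLike shape and createCanvas name are illustrative):

type EnvLike = { browser: boolean, Canvas?: new (width: number, height: number) => HTMLCanvasElement };

function createCanvas(env: EnvLike, width: number, height: number): OffscreenCanvas | HTMLCanvasElement {
  if (env.browser) {
    if (typeof OffscreenCanvas !== 'undefined') return new OffscreenCanvas(width, height);
    const c = document.createElement('canvas'); // DOM fallback when OffscreenCanvas is absent
    c.width = width;
    c.height = height;
    return c;
  }
  if (!env.Canvas) throw new Error('Human: Cannot create canvas');
  return new env.Canvas(width, height); // node-canvas style polyfill
}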
function process4(input, config3) {
let tensor2;
let tensor3;
if (!input)
throw new Error("Human: Input is missing");
if (!(input instanceof tf18.Tensor) && !(typeof Image !== "undefined" && input instanceof Image) && !(typeof ImageData !== "undefined" && input instanceof ImageData) && !(typeof ImageBitmap !== "undefined" && input instanceof ImageBitmap) && !(typeof HTMLImageElement !== "undefined" && input instanceof HTMLImageElement) && !(typeof HTMLMediaElement !== "undefined" && input instanceof HTMLMediaElement) && !(typeof HTMLVideoElement !== "undefined" && input instanceof HTMLVideoElement) && !(typeof HTMLCanvasElement !== "undefined" && input instanceof HTMLCanvasElement) && !(typeof OffscreenCanvas !== "undefined" && input instanceof OffscreenCanvas)) {
if (!(input instanceof tf18.Tensor) && !(typeof Image !== "undefined" && input instanceof Image) && !(typeof env2.Canvas !== "undefined" && input instanceof env2.Canvas) && !(typeof ImageData !== "undefined" && input instanceof ImageData) && !(typeof ImageBitmap !== "undefined" && input instanceof ImageBitmap) && !(typeof HTMLImageElement !== "undefined" && input instanceof HTMLImageElement) && !(typeof HTMLMediaElement !== "undefined" && input instanceof HTMLMediaElement) && !(typeof HTMLVideoElement !== "undefined" && input instanceof HTMLVideoElement) && !(typeof HTMLCanvasElement !== "undefined" && input instanceof HTMLCanvasElement) && !(typeof OffscreenCanvas !== "undefined" && input instanceof OffscreenCanvas)) {
throw new Error("Human: Input type is not recognized");
}
if (input instanceof tf18.Tensor) {
if (input.shape && input.shape.length === 4 && input.shape[0] === 1 && input.shape[3] === 3)
tensor2 = tf18.clone(input);
tensor3 = tf18.clone(input);
else
throw new Error(`Human: Input tensor shape must be [1, height, width, 3] and instead was ${input.shape}`);
} else {

@@ -9818,15 +9834,10 @@ function process4(input, config3) {
targetHeight = originalHeight * ((config3.filter.width || 0) / originalWidth);
if (!targetWidth || !targetHeight)
throw new Error("Human: Input cannot determine dimension");
if (!inCanvas || (inCanvas == null ? void 0 : inCanvas.width) !== targetWidth || (inCanvas == null ? void 0 : inCanvas.height) !== targetHeight) {
inCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
if ((inCanvas == null ? void 0 : inCanvas.width) !== targetWidth)
inCanvas.width = targetWidth;
if ((inCanvas == null ? void 0 : inCanvas.height) !== targetHeight)
inCanvas.height = targetHeight;
}
if (!inCanvas || (inCanvas == null ? void 0 : inCanvas.width) !== targetWidth || (inCanvas == null ? void 0 : inCanvas.height) !== targetHeight)
inCanvas = canvas(targetWidth, targetHeight);
const ctx = inCanvas.getContext("2d");
if (input instanceof ImageData) {
if (typeof ImageData !== "undefined" && input instanceof ImageData) {
ctx.putImageData(input, 0, 0);
} else {
if (config3.filter.flip && typeof ctx.translate !== "undefined") {

@@ -9840,7 +9851,7 @@ function process4(input, config3) {
}
if (config3.filter.enabled) {
if (!fx || !outCanvas || inCanvas.width !== outCanvas.width || (inCanvas == null ? void 0 : inCanvas.height) !== (outCanvas == null ? void 0 : outCanvas.height)) {
outCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(inCanvas == null ? void 0 : inCanvas.width, inCanvas == null ? void 0 : inCanvas.height) : document.createElement("canvas");
outCanvas = canvas(inCanvas == null ? void 0 : inCanvas.width, inCanvas == null ? void 0 : inCanvas.height);
if ((outCanvas == null ? void 0 : outCanvas.width) !== (inCanvas == null ? void 0 : inCanvas.width))
outCanvas.width = inCanvas == null ? void 0 : inCanvas.width;
if ((outCanvas == null ? void 0 : outCanvas.height) !== (inCanvas == null ? void 0 : inCanvas.height))

@@ -9883,58 +9894,70 @@ function process4(input, config3) {
if (fx)
fx = null;
}
if (!tensor2) {
if (!tensor3) {
let pixels;
if (outCanvas.data) {
const shape = [outCanvas.height, outCanvas.width, 3];
pixels = tf18.tensor3d(outCanvas.data, shape, "int32");
} else if (outCanvas instanceof ImageData) {
} else if (typeof ImageData !== "undefined" && outCanvas instanceof ImageData) {
pixels = tf18.browser ? tf18.browser.fromPixels(outCanvas) : null;
} else if (config3.backend === "webgl" || config3.backend === "humangl") {
const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
const tempCanvas = canvas(targetWidth, targetHeight);
tempCanvas.width = targetWidth;
tempCanvas.height = targetHeight;
const tempCtx = tempCanvas.getContext("2d");
tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
pixels = tf18.browser ? tf18.browser.fromPixels(tempCanvas) : null;
pixels = tf18.browser && env2.browser ? tf18.browser.fromPixels(tempCanvas) : null;
} else {
const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
const tempCanvas = canvas(targetWidth, targetHeight);
tempCanvas.width = targetWidth;
tempCanvas.height = targetHeight;
const tempCtx = tempCanvas.getContext("2d");
tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
const data = tempCtx == null ? void 0 : tempCtx.getImageData(0, 0, targetWidth, targetHeight);
pixels = tf18.browser ? tf18.browser.fromPixels(data) : null;
tempCtx.drawImage(outCanvas, 0, 0);
const data = tempCtx.getImageData(0, 0, targetWidth, targetHeight);
if (tf18.browser && env2.browser) {
pixels = tf18.browser.fromPixels(data);
} else {
pixels = tf18.tidy(() => {
const imageData = tf18.tensor(Array.from(data.data), [targetWidth, targetHeight, 4]);
const channels = tf18.split(imageData, 4, 2);
const rgb2 = tf18.stack([channels[0], channels[1], channels[2]], 2);
const expand = tf18.reshape(rgb2, [imageData.shape[0], imageData.shape[1], 3]);
return expand;
});
}
}
if (pixels) {
const casted = tf18.cast(pixels, "float32");
tensor2 = tf18.expandDims(casted, 0);
tensor3 = tf18.expandDims(casted, 0);
tf18.dispose(pixels);
tf18.dispose(casted);
} else {
tensor3 = tf18.zeros([1, targetWidth, targetHeight, 3]);
throw new Error("Human: Cannot create tensor from input");
}
}
}
const canvas2 = config3.filter.return ? outCanvas : null;
return { tensor: tensor2, canvas: canvas2 };
return { tensor: tensor3, canvas: config3.filter.return ? outCanvas : null };
}
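
The else-branch above adds a pure-tensor fallback for runtimes where tf.browser.fromPixels is unavailable: the canvas's RGBA bytes are lifted into a rank-3 tensor and the alpha channel is dropped with split/stack. A standalone sketch of that conversion, assuming @tensorflow/tfjs and mirroring the bundle's width-first shape (the function name is illustrative):

import * as tf from '@tensorflow/tfjs';

// data is ImageData-like: 4 bytes (RGBA) per pixel.
function rgbaToRgb(data: { data: Uint8ClampedArray }, width: number, height: number): tf.Tensor3D {
  return tf.tidy(() => {
    const rgba = tf.tensor(Array.from(data.data), [width, height, 4]);
    const channels = tf.split(rgba, 4, 2);                            // [r, g, b, a], each [w, h, 1]
    const rgb = tf.stack([channels[0], channels[1], channels[2]], 2); // [w, h, 3, 1]
    return tf.reshape(rgb, [width, height, 3]) as tf.Tensor3D;       // drop the trailing 1
  });
}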
|
||||
var lastInputSum = 0;
|
||||
var lastCacheDiff = 1;
|
||||
async function skip(instance, input) {
|
||||
if (instance.config.cacheSensitivity === 0)
|
||||
async function skip(config3, input) {
|
||||
if (config3.cacheSensitivity === 0)
|
||||
return false;
|
||||
const resizeFact = 32;
|
||||
if (!input.shape[1] || !input.shape[2])
|
||||
return false;
|
||||
const reduced = tf18.image.resizeBilinear(input, [Math.trunc(input.shape[1] / resizeFact), Math.trunc(input.shape[2] / resizeFact)]);
|
||||
const reducedData = await reduced.data();
|
||||
tf18.dispose(reduced);
|
||||
let sum = 0;
|
||||
for (let i = 0; i < reducedData.length / 3; i++)
|
||||
sum += reducedData[3 * i + 2];
|
||||
reduced.dispose();
|
||||
const diff = 100 * (Math.max(sum, lastInputSum) / Math.min(sum, lastInputSum) - 1);
|
||||
lastInputSum = sum;
|
||||
const skipFrame = diff < Math.max(instance.config.cacheSensitivity, lastCacheDiff);
|
||||
lastCacheDiff = diff > 10 * instance.config.cacheSensitivity ? 0 : diff;
|
||||
const skipFrame = diff < Math.max(config3.cacheSensitivity, lastCacheDiff);
|
||||
lastCacheDiff = diff > 10 * config3.cacheSensitivity ? 0 : diff;
|
||||
return skipFrame;
|
||||
}
|
||||
|
||||
|
@ -9966,6 +9989,7 @@ async function predict11(input) {
|
|||
tf19.dispose(resizeInput);
|
||||
tf19.dispose(norm);
|
||||
const squeeze7 = tf19.squeeze(res, 0);
|
||||
tf19.dispose(res);
|
||||
let resizeOutput;
|
||||
if (squeeze7.shape[2] === 2) {
|
||||
const softmax = squeeze7.softmax();
|
||||
|
@ -9983,16 +10007,18 @@ async function predict11(input) {
|
|||
} else {
|
||||
resizeOutput = tf19.image.resizeBilinear(squeeze7, [width, height]);
|
||||
}
|
||||
if (typeof document === "undefined")
|
||||
return resizeOutput.data();
|
||||
tf19.dispose(squeeze7);
|
||||
if (env2.node) {
|
||||
const data = await resizeOutput.data();
|
||||
tf19.dispose(resizeOutput);
|
||||
return data;
|
||||
}
|
||||
const overlay = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(width, height) : document.createElement("canvas");
|
||||
overlay.width = width;
|
||||
overlay.height = height;
|
||||
if (tf19.browser)
|
||||
await tf19.browser.toPixels(resizeOutput, overlay);
|
||||
tf19.dispose(resizeOutput);
|
||||
tf19.dispose(squeeze7);
|
||||
tf19.dispose(res);
|
||||
const alphaCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(width, height) : document.createElement("canvas");
|
||||
alphaCanvas.width = width;
|
||||
alphaCanvas.height = height;
|
||||
|
@ -10297,7 +10323,7 @@ var detectFace = async (parent, input) => {
|
|||
delete faces[i].annotations.rightEyeIris;
|
||||
}
|
||||
const irisSize = ((_e = faces[i].annotations) == null ? void 0 : _e.leftEyeIris) && ((_f = faces[i].annotations) == null ? void 0 : _f.rightEyeIris) ? Math.max(Math.abs(faces[i].annotations.leftEyeIris[3][0] - faces[i].annotations.leftEyeIris[1][0]), Math.abs(faces[i].annotations.rightEyeIris[4][1] - faces[i].annotations.rightEyeIris[2][1])) / input.shape[2] : 0;
|
||||
const tensor2 = parent.config.face.detector.return ? tf20.squeeze(faces[i].tensor) : null;
|
||||
const tensor3 = parent.config.face.detector.return ? tf20.squeeze(faces[i].tensor) : null;
|
||||
tf20.dispose(faces[i].tensor);
|
||||
if (faces[i].tensor)
|
||||
delete faces[i].tensor;
|
||||
|
@ -10311,7 +10337,7 @@ var detectFace = async (parent, input) => {
|
|||
emotion: emotionRes,
|
||||
iris: irisSize !== 0 ? Math.trunc(500 / irisSize / 11.7) / 100 : 0,
|
||||
rotation,
|
||||
tensor: tensor2
|
||||
tensor: tensor3
|
||||
});
|
||||
parent.analyze("End Face");
|
||||
}
|
||||
|
@ -10446,7 +10472,7 @@ var draw_exports = {};
|
|||
__export(draw_exports, {
|
||||
all: () => all,
|
||||
body: () => body2,
|
||||
canvas: () => canvas,
|
||||
canvas: () => canvas2,
|
||||
face: () => face2,
|
||||
gesture: () => gesture,
|
||||
hand: () => hand2,
|
||||
|
@ -10919,7 +10945,7 @@ async function person(inCanvas2, result, drawOptions) {
|
|||
}
|
||||
}
|
||||
}
|
||||
async function canvas(inCanvas2, outCanvas2) {
|
||||
async function canvas2(inCanvas2, outCanvas2) {
|
||||
if (!inCanvas2 || !outCanvas2)
|
||||
return;
|
||||
getCanvasContext(outCanvas2);
|
||||
|
@ -12002,29 +12028,34 @@ async function warmupBitmap(instance) {
|
|||
async function warmupCanvas(instance) {
|
||||
return new Promise((resolve) => {
|
||||
let src;
|
||||
let size = 0;
|
||||
switch (instance.config.warmup) {
|
||||
case "face":
|
||||
size = 256;
|
||||
src = "data:image/jpeg;base64," + face3;
|
||||
break;
|
||||
case "full":
|
||||
case "body":
|
||||
size = 1200;
|
||||
src = "data:image/jpeg;base64," + body3;
|
||||
break;
|
||||
default:
|
||||
src = null;
|
||||
}
|
||||
const img = new Image();
|
||||
let img;
|
||||
if (typeof Image !== "undefined")
|
||||
img = new Image();
|
||||
else if (env2.Image)
|
||||
img = new env2.Image();
|
||||
img.onload = async () => {
|
||||
const canvas2 = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(size, size) : document.createElement("canvas");
|
||||
canvas2.width = img.naturalWidth;
|
||||
canvas2.height = img.naturalHeight;
|
||||
const ctx = canvas2.getContext("2d");
|
||||
ctx == null ? void 0 : ctx.drawImage(img, 0, 0);
|
||||
const res = await instance.detect(canvas2, instance.config);
|
||||
resolve(res);
|
||||
const canvas3 = canvas(img.naturalWidth, img.naturalHeight);
|
||||
if (!canvas3) {
|
||||
log("Warmup: Canvas not found");
|
||||
resolve({});
|
||||
} else {
|
||||
const ctx = canvas3.getContext("2d");
|
||||
ctx.drawImage(img, 0, 0);
|
||||
const tensor3 = await instance.image(canvas3);
|
||||
const res = await instance.detect(tensor3.tensor, instance.config);
|
||||
resolve(res);
|
||||
}
|
||||
};
|
||||
if (src)
|
||||
img.src = src;
|
||||
|
@ -12063,7 +12094,7 @@ async function warmup(instance, userConfig) {
|
|||
let res;
|
||||
if (typeof createImageBitmap === "function")
|
||||
res = await warmupBitmap(instance);
|
||||
else if (typeof Image !== "undefined")
|
||||
else if (typeof Image !== "undefined" || env2.Canvas !== void 0)
|
||||
res = await warmupCanvas(instance);
|
||||
else
|
||||
res = await warmupNode(instance);
|
||||
|
@ -12155,7 +12186,7 @@ var Human = class {
|
|||
return similarity(embedding1, embedding2);
|
||||
}
|
||||
segmentation(input, background) {
|
||||
return process5(input, background, this.config);
|
||||
return input ? process5(input, background, this.config) : null;
|
||||
}
|
||||
enhance(input) {
|
||||
return enhance(input);
|
||||
|
@ -12213,32 +12244,32 @@ var Human = class {
|
|||
await check(this);
|
||||
await this.load();
|
||||
timeStamp = now();
|
||||
this.process = process4(input, this.config);
|
||||
const inputTensor = this.process.tensor;
|
||||
let img = process4(input, this.config);
|
||||
this.process = img;
|
||||
this.performance.image = Math.trunc(now() - timeStamp);
|
||||
this.analyze("Get Image:");
|
||||
if (this.config.segmentation.enabled && this.process && inputTensor) {
|
||||
if (this.config.segmentation.enabled && this.process && img.tensor && img.canvas) {
|
||||
this.analyze("Start Segmentation:");
|
||||
this.state = "run:segmentation";
|
||||
timeStamp = now();
|
||||
await predict11(this.process);
|
||||
await predict11(img);
|
||||
elapsedTime = Math.trunc(now() - timeStamp);
|
||||
if (elapsedTime > 0)
|
||||
this.performance.segmentation = elapsedTime;
|
||||
if (this.process.canvas) {
|
||||
tf24.dispose(inputTensor);
|
||||
this.process = process4(this.process.canvas, this.config);
|
||||
if (img.canvas) {
|
||||
tf24.dispose(img.tensor);
|
||||
img = process4(img.canvas, this.config);
|
||||
}
|
||||
this.analyze("End Segmentation:");
|
||||
}
|
||||
if (!this.process || !inputTensor) {
|
||||
if (!img.tensor) {
|
||||
log("could not convert input to tensor");
|
||||
resolve({ error: "could not convert input to tensor" });
|
||||
return;
|
||||
}
|
||||
this.emit("image");
|
||||
timeStamp = now();
|
||||
this.config.skipFrame = await skip(this, inputTensor);
|
||||
this.config.skipFrame = await skip(this.config, img.tensor);
|
||||
if (!this.performance.frames)
|
||||
this.performance.frames = 0;
|
||||
if (!this.performance.cached)
|
||||
|
@ -12253,13 +12284,13 @@ var Human = class {
|
|||
let handRes = [];
|
||||
let objectRes = [];
|
||||
if (this.config.async) {
|
||||
faceRes = this.config.face.enabled ? detectFace(this, inputTensor) : [];
|
||||
faceRes = this.config.face.enabled ? detectFace(this, img.tensor) : [];
|
||||
if (this.performance.face)
|
||||
delete this.performance.face;
|
||||
} else {
|
||||
this.state = "run:face";
|
||||
timeStamp = now();
|
||||
faceRes = this.config.face.enabled ? await detectFace(this, inputTensor) : [];
|
||||
faceRes = this.config.face.enabled ? await detectFace(this, img.tensor) : [];
|
||||
elapsedTime = Math.trunc(now() - timeStamp);
|
||||
if (elapsedTime > 0)
|
||||
this.performance.face = elapsedTime;
|
||||
|
@ -12267,26 +12298,26 @@ var Human = class {
|
|||
this.analyze("Start Body:");
|
||||
if (this.config.async) {
|
||||
if ((_a = this.config.body.modelPath) == null ? void 0 : _a.includes("posenet"))
|
||||
bodyRes = this.config.body.enabled ? predict4(inputTensor, this.config) : [];
|
||||
bodyRes = this.config.body.enabled ? predict4(img.tensor, this.config) : [];
|
||||
else if ((_b = this.config.body.modelPath) == null ? void 0 : _b.includes("blazepose"))
|
||||
bodyRes = this.config.body.enabled ? predict6(inputTensor, this.config) : [];
|
||||
bodyRes = this.config.body.enabled ? predict6(img.tensor, this.config) : [];
|
||||
else if ((_c = this.config.body.modelPath) == null ? void 0 : _c.includes("efficientpose"))
|
||||
bodyRes = this.config.body.enabled ? predict7(inputTensor, this.config) : [];
|
||||
bodyRes = this.config.body.enabled ? predict7(img.tensor, this.config) : [];
|
||||
else if ((_d = this.config.body.modelPath) == null ? void 0 : _d.includes("movenet"))
|
||||
bodyRes = this.config.body.enabled ? predict8(inputTensor, this.config) : [];
|
||||
bodyRes = this.config.body.enabled ? predict8(img.tensor, this.config) : [];
if (this.performance.body)
delete this.performance.body;
} else {
this.state = "run:body";
timeStamp = now();
if ((_e = this.config.body.modelPath) == null ? void 0 : _e.includes("posenet"))
bodyRes = this.config.body.enabled ? await predict4(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict4(img.tensor, this.config) : [];
else if ((_f = this.config.body.modelPath) == null ? void 0 : _f.includes("blazepose"))
bodyRes = this.config.body.enabled ? await predict6(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict6(img.tensor, this.config) : [];
else if ((_g = this.config.body.modelPath) == null ? void 0 : _g.includes("efficientpose"))
bodyRes = this.config.body.enabled ? await predict7(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict7(img.tensor, this.config) : [];
else if ((_h = this.config.body.modelPath) == null ? void 0 : _h.includes("movenet"))
bodyRes = this.config.body.enabled ? await predict8(inputTensor, this.config) : [];
bodyRes = this.config.body.enabled ? await predict8(img.tensor, this.config) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.body = elapsedTime;

@@ -12294,13 +12325,13 @@ var Human = class {
this.analyze("End Body:");
this.analyze("Start Hand:");
if (this.config.async) {
handRes = this.config.hand.enabled ? predict5(inputTensor, this.config) : [];
handRes = this.config.hand.enabled ? predict5(img.tensor, this.config) : [];
if (this.performance.hand)
delete this.performance.hand;
} else {
this.state = "run:hand";
timeStamp = now();
handRes = this.config.hand.enabled ? await predict5(inputTensor, this.config) : [];
handRes = this.config.hand.enabled ? await predict5(img.tensor, this.config) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.hand = elapsedTime;

@@ -12309,18 +12340,18 @@ var Human = class {
this.analyze("Start Object:");
if (this.config.async) {
if ((_i = this.config.object.modelPath) == null ? void 0 : _i.includes("nanodet"))
objectRes = this.config.object.enabled ? predict9(inputTensor, this.config) : [];
objectRes = this.config.object.enabled ? predict9(img.tensor, this.config) : [];
else if ((_j = this.config.object.modelPath) == null ? void 0 : _j.includes("centernet"))
objectRes = this.config.object.enabled ? predict10(inputTensor, this.config) : [];
objectRes = this.config.object.enabled ? predict10(img.tensor, this.config) : [];
if (this.performance.object)
delete this.performance.object;
} else {
this.state = "run:object";
timeStamp = now();
if ((_k = this.config.object.modelPath) == null ? void 0 : _k.includes("nanodet"))
objectRes = this.config.object.enabled ? await predict9(inputTensor, this.config) : [];
objectRes = this.config.object.enabled ? await predict9(img.tensor, this.config) : [];
else if ((_l = this.config.object.modelPath) == null ? void 0 : _l.includes("centernet"))
objectRes = this.config.object.enabled ? await predict10(inputTensor, this.config) : [];
objectRes = this.config.object.enabled ? await predict10(img.tensor, this.config) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0)
this.performance.object = elapsedTime;

@@ -12353,7 +12384,7 @@ var Human = class {
return join2(faceRes, bodyRes, handRes, gestureRes, shape);
}
};
tf24.dispose(inputTensor);
tf24.dispose(img.tensor);
this.emit("detect");
resolve(this.result);
});
@@ -5,8 +5,8 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { Config } from '../config';
import { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import type { GraphModel, Tensor } from '../tfjs/types';

let model: GraphModel;

@@ -2,8 +2,8 @@ import { log, join, mergeDeep } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import * as box from './box';
import * as util from './util';
import { Config } from '../config';
import { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';
import type { Tensor, GraphModel } from '../tfjs/types';

const keypointsCount = 6;

@@ -39,7 +39,7 @@ export class BlazeFaceModel {
async getBoundingBoxes(inputImage: Tensor, userConfig: Config) {
// sanity check on input
if ((!inputImage) || (inputImage['isDisposedInternal']) || (inputImage.shape.length !== 4) || (inputImage.shape[1] < 1) || (inputImage.shape[2] < 1)) return null;
if ((!inputImage) || (inputImage['isDisposedInternal']) || (inputImage.shape.length !== 4) || (inputImage.shape[1] < 1) || (inputImage.shape[2] < 1)) return { boxes: [] };
const [batch, boxes, scores] = tf.tidy(() => {
const resizedImage = tf.image.resizeBilinear(inputImage, [this.inputSize, this.inputSize]);
const normalizedImage = tf.sub(tf.div(resizedImage, 127.5), 0.5);

@@ -65,22 +65,21 @@ export class BlazeFaceModel {
const nmsTensor = await tf.image.nonMaxSuppressionAsync(boxes, scores, (this.config.face.detector?.maxDetected || 0), (this.config.face.detector?.iouThreshold || 0), (this.config.face.detector?.minConfidence || 0));
const nms = await nmsTensor.array();
tf.dispose(nmsTensor);
const annotatedBoxes: Array<{ box: { startPoint: Tensor, endPoint: Tensor }, landmarks: Tensor, anchor: number[], confidence: number }> = [];
const annotatedBoxes: Array<{ box: { startPoint: Tensor, endPoint: Tensor }, landmarks: Tensor, anchor: [number, number] | undefined, confidence: number }> = [];
const scoresData = await scores.data();
for (let i = 0; i < nms.length; i++) {
const confidence = scoresData[nms[i]];
if (confidence > (this.config.face.detector?.minConfidence || 0)) {
const boundingBox = tf.slice(boxes, [nms[i], 0], [1, -1]);
const localBox = box.createBox(boundingBox);
tf.dispose(boundingBox);
const anchor = this.anchorsData[nms[i]];
const landmarks = tf.tidy(() => tf.reshape(tf.squeeze(tf.slice(batch, [nms[i], keypointsCount - 1], [1, -1])), [keypointsCount, -1]));
annotatedBoxes.push({ box: localBox, landmarks, anchor, confidence });
annotatedBoxes.push({ box: box.createBox(boundingBox), landmarks, anchor: this.anchorsData[nms[i]], confidence });
tf.dispose(boundingBox);
}
}
tf.dispose(batch);
tf.dispose(boxes);
tf.dispose(scores);

return {
boxes: annotatedBoxes,
scaleFactor: [inputImage.shape[2] / this.inputSize, inputImage.shape[1] / this.inputSize],
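The pattern in this hunk recurs throughout the commit: an intermediate tensor is disposed only after every consumer has read it, and the containers (`batch`, `boxes`, `scores`) are released once the loop finishes. A minimal sketch of that lifecycle, assuming the stock `@tensorflow/tfjs` package instead of the project's bundled `tfjs.esm.js`, with a hypothetical `toBox()` helper standing in for `box.createBox()`:

import * as tf from '@tensorflow/tfjs';

// Hypothetical helper standing in for box.createBox(): reads the tensor and
// returns plain numbers, leaving disposal of the input to the caller.
function toBox(t: tf.Tensor): { startPoint: number[], endPoint: number[] } {
  const [x1, y1, x2, y2] = Array.from(t.dataSync());
  return { startPoint: [x1, y1], endPoint: [x2, y2] };
}

const boxes = tf.tensor2d([[10, 10, 50, 50], [20, 20, 80, 80]]);
const results: Array<{ startPoint: number[], endPoint: number[] }> = [];
for (let i = 0; i < 2; i++) {
  const boundingBox = tf.slice(boxes, [i, 0], [1, -1]); // intermediate tensor
  results.push(toBox(boundingBox));                     // consume it first...
  tf.dispose(boundingBox);                              // ...then release it
}
tf.dispose(boxes); // release the container once the loop is done
console.log(results.length, tf.memory().numTensors);   // 2 results, no leaked tensors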
@@ -6,14 +6,14 @@ export function scaleBoxCoordinates(box, factor) {
return { startPoint, endPoint };
}

export function getBoxSize(box) {
export function getBoxSize(box): [number, number] {
return [
Math.abs(box.endPoint[0] - box.startPoint[0]),
Math.abs(box.endPoint[1] - box.startPoint[1]),
];
}

export function getBoxCenter(box) {
export function getBoxCenter(box): [number, number] {
return [
box.startPoint[0] + (box.endPoint[0] - box.startPoint[0]) / 2,
box.startPoint[1] + (box.endPoint[1] - box.startPoint[1]) / 2,

@@ -35,7 +35,7 @@ export function cutBoxFromImageAndResize(box, image, cropSize) {
export function enlargeBox(box, factor = 1.5) {
const center = getBoxCenter(box);
const size = getBoxSize(box);
const newHalfSize = [factor * size[0] / 2, factor * size[1] / 2];
const newHalfSize: [number, number] = [factor * size[0] / 2, factor * size[1] / 2];
const startPoint = [center[0] - newHalfSize[0], center[1] - newHalfSize[1]];
const endPoint = [center[0] + newHalfSize[0], center[1] + newHalfSize[1]];
return { startPoint, endPoint, landmarks: box.landmarks };
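Annotating these helpers as `[number, number]` tuples rather than letting TypeScript infer `number[]` means callers that require a fixed-arity point, such as the rotation center passed to `rotateWithOffset`, type-check without casts. A small illustration of the difference:

const loose: number[] = [0.5, 0.5];            // arity is lost
const center: [number, number] = [0.5, 0.5];   // arity is part of the type

// a consumer that requires exactly two coordinates:
function normalizePoint(point: [number, number], w: number, h: number): [number, number] {
  return [point[0] / w, point[1] / h];
}

normalizePoint(center, 640, 480);   // ok
// normalizePoint(loose, 640, 480); // compile error: number[] is not [number, number]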
@@ -7,9 +7,9 @@ import * as tf from '../../dist/tfjs.esm.js';
import * as blazeface from './blazeface';
import * as facepipeline from './facepipeline';
import * as coords from './coords';
import { GraphModel, Tensor } from '../tfjs/types';
import { FaceResult } from '../result';
import { Config } from '../config';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { FaceResult } from '../result';
import type { Config } from '../config';

let faceModels: [blazeface.BlazeFaceModel | null, GraphModel | null, GraphModel | null] = [null, null, null];
let facePipeline;

@@ -53,7 +53,6 @@ export async function predict(input: Tensor, config: Config): Promise<FaceResult
annotations,
tensor: prediction.image,
});
if (prediction.coords) tf.dispose(prediction.coords);
}
return results;
}

@@ -2,8 +2,8 @@ import * as tf from '../../dist/tfjs.esm.js';
import * as bounding from './box';
import * as util from './util';
import * as coords from './coords';
import { Tensor, GraphModel } from '../tfjs/types';
import { BlazeFaceModel } from './blazeface';
import type { Tensor, GraphModel } from '../tfjs/types';
import type { BlazeFaceModel } from './blazeface';
import { env } from '../env';

const leftOutline = coords.MESH_ANNOTATIONS['leftEyeLower0'];

@@ -56,7 +56,7 @@ function replaceRawCoordinates(rawCoords, newCoords, prefix, keys) {
}
// The Pipeline coordinates between the bounding box and skeleton models.
export class Pipeline {
storedBoxes: Array<{ startPoint: number[], endPoint: number[], landmarks: Array<number>, confidence: number, faceConfidence?: number }>;
storedBoxes: Array<{ startPoint: number[], endPoint: number[], landmarks: Array<number>, confidence: number, faceConfidence?: number | undefined }>;
boundingBoxDetector: BlazeFaceModel; // tf.GraphModel
meshDetector: GraphModel; // tf.GraphModel
irisModel: GraphModel; // tf.GraphModel

@@ -158,17 +158,17 @@ export class Pipeline {
correctFaceRotation(config, box, input) {
const [indexOfMouth, indexOfForehead] = (box.landmarks.length >= meshLandmarks.count) ? meshLandmarks.symmetryLine : blazeFaceLandmarks.symmetryLine;
const angle = util.computeRotation(box.landmarks[indexOfMouth], box.landmarks[indexOfForehead]);
const faceCenter = bounding.getBoxCenter({ startPoint: box.startPoint, endPoint: box.endPoint });
const faceCenterNormalized = [faceCenter[0] / input.shape[2], faceCenter[1] / input.shape[1]];
const rotatedImage = tf.image.rotateWithOffset(input, angle, 0, faceCenterNormalized); // rotateWithOffset is not defined for tfjs-node
const angle: number = util.computeRotation(box.landmarks[indexOfMouth], box.landmarks[indexOfForehead]);
const faceCenter: [number, number] = bounding.getBoxCenter({ startPoint: box.startPoint, endPoint: box.endPoint });
const faceCenterNormalized: [number, number] = [faceCenter[0] / input.shape[2], faceCenter[1] / input.shape[1]];
const rotated = tf.image.rotateWithOffset(input, angle, 0, faceCenterNormalized); // rotateWithOffset is not defined for tfjs-node
const rotationMatrix = util.buildRotationMatrix(-angle, faceCenter);
const cut = config.face.mesh.enabled
? bounding.cutBoxFromImageAndResize({ startPoint: box.startPoint, endPoint: box.endPoint }, rotatedImage, [this.meshSize, this.meshSize])
: bounding.cutBoxFromImageAndResize({ startPoint: box.startPoint, endPoint: box.endPoint }, rotatedImage, [this.boxSize, this.boxSize]);
? bounding.cutBoxFromImageAndResize({ startPoint: box.startPoint, endPoint: box.endPoint }, rotated, [this.meshSize, this.meshSize])
: bounding.cutBoxFromImageAndResize({ startPoint: box.startPoint, endPoint: box.endPoint }, rotated, [this.boxSize, this.boxSize]);
const face = tf.div(cut, 255);
tf.dispose(cut);
tf.dispose(rotatedImage);
tf.dispose(rotated);
return [angle, rotationMatrix, face];
}
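The leak fix in `correctFaceRotation` is again ordering: `cut` and `rotated` are released as soon as the normalized `face` tensor has been derived from them, so only the returned tensor survives the call. A condensed sketch of the same shape, assuming stock `@tensorflow/tfjs` (and noting, as the comment above records, that `rotateWithOffset` is only implemented by browser backends):

import * as tf from '@tensorflow/tfjs';

function cropAndNormalize(input: tf.Tensor4D): tf.Tensor4D {
  // illustrative constants; the real pipeline derives angle/center/box per face
  const rotated = tf.image.rotateWithOffset(input, 0.1, 0, [0.5, 0.5]);
  const cut = tf.image.cropAndResize(rotated, [[0.1, 0.1, 0.9, 0.9]], [0], [192, 192]);
  const face = tf.div<tf.Tensor4D>(cut, 255); // the only tensor handed back to the caller
  tf.dispose(cut);     // intermediates are no longer referenced...
  tf.dispose(rotated); // ...so release them before returning
  return face;
}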
@@ -262,15 +262,14 @@ export class Pipeline {
[angle, rotationMatrix, face] = this.correctFaceRotation(config, box, input);
} else {
rotationMatrix = util.IDENTITY_MATRIX;
const clonedImage = input.clone();
const cloned = input.clone();
const cut = config.face.mesh.enabled
? bounding.cutBoxFromImageAndResize({ startPoint: box.startPoint, endPoint: box.endPoint }, clonedImage, [this.meshSize, this.meshSize])
: bounding.cutBoxFromImageAndResize({ startPoint: box.startPoint, endPoint: box.endPoint }, clonedImage, [this.boxSize, this.boxSize]);
? bounding.cutBoxFromImageAndResize({ startPoint: box.startPoint, endPoint: box.endPoint }, cloned, [this.meshSize, this.meshSize])
: bounding.cutBoxFromImageAndResize({ startPoint: box.startPoint, endPoint: box.endPoint }, cloned, [this.boxSize, this.boxSize]);
face = tf.div(cut, 255);
tf.dispose(cut);
tf.dispose(clonedImage);
tf.dispose(cloned);
}

// if we're not going to produce mesh, don't spend time with further processing
if (!config.face.mesh.enabled) {
results.push({

@@ -304,9 +303,9 @@ export class Pipeline {
// do rotation one more time with mesh keypoints if we want to return perfect image
if (config.face.detector.rotation && config.face.mesh.enabled && config.face.description.enabled && env.kernels.includes('rotatewithoffset')) {
tf.dispose(face); // we'll overwrite original face
[angle, rotationMatrix, face] = this.correctFaceRotation(config, box, input);
}

results.push({
mesh,
box,

@@ -7,9 +7,9 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import * as annotations from './annotations';
import { Tensor, GraphModel } from '../tfjs/types';
import { BodyResult } from '../result';
import { Config } from '../config';
import type { Tensor, GraphModel } from '../tfjs/types';
import type { BodyResult } from '../result';
import type { Config } from '../config';

let model: GraphModel;

@@ -186,8 +186,8 @@ export interface GestureConfig {
*/
export interface Config {
/** Backend used for TFJS operations */
// backend: '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu' | null,
backend: string;
backend: '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu',
// backend: string;

/** Path to *.wasm files if backend is set to `wasm` */
wasmPath: string,

@@ -202,8 +202,8 @@ export interface Config {
* - warmup pre-initializes all models for faster inference but can take significant time on startup
* - only used for `webgl` and `humangl` backends
*/
// warmup: 'none' | 'face' | 'full' | 'body' | string,
warmup: string;
warmup: 'none' | 'face' | 'full' | 'body',
// warmup: string;

/** Base model path (typically starting with file://, http:// or https://) for all models
* - individual modelPath values are relative to this path
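Flipping these fields from `string` to literal unions (the commented-out variants are kept as a record of the old shape) moves typo detection from runtime to the compiler. For example:

// sketch of the narrowed fields from the Config interface above
interface ConfigSketch {
  backend: '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu';
  warmup: 'none' | 'face' | 'full' | 'body';
}

const ok: ConfigSketch = { backend: 'webgl', warmup: 'face' };
// const bad: ConfigSketch = { backend: 'webgl2', warmup: 'face' }; // rejected at compile time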
@@ -4,9 +4,9 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { BodyResult } from '../result';
import { GraphModel, Tensor } from '../tfjs/types';
import { Config } from '../config';
import type { BodyResult } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';

let model: GraphModel;

@@ -5,7 +5,7 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { Tensor, GraphModel } from '../tfjs/types';
import type { Tensor, GraphModel } from '../tfjs/types';

type DB = Array<{ name: string, source: string, embedding: number[] }>;
let model: GraphModel;

@@ -3,8 +3,8 @@
*/

import { log, join } from '../helpers';
import { Config } from '../config';
import { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';
import type { Tensor, GraphModel } from '../tfjs/types';
import * as tf from '../../dist/tfjs.esm.js';

const annotations = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral'];

@@ -8,8 +8,8 @@ import * as tf from '../dist/tfjs.esm.js';
import * as facemesh from './blazeface/facemesh';
import * as emotion from './emotion/emotion';
import * as faceres from './faceres/faceres';
import { FaceResult } from './result';
import { Tensor } from './tfjs/types';
import type { FaceResult } from './result';
import type { Tensor } from './tfjs/types';

// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
const rad2deg = (theta) => Math.round((theta * 180) / Math.PI);

@@ -250,7 +250,6 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
rotation,
tensor,
});

parent.analyze('End Face');
}
parent.analyze('End FaceMesh:');

@@ -6,8 +6,8 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { Tensor, GraphModel } from '../tfjs/types';
import { Config } from '../config';
import type { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';

let model: GraphModel;
const last: Array<{

@@ -140,7 +140,8 @@ export async function predict(image: Tensor, config: Config, idx, count) {
}
const argmax = tf.argMax(resT.find((t) => t.shape[1] === 100), 1);
const age = (await argmax.data())[0];
const all = await resT.find((t) => t.shape[1] === 100).data(); // inside tf.tidy
tf.dispose(argmax);
const all = await resT.find((t) => t.shape[1] === 100).data();
obj.age = Math.round(all[age - 1] > all[age + 1] ? 10 * age - 100 * all[age - 1] : 10 * age + 100 * all[age + 1]) / 10;

const desc = resT.find((t) => t.shape[1] === 1024);

@@ -151,7 +152,6 @@ export async function predict(image: Tensor, config: Config, idx, count) {
obj.descriptor = [...descriptor];
resT.forEach((t) => tf.dispose(t));
}

last[idx] = obj;
lastCount = count;
resolve(obj);
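Two things happen in the replaced lines: the tensor returned by `tf.argMax` is now explicitly disposed after its single value is read, and the `// inside tf.tidy` note is dropped because the read no longer happens inside a tidy scope. The age formula itself nudges the argmax bin toward its heavier neighbor; a worked example with assumed probabilities:

// worked example of the interpolation above (values are illustrative)
const age = 30;                         // argmax over the 100-bin output
const all = new Array(100).fill(0);
all[29] = 0.2; all[31] = 0.6;           // neighbor bin probabilities
const estimate = Math.round(all[age - 1] > all[age + 1]
  ? 10 * age - 100 * all[age - 1]
  : 10 * age + 100 * all[age + 1]) / 10;
console.log(estimate);                  // 36, shifted toward the heavier neighbor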
@@ -23,8 +23,8 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { Config } from '../config';
import { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import type { GraphModel, Tensor } from '../tfjs/types';

let model: GraphModel;

@@ -5,8 +5,8 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { Config } from '../config';
import { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import type { GraphModel, Tensor } from '../tfjs/types';

let model: GraphModel;
let last = { gender: '' };

@@ -2,7 +2,7 @@
* Gesture detection module
*/

import { GestureResult } from '../result';
import type { GestureResult } from '../result';
import * as fingerPose from '../fingerpose/fingerpose';

/**

@@ -1,7 +1,7 @@
import * as tf from '../../dist/tfjs.esm.js';
import * as box from './box';
import * as anchors from './anchors';
import { Tensor, GraphModel } from '../tfjs/types';
import type { Tensor, GraphModel } from '../tfjs/types';

export class HandDetector {
model: GraphModel;

@@ -1,8 +1,8 @@
import * as tf from '../../dist/tfjs.esm.js';
import * as box from './box';
import * as util from './util';
import * as detector from './handdetector';
import { Tensor, GraphModel } from '../tfjs/types';
import type * as detector from './handdetector';
import type { Tensor, GraphModel } from '../tfjs/types';
import { env } from '../env';

const palmBoxEnlargeFactor = 5; // default 3

@@ -7,9 +7,9 @@ import * as tf from '../../dist/tfjs.esm.js';
import * as handdetector from './handdetector';
import * as handpipeline from './handpipeline';
import * as fingerPose from '../fingerpose/fingerpose';
import { HandResult } from '../result';
import { Tensor, GraphModel } from '../tfjs/types';
import { Config } from '../config';
import type { HandResult } from '../result';
import type { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';

const meshAnnotations = {
thumb: [1, 2, 3, 4],

src/human.ts (60 changes)

@@ -4,7 +4,7 @@
import { log, now, mergeDeep } from './helpers';
import { Config, defaults } from './config';
import { Result, FaceResult, HandResult, BodyResult, ObjectResult, GestureResult } from './result';
import type { Result, FaceResult, HandResult, BodyResult, ObjectResult, GestureResult } from './result';
import * as tf from '../dist/tfjs.esm.js';
import * as models from './models';
import * as face from './face';

@@ -27,7 +27,7 @@ import * as env from './env';
import * as backend from './tfjs/backend';
import * as app from '../package.json';
import * as warmups from './warmup';
import { Tensor, GraphModel } from './tfjs/types';
import type { Tensor, GraphModel } from './tfjs/types';

// export types
export * from './config';

@@ -38,7 +38,7 @@ export { env } from './env';
/** Defines all possible input types for **Human** detection
* @typedef Input Type
*/
export type Input = Tensor | typeof Image | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;
export type Input = Tensor | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas | typeof Image | typeof env.env.Canvas;

/** Events dispatched by `human.events`
* - `create`: triggered when Human object is instantiated

@@ -257,7 +257,7 @@ export class Human {
* @returns Canvas
*/
segmentation(input: Input, background?: Input) {
return segmentation.process(input, background, this.config);
return input ? segmentation.process(input, background, this.config) : null;
}
/** Enhance method performs additional enhancements to face image previously detected for further processing
@@ -373,28 +373,28 @@ export class Human {
await this.load();

timeStamp = now();
this.process = image.process(input, this.config);
const inputTensor = this.process.tensor;
let img = image.process(input, this.config);
this.process = img;
this.performance.image = Math.trunc(now() - timeStamp);
this.analyze('Get Image:');

// run segmentation preprocessing
if (this.config.segmentation.enabled && this.process && inputTensor) {
if (this.config.segmentation.enabled && this.process && img.tensor && img.canvas) {
this.analyze('Start Segmentation:');
this.state = 'run:segmentation';
timeStamp = now();
await segmentation.predict(this.process);
await segmentation.predict(img);
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0) this.performance.segmentation = elapsedTime;
if (this.process.canvas) {
if (img.canvas) {
// replace input
tf.dispose(inputTensor);
this.process = image.process(this.process.canvas, this.config);
tf.dispose(img.tensor);
img = image.process(img.canvas, this.config);
}
this.analyze('End Segmentation:');
}

if (!this.process || !inputTensor) {
if (!img.tensor) {
log('could not convert input to tensor');
resolve({ error: 'could not convert input to tensor' });
return;

@@ -402,7 +402,7 @@ export class Human {
this.emit('image');

timeStamp = now();
this.config.skipFrame = await image.skip(this, inputTensor);
this.config.skipFrame = await image.skip(this.config, img.tensor);
if (!this.performance.frames) this.performance.frames = 0;
if (!this.performance.cached) this.performance.cached = 0;
(this.performance.frames as number)++;

@@ -419,12 +419,12 @@ export class Human {
// run face detection followed by all models that rely on face bounding box: face mesh, age, gender, emotion
if (this.config.async) {
faceRes = this.config.face.enabled ? face.detectFace(this, inputTensor) : [];
faceRes = this.config.face.enabled ? face.detectFace(this, img.tensor) : [];
if (this.performance.face) delete this.performance.face;
} else {
this.state = 'run:face';
timeStamp = now();
faceRes = this.config.face.enabled ? await face.detectFace(this, inputTensor) : [];
faceRes = this.config.face.enabled ? await face.detectFace(this, img.tensor) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0) this.performance.face = elapsedTime;
}

@@ -432,18 +432,18 @@ export class Human {
// run body: can be posenet, blazepose, efficientpose, movenet
this.analyze('Start Body:');
if (this.config.async) {
if (this.config.body.modelPath?.includes('posenet')) bodyRes = this.config.body.enabled ? posenet.predict(inputTensor, this.config) : [];
else if (this.config.body.modelPath?.includes('blazepose')) bodyRes = this.config.body.enabled ? blazepose.predict(inputTensor, this.config) : [];
else if (this.config.body.modelPath?.includes('efficientpose')) bodyRes = this.config.body.enabled ? efficientpose.predict(inputTensor, this.config) : [];
else if (this.config.body.modelPath?.includes('movenet')) bodyRes = this.config.body.enabled ? movenet.predict(inputTensor, this.config) : [];
if (this.config.body.modelPath?.includes('posenet')) bodyRes = this.config.body.enabled ? posenet.predict(img.tensor, this.config) : [];
else if (this.config.body.modelPath?.includes('blazepose')) bodyRes = this.config.body.enabled ? blazepose.predict(img.tensor, this.config) : [];
else if (this.config.body.modelPath?.includes('efficientpose')) bodyRes = this.config.body.enabled ? efficientpose.predict(img.tensor, this.config) : [];
else if (this.config.body.modelPath?.includes('movenet')) bodyRes = this.config.body.enabled ? movenet.predict(img.tensor, this.config) : [];
if (this.performance.body) delete this.performance.body;
} else {
this.state = 'run:body';
timeStamp = now();
if (this.config.body.modelPath?.includes('posenet')) bodyRes = this.config.body.enabled ? await posenet.predict(inputTensor, this.config) : [];
else if (this.config.body.modelPath?.includes('blazepose')) bodyRes = this.config.body.enabled ? await blazepose.predict(inputTensor, this.config) : [];
else if (this.config.body.modelPath?.includes('efficientpose')) bodyRes = this.config.body.enabled ? await efficientpose.predict(inputTensor, this.config) : [];
else if (this.config.body.modelPath?.includes('movenet')) bodyRes = this.config.body.enabled ? await movenet.predict(inputTensor, this.config) : [];
if (this.config.body.modelPath?.includes('posenet')) bodyRes = this.config.body.enabled ? await posenet.predict(img.tensor, this.config) : [];
else if (this.config.body.modelPath?.includes('blazepose')) bodyRes = this.config.body.enabled ? await blazepose.predict(img.tensor, this.config) : [];
else if (this.config.body.modelPath?.includes('efficientpose')) bodyRes = this.config.body.enabled ? await efficientpose.predict(img.tensor, this.config) : [];
else if (this.config.body.modelPath?.includes('movenet')) bodyRes = this.config.body.enabled ? await movenet.predict(img.tensor, this.config) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0) this.performance.body = elapsedTime;
}

@@ -452,12 +452,12 @@ export class Human {
// run handpose
this.analyze('Start Hand:');
if (this.config.async) {
handRes = this.config.hand.enabled ? handpose.predict(inputTensor, this.config) : [];
handRes = this.config.hand.enabled ? handpose.predict(img.tensor, this.config) : [];
if (this.performance.hand) delete this.performance.hand;
} else {
this.state = 'run:hand';
timeStamp = now();
handRes = this.config.hand.enabled ? await handpose.predict(inputTensor, this.config) : [];
handRes = this.config.hand.enabled ? await handpose.predict(img.tensor, this.config) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0) this.performance.hand = elapsedTime;
}

@@ -466,14 +466,14 @@ export class Human {
// run nanodet
this.analyze('Start Object:');
if (this.config.async) {
if (this.config.object.modelPath?.includes('nanodet')) objectRes = this.config.object.enabled ? nanodet.predict(inputTensor, this.config) : [];
else if (this.config.object.modelPath?.includes('centernet')) objectRes = this.config.object.enabled ? centernet.predict(inputTensor, this.config) : [];
if (this.config.object.modelPath?.includes('nanodet')) objectRes = this.config.object.enabled ? nanodet.predict(img.tensor, this.config) : [];
else if (this.config.object.modelPath?.includes('centernet')) objectRes = this.config.object.enabled ? centernet.predict(img.tensor, this.config) : [];
if (this.performance.object) delete this.performance.object;
} else {
this.state = 'run:object';
timeStamp = now();
if (this.config.object.modelPath?.includes('nanodet')) objectRes = this.config.object.enabled ? await nanodet.predict(inputTensor, this.config) : [];
else if (this.config.object.modelPath?.includes('centernet')) objectRes = this.config.object.enabled ? await centernet.predict(inputTensor, this.config) : [];
if (this.config.object.modelPath?.includes('nanodet')) objectRes = this.config.object.enabled ? await nanodet.predict(img.tensor, this.config) : [];
else if (this.config.object.modelPath?.includes('centernet')) objectRes = this.config.object.enabled ? await centernet.predict(img.tensor, this.config) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0) this.performance.object = elapsedTime;
}

@@ -507,7 +507,7 @@ export class Human {
};

// finally dispose input tensor
tf.dispose(inputTensor);
tf.dispose(img.tensor);

// log('Result:', result);
this.emit('detect');
@@ -4,11 +4,11 @@
import * as tf from '../../dist/tfjs.esm.js';
import * as fxImage from './imagefx';
import { Tensor } from '../tfjs/types';
import { Config } from '../config';
import type { Tensor } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';

type Input = Tensor | typeof Image | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;
type Input = Tensor | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas | typeof Image | typeof env.Canvas;

const maxSize = 2048;
// internal temp canvases

@@ -17,6 +17,25 @@ let outCanvas;
// @ts-ignore // imagefx is js module that should be converted to a class
let fx: fxImage.GLImageFilter | null; // instance of imagefx

export function canvas(width, height) {
let c;
if (env.browser) {
if (typeof OffscreenCanvas !== 'undefined') {
c = new OffscreenCanvas(width, height);
} else {
c = document.createElement('canvas');
c.width = width;
c.height = height;
}
} else {
// @ts-ignore // env.canvas is an external monkey-patch
// eslint-disable-next-line new-cap
c = (typeof env.Canvas !== 'undefined') ? new env.Canvas(width, height) : null;
}
if (!c) throw new Error('Human: Cannot create canvas');
return c;
}
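This new `canvas()` helper replaces the `OffscreenCanvas`-vs-`document.createElement('canvas')` ternaries that were previously repeated at each call site, and adds a node path through the monkey-patched `env.Canvas`. A usage sketch, assuming the npm `canvas` package on node:

// browser: resolves to OffscreenCanvas when available, else an HTMLCanvasElement
const scratch = canvas(256, 256);
const ctx = scratch.getContext('2d'); // same 2d API either way

// node: the consumer patches env.Canvas before calling into the library, e.g.
//   const { Canvas } = require('canvas'); // npm 'canvas' package (assumption)
//   env.Canvas = Canvas;
// after which the same canvas(256, 256) call works unchanged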
// process input image and return tensor
// input can be tensor, imagedata, htmlimageelement, htmlvideoelement
// input is resized and run through imagefx filter

@@ -27,6 +46,7 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
if (
!(input instanceof tf.Tensor)
&& !(typeof Image !== 'undefined' && input instanceof Image)
&& !(typeof env.Canvas !== 'undefined' && input instanceof env.Canvas)
&& !(typeof ImageData !== 'undefined' && input instanceof ImageData)
&& !(typeof ImageBitmap !== 'undefined' && input instanceof ImageBitmap)
&& !(typeof HTMLImageElement !== 'undefined' && input instanceof HTMLImageElement)

@@ -39,8 +59,8 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
}
if (input instanceof tf.Tensor) {
// if input is tensor, use as-is
if ((input as Tensor).shape && (input as Tensor).shape.length === 4 && (input as Tensor).shape[0] === 1 && (input as Tensor).shape[3] === 3) tensor = tf.clone(input);
else throw new Error(`Human: Input tensor shape must be [1, height, width, 3] and instead was ${(input as Tensor).shape}`);
if ((input as unknown as Tensor).shape && (input as unknown as Tensor).shape.length === 4 && (input as unknown as Tensor).shape[0] === 1 && (input as unknown as Tensor).shape[3] === 3) tensor = tf.clone(input);
else throw new Error(`Human: Input tensor shape must be [1, height, width, 3] and instead was ${(input as unknown as Tensor).shape}`);
} else {
// check if resizing will be needed
const originalWidth = input['naturalWidth'] || input['videoWidth'] || input['width'] || (input['shape'] && (input['shape'][1] > 0));

@@ -63,15 +83,11 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
if ((config.filter.height || 0) > 0) targetHeight = config.filter.height;
else if ((config.filter.width || 0) > 0) targetHeight = originalHeight * ((config.filter.width || 0) / originalWidth);
if (!targetWidth || !targetHeight) throw new Error('Human: Input cannot determine dimension');
if (!inCanvas || (inCanvas?.width !== targetWidth) || (inCanvas?.height !== targetHeight)) {
inCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement('canvas');
if (inCanvas?.width !== targetWidth) inCanvas.width = targetWidth;
if (inCanvas?.height !== targetHeight) inCanvas.height = targetHeight;
}
if (!inCanvas || (inCanvas?.width !== targetWidth) || (inCanvas?.height !== targetHeight)) inCanvas = canvas(targetWidth, targetHeight);

// draw input to our canvas
const ctx = inCanvas.getContext('2d');
if (input instanceof ImageData) {
if ((typeof ImageData !== 'undefined') && (input instanceof ImageData)) {
ctx.putImageData(input, 0, 0);
} else {
if (config.filter.flip && typeof ctx.translate !== 'undefined') {

@@ -83,11 +99,10 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
ctx.drawImage(input, 0, 0, originalWidth, originalHeight, 0, 0, inCanvas?.width, inCanvas?.height);
}
}

// imagefx transforms using gl
if (config.filter.enabled) {
if (!fx || !outCanvas || (inCanvas.width !== outCanvas.width) || (inCanvas?.height !== outCanvas?.height)) {
outCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(inCanvas?.width, inCanvas?.height) : document.createElement('canvas');
outCanvas = canvas(inCanvas?.width, inCanvas?.height);
if (outCanvas?.width !== inCanvas?.width) outCanvas.width = inCanvas?.width;
if (outCanvas?.height !== inCanvas?.height) outCanvas.height = inCanvas?.height;
// log('created FX filter');

@@ -146,45 +161,58 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
if (outCanvas.data) { // if we have data, just convert to tensor
const shape = [outCanvas.height, outCanvas.width, 3];
pixels = tf.tensor3d(outCanvas.data, shape, 'int32');
} else if (outCanvas instanceof ImageData) { // if input is imagedata, just use it
} else if ((typeof ImageData !== 'undefined') && (outCanvas instanceof ImageData)) { // if input is imagedata, just use it
pixels = tf.browser ? tf.browser.fromPixels(outCanvas) : null;
} else if (config.backend === 'webgl' || config.backend === 'humangl') { // tf kernel-optimized method to get imagedata
// we cant use canvas as-is as it already has a context, so we do a silly one more canvas
const tempCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement('canvas');
const tempCanvas = canvas(targetWidth, targetHeight);
tempCanvas.width = targetWidth;
tempCanvas.height = targetHeight;
const tempCtx = tempCanvas.getContext('2d');
tempCtx?.drawImage(outCanvas, 0, 0);
pixels = tf.browser ? tf.browser.fromPixels(tempCanvas) : null;
pixels = (tf.browser && env.browser) ? tf.browser.fromPixels(tempCanvas) : null;
} else { // cpu and wasm kernel does not implement efficient fromPixels method
// we cant use canvas as-is as it already has a context, so we do a silly one more canvas and do fromPixels on ImageData instead
const tempCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement('canvas');
const tempCanvas = canvas(targetWidth, targetHeight);
tempCanvas.width = targetWidth;
tempCanvas.height = targetHeight;
const tempCtx = tempCanvas.getContext('2d');
tempCtx?.drawImage(outCanvas, 0, 0);
const data = tempCtx?.getImageData(0, 0, targetWidth, targetHeight);
pixels = tf.browser ? tf.browser.fromPixels(data) : null;
tempCtx.drawImage(outCanvas, 0, 0);
const data = tempCtx.getImageData(0, 0, targetWidth, targetHeight);
if (tf.browser && env.browser) {
pixels = tf.browser.fromPixels(data);
} else {
pixels = tf.tidy(() => {
const imageData = tf.tensor(Array.from(data.data), [targetWidth, targetHeight, 4]);
const channels = tf.split(imageData, 4, 2); // split rgba to channels
const rgb = tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb and ignore alpha
const expand = tf.reshape(rgb, [imageData.shape[0], imageData.shape[1], 3]); // move extra dim from the end of tensor and use it as batch number instead
return expand;
});
}
}
if (pixels) {
const casted = tf.cast(pixels, 'float32');
tensor = tf.expandDims(casted, 0);
tf.dispose(pixels);
tf.dispose(casted);
} else {
tensor = tf.zeros([1, targetWidth, targetHeight, 3]);
throw new Error('Human: Cannot create tensor from input');
}
}
}
const canvas = config.filter.return ? outCanvas : null;
return { tensor, canvas };
return { tensor, canvas: (config.filter.return ? outCanvas : null) };
}
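The new cpu/wasm fallback above builds the tensor without `tf.browser.fromPixels`, which those backends cannot rely on outside the browser: the raw RGBA bytes are lifted into an `[h, w, 4]` tensor, split into four channel tensors, and the first three restacked. The same transform on a toy 2x2 image, as a standalone sketch:

import * as tf from '@tensorflow/tfjs';

const rgba = Array.from({ length: 2 * 2 * 4 }, (_, i) => i); // fake pixel bytes
const rgb = tf.tidy(() => {
  const imageData = tf.tensor(rgba, [2, 2, 4]);
  const channels = tf.split(imageData, 4, 2);                           // [r, g, b, a], each [2, 2, 1]
  const stacked = tf.stack([channels[0], channels[1], channels[2]], 2); // [2, 2, 3, 1]
  return tf.reshape(stacked, [2, 2, 3]);                                // drop the trailing singleton dim
});
console.log(rgb.shape); // [2, 2, 3], alpha discarded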
let lastInputSum = 0;
let lastCacheDiff = 1;
export async function skip(instance, input: Tensor) {
if (instance.config.cacheSensitivity === 0) return false;
export async function skip(config, input: Tensor) {
if (config.cacheSensitivity === 0) return false;
const resizeFact = 32;
if (!input.shape[1] || !input.shape[2]) return false;
const reduced: Tensor = tf.image.resizeBilinear(input, [Math.trunc(input.shape[1] / resizeFact), Math.trunc(input.shape[2] / resizeFact)]);

// use tensor sum
/*
const sumT = this.tf.sum(reduced);

@@ -193,17 +221,17 @@ export async function skip(instance, input: Tensor) {
*/
// use js loop sum, faster than uploading tensor to gpu calculating and downloading back
const reducedData = await reduced.data(); // raw image rgb array
tf.dispose(reduced);
let sum = 0;
for (let i = 0; i < reducedData.length / 3; i++) sum += reducedData[3 * i + 2]; // look only at green value of each pixel

reduced.dispose();
const diff = 100 * (Math.max(sum, lastInputSum) / Math.min(sum, lastInputSum) - 1);
lastInputSum = sum;
// if previous frame was skipped, skip this frame if changed more than cacheSensitivity
// if previous frame was not skipped, then look for cacheSensitivity or difference larger than one in previous frame to avoid resetting cache in subsequent frames unnecessarily
const skipFrame = diff < Math.max(instance.config.cacheSensitivity, lastCacheDiff);
const skipFrame = diff < Math.max(config.cacheSensitivity, lastCacheDiff);
// if difference is above 10x threshold, don't use last value to force reset cache for significant change of scenes or images
lastCacheDiff = diff > 10 * instance.config.cacheSensitivity ? 0 : diff;
lastCacheDiff = diff > 10 * config.cacheSensitivity ? 0 : diff;
// console.log('skipFrame', skipFrame, this.config.cacheSensitivity, diff);
return skipFrame;
}
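Two fixes land in `skip()`: the downsampled tensor is now disposed immediately after its data is downloaded (the old `reduced.dispose()` after the loop did release it, only later), and the function takes a plain `config` instead of the whole instance, removing a hidden dependency on the Human object. The skip decision itself reduces the frame to a single scalar; a worked example with assumed sums:

// assume the previous frame's green-channel sum was 1000 and the current is 1012
const lastSum = 1000;
const sum = 1012;
const diff = 100 * (Math.max(sum, lastSum) / Math.min(sum, lastSum) - 1); // = 1.2 (% change)

// with cacheSensitivity = 0.7 and lastCacheDiff = 1, the frame is re-processed:
const skipFrame = diff < Math.max(0.7, 1); // 1.2 < 1 -> false, so no skip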
@@ -1,5 +1,5 @@
import { log } from './helpers';
import { GraphModel } from './tfjs/types';
import type { GraphModel } from './tfjs/types';
import * as facemesh from './blazeface/facemesh';
import * as faceres from './faceres/faceres';
import * as emotion from './emotion/emotion';

@@ -4,9 +4,9 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { BodyResult } from '../result';
import { GraphModel, Tensor } from '../tfjs/types';
import { Config } from '../config';
import type { BodyResult } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';

let model: GraphModel;

@@ -5,9 +5,9 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { labels } from './labels';
import { ObjectResult } from '../result';
import { GraphModel, Tensor } from '../tfjs/types';
import { Config } from '../config';
import type { ObjectResult } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';

let model;

@@ -36,6 +36,7 @@ async function process(res: Tensor, inputSize, outputShape, config: Config) {
tf.dispose(squeezeT);
const stackT = tf.stack([arr[1], arr[0], arr[3], arr[2]], 1); // reorder dims as tf.nms expects y, x
const boxesT = tf.squeeze(stackT);
tf.dispose(stackT);
const scoresT = tf.squeeze(arr[4]);
const classesT = tf.squeeze(arr[5]);
arr.forEach((t) => tf.dispose(t));

@@ -86,6 +87,7 @@ export async function predict(input: Tensor, config: Config): Promise<ObjectResu
const obj = await process(objectT, model.inputSize, outputSize, config);
last = obj;

resolve(obj);
});
}
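Leaks like the missing `tf.dispose(stackT)` above are easiest to find by asserting on `tf.memory().numTensors` around a call; if the count grows across iterations, something in the pipeline is not being released. A sketch of such a check:

import * as tf from '@tensorflow/tfjs';

async function assertNoLeak(label: string, fn: () => Promise<unknown>): Promise<void> {
  const before = tf.memory().numTensors;
  await fn();
  const after = tf.memory().numTensors;
  if (after > before) throw new Error(`${label} leaked ${after - before} tensor(s)`);
}

// usage sketch: await assertNoLeak('detect', () => human.detect(input));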
@@ -5,9 +5,9 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { labels } from './labels';
import { ObjectResult } from '../result';
import { GraphModel, Tensor } from '../tfjs/types';
import { Config } from '../config';
import type { ObjectResult } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';

let model;

@@ -2,7 +2,7 @@
* Module that analyzes existing results and recombines them into a unified person object
*/

import { FaceResult, BodyResult, HandResult, GestureResult, PersonResult } from './result';
import type { FaceResult, BodyResult, HandResult, GestureResult, PersonResult } from './result';

export function join(faces: Array<FaceResult>, bodies: Array<BodyResult>, hands: Array<HandResult>, gestures: Array<GestureResult>, shape: Array<number> | undefined): Array<PersonResult> {
let id = 0;

@@ -6,9 +6,9 @@ import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import * as poses from './poses';
import * as util from './utils';
import { BodyResult } from '../result';
import { Tensor, GraphModel } from '../tfjs/types';
import { Config } from '../config';
import type { BodyResult } from '../result';
import type { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';

let model: GraphModel;
const poseNetOutputs = ['MobilenetV1/offset_2/BiasAdd'/* offsets */, 'MobilenetV1/heatmap_2/BiasAdd'/* heatmapScores */, 'MobilenetV1/displacement_fwd_2/BiasAdd'/* displacementFwd */, 'MobilenetV1/displacement_bwd_2/BiasAdd'/* displacementBwd */];

@@ -1,5 +1,5 @@
import * as kpt from './keypoints';
import { BodyResult } from '../result';
import type { BodyResult } from '../result';

export function eitherPointDoesntMeetConfidence(a: number, b: number, minConfidence: number) {
return (a < minConfidence || b < minConfidence);

@@ -2,8 +2,8 @@
* Type definitions for Human result object
*/

import { Tensor } from './tfjs/types';
import { FaceGesture, BodyGesture, HandGesture, IrisGesture } from './gesture/gesture';
import type { Tensor } from './tfjs/types';
import type { FaceGesture, BodyGesture, HandGesture, IrisGesture } from './gesture/gesture';

/** Face results
* Combined results of face detector, face mesh, age, gender, emotion, embedding, iris models

@@ -186,7 +186,7 @@ export interface Result {
/** global performance object with timing values for each operation */
performance: Record<string, unknown>,
/** optional processed canvas that can be used to draw input on screen */
canvas?: OffscreenCanvas | HTMLCanvasElement | null,
canvas?: OffscreenCanvas | HTMLCanvasElement | null | undefined,
/** timestamp of detection representing the milliseconds elapsed since the UNIX epoch */
readonly timestamp: number,
/** getter property that returns unified persons object */

@@ -5,8 +5,9 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import * as image from '../image/image';
import { GraphModel, Tensor } from '../tfjs/types';
import { Config } from '../config';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';

type Input = Tensor | typeof Image | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;

@@ -36,6 +37,7 @@ export async function predict(input: { tensor: Tensor | null, canvas: OffscreenC
tf.dispose(norm);

const squeeze = tf.squeeze(res, 0);
tf.dispose(res);
let resizeOutput;
if (squeeze.shape[2] === 2) {
// model meet has two channels for fg and bg

@@ -57,16 +59,19 @@ export async function predict(input: { tensor: Tensor | null, canvas: OffscreenC
} else { // model selfie has a single channel that we can use directly
resizeOutput = tf.image.resizeBilinear(squeeze, [width, height]);
}
tf.dispose(squeeze);

if (typeof document === 'undefined') return resizeOutput.data(); // we're running in nodejs so return alpha array as-is
if (env.node) {
const data = await resizeOutput.data();
tf.dispose(resizeOutput);
return data; // we're running in nodejs so return alpha array as-is
}

const overlay = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(width, height) : document.createElement('canvas');
overlay.width = width;
overlay.height = height;
if (tf.browser) await tf.browser.toPixels(resizeOutput, overlay);
tf.dispose(resizeOutput);
tf.dispose(squeeze);
tf.dispose(res);

// get alpha channel data
const alphaCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(width, height) : document.createElement('canvas'); // need one more copy since input may already have gl context so 2d context fails
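The node branch previously returned `resizeOutput.data()` directly, so the function exited before the tensor backing that promise could ever be released. The corrected shape is download-then-dispose, sketched generically:

import * as tf from '@tensorflow/tfjs';

async function tensorToArray(t: tf.Tensor): Promise<Float32Array | Int32Array | Uint8Array> {
  const data = await t.data(); // download the values first
  tf.dispose(t);               // then release the backing tensor
  return data;
}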
@@ -1,8 +1,10 @@
import { log, now, mergeDeep } from './helpers';
import * as sample from './sample';
import * as tf from '../dist/tfjs.esm.js';
import { Config } from './config';
import { Result } from './result';
import * as image from './image/image';
import type { Config } from './config';
import type { Result } from './result';
import { env } from './env';

async function warmupBitmap(instance) {
const b64toBlob = (base64: string, type = 'application/octet-stream') => fetch(`data:${type};base64,${base64}`).then((res) => res.blob());

@@ -24,31 +26,38 @@ async function warmupBitmap(instance) {
async function warmupCanvas(instance) {
return new Promise((resolve) => {
let src;
let size = 0;
// let size = 0;
switch (instance.config.warmup) {
case 'face':
size = 256;
// size = 256;
src = 'data:image/jpeg;base64,' + sample.face;
break;
case 'full':
case 'body':
size = 1200;
// size = 1200;
src = 'data:image/jpeg;base64,' + sample.body;
break;
default:
src = null;
}
// src = encodeURI('../assets/human-sample-upper.jpg');
const img = new Image();
let img;
if (typeof Image !== 'undefined') img = new Image();
// @ts-ignore env.image is an external monkey-patch
else if (env.Image) img = new env.Image();
img.onload = async () => {
const canvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(size, size) : document.createElement('canvas');
canvas.width = img.naturalWidth;
canvas.height = img.naturalHeight;
const ctx = canvas.getContext('2d');
ctx?.drawImage(img, 0, 0);
// const data = ctx?.getImageData(0, 0, canvas.height, canvas.width);
const res = await instance.detect(canvas, instance.config);
resolve(res);
const canvas = image.canvas(img.naturalWidth, img.naturalHeight);
if (!canvas) {
log('Warmup: Canvas not found');
resolve({});
} else {
const ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
// const data = ctx?.getImageData(0, 0, canvas.height, canvas.width);
const tensor = await instance.image(canvas);
const res = await instance.detect(tensor.tensor, instance.config);
resolve(res);
}
};
if (src) img.src = src;
else resolve(null);

@@ -93,7 +102,7 @@ export async function warmup(instance, userConfig?: Partial<Config>): Promise<Re
if (!instance.config.warmup || instance.config.warmup === 'none') return { error: 'null' };
let res;
if (typeof createImageBitmap === 'function') res = await warmupBitmap(instance);
else if (typeof Image !== 'undefined') res = await warmupCanvas(instance);
else if (typeof Image !== 'undefined' || env.Canvas !== undefined) res = await warmupCanvas(instance);
else res = await warmupNode(instance);
const t1 = now();
if (instance.config.debug) log('Warmup', instance.config.warmup, Math.round(t1 - t0), 'ms');
test/build.log (5185 changes): file diff suppressed because it is too large
@@ -5,7 +5,7 @@ const config = {
modelBasePath: 'file://models/',
backend: 'tensorflow',
debug: false,
async: false,
async: true,
face: {
enabled: true,
detector: { enabled: true, rotation: true },

@@ -1,10 +1,15 @@
const tf = require('@tensorflow/tfjs/dist/tf.node.js'); // wasm backend requires tfjs to be loaded first
const wasm = require('@tensorflow/tfjs-backend-wasm/dist/tf-backend-wasm.node.js'); // wasm backend does not get auto-loaded in nodejs
const Human = require('../dist/human.node-wasm.js').default;
const tf = require('@tensorflow/tfjs'); // wasm backend requires tfjs to be loaded first
const wasm = require('@tensorflow/tfjs-backend-wasm'); // wasm backend does not get auto-loaded in nodejs
const { Canvas, Image } = require('canvas');
const Human = require('../dist/human.node-wasm.js');
const test = require('./test-main.js').test;

Human.env.Canvas = Canvas;
Human.env.Image = Image;

const config = {
modelBasePath: 'http://localhost:10030/models/',
// modelBasePath: 'http://localhost:10030/models/',
modelBasePath: 'https://vladmandic.github.io/human/models/',
backend: 'wasm',
wasmPath: 'node_modules/@tensorflow/tfjs-backend-wasm/dist/',
// wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',

@@ -20,12 +25,17 @@ const config = {
},
hand: { enabled: true, rotation: false },
body: { enabled: true },
object: { enabled: true },
object: { enabled: false },
segmentation: { enabled: true },
filter: { enabled: false },
};

// @ts-ignore // in nodejs+wasm must set explicitly before using human
wasm.setWasmPaths(config.wasmPath); tf.setBackend('wasm');
async function main() {
wasm.setWasmPaths(config.wasmPath);
await tf.setBackend('wasm');
await tf.ready();
test(Human.Human, config);
}

test(Human, config);
main();
// @ts-ignore // in nodejs+wasm must set explicitly before using human
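Wrapping the setup in `main()` fixes an initialization-order hazard with the wasm backend: `setWasmPaths` must run before `setBackend('wasm')`, and `tf.ready()` must resolve before the first inference. The sequence in isolation, as a sketch using the public tfjs packages:

import * as tf from '@tensorflow/tfjs';
import { setWasmPaths } from '@tensorflow/tfjs-backend-wasm';

async function initWasm(wasmPath: string): Promise<void> {
  setWasmPaths(wasmPath);      // 1. point the backend at its .wasm binaries
  await tf.setBackend('wasm'); // 2. switch backends (returns a promise)
  await tf.ready();            // 3. wait for kernel registration to complete
}

// usage sketch: await initWasm('node_modules/@tensorflow/tfjs-backend-wasm/dist/');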
@@ -14,10 +14,10 @@ const config = {
description: { enabled: true },
emotion: { enabled: true },
},
hand: { enabled: true, rotation: true },
body: { enabled: true },
object: { enabled: true },
segmentation: { enabled: true },
hand: { enabled: false, rotation: true },
body: { enabled: false },
object: { enabled: false },
segmentation: { enabled: false },
filter: { enabled: false },
};

@@ -19,6 +19,8 @@ const ignoreMessages = [
'cudart_stub.cc',
'cuda_driver.cc:326',
'cpu_allocator_impl.cc',
'--trace-warnings',
'ExperimentalWarning',
];

const status = {

@@ -48,8 +50,9 @@ function logStdIO(ok, test, buffer) {
}

async function runTest(test) {
log.info();
log.info(test, 'start');
return new Promise((resolve) => {
log.info(test, 'start');
const child = fork(path.join(__dirname, test), [], { silent: true });
child.on('message', (data) => logMessage(test, data));
child.on('error', (data) => log.error(test, ':', data.message || data));

@@ -68,6 +71,7 @@ async function testAll() {
process.on('uncaughtException', (data) => log.error('nodejs unhandled exception', data));
log.info('tests:', tests);
for (const test of tests) await runTest(test);
log.info();
log.info('status:', status);
}
484
test/test.log
484
test/test.log
|
@ -1,193 +1,291 @@
2021-09-12 18:34:14 INFO: @vladmandic/human version 2.2.0
2021-09-12 18:34:14 INFO: User: vlado Platform: linux Arch: x64 Node: v16.5.0
2021-09-12 18:34:14 INFO: tests: ["test-node.js","test-node-gpu.js","test-node-wasm.js"]
2021-09-12 18:34:14 INFO: test-node.js start
2021-09-12 18:34:14 STATE: test-node.js passed: create human
2021-09-12 18:34:14 INFO: test-node.js human version: 2.2.0
2021-09-12 18:34:14 INFO: test-node.js platform: linux x64 agent: NodeJS v16.5.0
2021-09-12 18:34:14 INFO: test-node.js tfjs version: 3.9.0
2021-09-12 18:34:15 STATE: test-node.js passed: set backend: tensorflow
2021-09-12 18:34:15 STATE: test-node.js passed: load models
2021-09-12 18:34:15 STATE: test-node.js result: defined models: 14 loaded models: 7
2021-09-12 18:34:15 STATE: test-node.js passed: warmup: none default
2021-09-12 18:34:16 STATE: test-node.js passed: warmup: face default
2021-09-12 18:34:16 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":23.6,"gender":"female"} {"score":0.82,"class":"person"} {"score":0.42,"keypoints":4}
2021-09-12 18:34:16 DATA: test-node.js result: performance: load: 324 total: 1200
2021-09-12 18:34:17 STATE: test-node.js passed: warmup: body default
2021-09-12 18:34:17 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.92,"keypoints":17}
2021-09-12 18:34:17 DATA: test-node.js result: performance: load: 324 total: 1096
2021-09-12 18:34:17 INFO: test-node.js test body variants
2021-09-12 18:34:18 STATE: test-node.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-12 18:34:19 STATE: test-node.js passed: detect: samples/ai-body.jpg posenet
2021-09-12 18:34:19 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.96,"keypoints":16}
2021-09-12 18:34:19 DATA: test-node.js result: performance: load: 324 total: 689
2021-09-12 18:34:19 STATE: test-node.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-12 18:34:20 STATE: test-node.js passed: detect: samples/ai-body.jpg movenet
2021-09-12 18:34:20 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.92,"keypoints":17}
2021-09-12 18:34:20 DATA: test-node.js result: performance: load: 324 total: 231
2021-09-12 18:34:20 STATE: test-node.js passed: detect: random default
2021-09-12 18:34:20 DATA: test-node.js result: face: 0 body: 1 hand: 0 gesture: 0 object: 0 person: 0 {} {} {"score":0,"keypoints":0}
2021-09-12 18:34:20 DATA: test-node.js result: performance: load: 324 total: 625
2021-09-12 18:34:20 INFO: test-node.js test: first instance
2021-09-12 18:34:21 STATE: test-node.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-12 18:34:21 STATE: test-node.js passed: detect: samples/ai-upper.jpg default
2021-09-12 18:34:21 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.69,"keypoints":10}
2021-09-12 18:34:21 DATA: test-node.js result: performance: load: 324 total: 833
2021-09-12 18:34:21 INFO: test-node.js test: second instance
2021-09-12 18:34:22 STATE: test-node.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-12 18:34:22 STATE: test-node.js passed: detect: samples/ai-upper.jpg default
2021-09-12 18:34:22 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.69,"keypoints":10}
2021-09-12 18:34:22 DATA: test-node.js result: performance: load: 5 total: 339
2021-09-12 18:34:22 INFO: test-node.js test: concurrent
2021-09-12 18:34:22 STATE: test-node.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-09-12 18:34:22 STATE: test-node.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-09-12 18:34:23 STATE: test-node.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-12 18:34:24 STATE: test-node.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-12 18:34:24 STATE: test-node.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-12 18:34:25 STATE: test-node.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-12 18:34:25 STATE: test-node.js passed: detect: samples/ai-face.jpg default
2021-09-12 18:34:25 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 6 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.92,"keypoints":10}
2021-09-12 18:34:25 DATA: test-node.js result: performance: load: 5 total: 881
2021-09-12 18:34:26 STATE: test-node.js passed: detect: samples/ai-body.jpg default
2021-09-12 18:34:26 DATA: test-node.js result: face: 0 body: 1 hand: 0 gesture: 1 object: 1 person: 0 {} {"score":0.71,"class":"person"} {"score":0.92,"keypoints":10}
2021-09-12 18:34:26 DATA: test-node.js result: performance: load: 5 total: 881
2021-09-12 18:34:26 STATE: test-node.js passed: detect: samples/ai-upper.jpg default
2021-09-12 18:34:26 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.69,"keypoints":10}
2021-09-12 18:34:26 DATA: test-node.js result: performance: load: 5 total: 990
2021-09-12 18:34:27 STATE: test-node.js passed: detect: samples/ai-body.jpg default
2021-09-12 18:34:27 DATA: test-node.js result: face: 0 body: 1 hand: 0 gesture: 0 object: 1 person: 0 {} {"score":0.72,"class":"person"} {"score":0.92,"keypoints":4}
2021-09-12 18:34:27 DATA: test-node.js result: performance: load: 324 total: 2891
2021-09-12 18:34:27 STATE: test-node.js passed: detect: samples/ai-upper.jpg default
2021-09-12 18:34:27 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 2 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.69,"keypoints":4}
2021-09-12 18:34:27 DATA: test-node.js result: performance: load: 324 total: 2891
2021-09-12 18:34:28 STATE: test-node.js passed: detect: samples/ai-face.jpg default
2021-09-12 18:34:28 DATA: test-node.js result: face: 2 body: 1 hand: 0 gesture: 9 object: 1 person: 2 {"age":23.6,"gender":"female"} {"score":0.82,"class":"person"} {"score":0.47,"keypoints":4}
2021-09-12 18:34:28 DATA: test-node.js result: performance: load: 324 total: 3162
2021-09-12 18:34:28 INFO: test-node.js test complete: 13509 ms
2021-09-12 18:34:28 INFO: test-node-gpu.js start
2021-09-12 18:34:28 WARN: test-node-gpu.js stderr: 2021-09-12 18:34:28.915334: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory
2021-09-12 18:34:28 WARN: test-node-gpu.js stderr: 2021-09-12 18:34:28.962912: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory
2021-09-12 18:34:28 WARN: test-node-gpu.js stderr: 2021-09-12 18:34:28.962948: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (wyse): /proc/driver/nvidia/version does not exist
2021-09-12 18:34:29 STATE: test-node-gpu.js passed: create human
2021-09-12 18:34:29 INFO: test-node-gpu.js human version: 2.2.0
2021-09-12 18:34:29 INFO: test-node-gpu.js platform: linux x64 agent: NodeJS v16.5.0
2021-09-12 18:34:29 INFO: test-node-gpu.js tfjs version: 3.9.0
2021-09-12 18:34:29 STATE: test-node-gpu.js passed: set backend: tensorflow
2021-09-12 18:34:29 STATE: test-node-gpu.js passed: load models
2021-09-12 18:34:29 STATE: test-node-gpu.js result: defined models: 14 loaded models: 7
2021-09-12 18:34:29 STATE: test-node-gpu.js passed: warmup: none default
2021-09-12 18:34:30 STATE: test-node-gpu.js passed: warmup: face default
2021-09-12 18:34:30 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":23.6,"gender":"female"} {"score":0.82,"class":"person"} {"score":0.42,"keypoints":4}
2021-09-12 18:34:30 DATA: test-node-gpu.js result: performance: load: 287 total: 1009
2021-09-12 18:34:31 STATE: test-node-gpu.js passed: warmup: body default
2021-09-12 18:34:31 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.92,"keypoints":17}
2021-09-12 18:34:31 DATA: test-node-gpu.js result: performance: load: 287 total: 1115
2021-09-12 18:34:31 INFO: test-node-gpu.js test body variants
2021-09-12 18:34:32 STATE: test-node-gpu.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-12 18:34:33 STATE: test-node-gpu.js passed: detect: samples/ai-body.jpg posenet
2021-09-12 18:34:33 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.96,"keypoints":16}
2021-09-12 18:34:33 DATA: test-node-gpu.js result: performance: load: 287 total: 734
2021-09-12 18:34:33 STATE: test-node-gpu.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-12 18:34:34 STATE: test-node-gpu.js passed: detect: samples/ai-body.jpg movenet
2021-09-12 18:34:34 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.92,"keypoints":17}
2021-09-12 18:34:34 DATA: test-node-gpu.js result: performance: load: 287 total: 200
2021-09-12 18:34:34 STATE: test-node-gpu.js passed: detect: random default
2021-09-12 18:34:34 DATA: test-node-gpu.js result: face: 0 body: 1 hand: 0 gesture: 0 object: 0 person: 0 {} {} {"score":0,"keypoints":0}
2021-09-12 18:34:34 DATA: test-node-gpu.js result: performance: load: 287 total: 633
2021-09-12 18:34:34 INFO: test-node-gpu.js test: first instance
2021-09-12 18:34:35 STATE: test-node-gpu.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-12 18:34:36 STATE: test-node-gpu.js passed: detect: samples/ai-upper.jpg default
2021-09-12 18:34:36 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.69,"keypoints":10}
2021-09-12 18:34:36 DATA: test-node-gpu.js result: performance: load: 287 total: 838
2021-09-12 18:34:36 INFO: test-node-gpu.js test: second instance
2021-09-12 18:34:36 STATE: test-node-gpu.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-12 18:34:36 STATE: test-node-gpu.js passed: detect: samples/ai-upper.jpg default
2021-09-12 18:34:36 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.69,"keypoints":10}
2021-09-12 18:34:36 DATA: test-node-gpu.js result: performance: load: 11 total: 398
2021-09-12 18:34:36 INFO: test-node-gpu.js test: concurrent
2021-09-12 18:34:36 STATE: test-node-gpu.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-09-12 18:34:36 STATE: test-node-gpu.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-09-12 18:34:37 STATE: test-node-gpu.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-12 18:34:38 STATE: test-node-gpu.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-12 18:34:38 STATE: test-node-gpu.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-12 18:34:39 STATE: test-node-gpu.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-12 18:34:39 STATE: test-node-gpu.js passed: detect: samples/ai-face.jpg default
2021-09-12 18:34:39 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 6 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.92,"keypoints":10}
2021-09-12 18:34:39 DATA: test-node-gpu.js result: performance: load: 11 total: 877
2021-09-12 18:34:40 STATE: test-node-gpu.js passed: detect: samples/ai-body.jpg default
2021-09-12 18:34:40 DATA: test-node-gpu.js result: face: 0 body: 1 hand: 0 gesture: 1 object: 1 person: 0 {} {"score":0.71,"class":"person"} {"score":0.92,"keypoints":10}
2021-09-12 18:34:40 DATA: test-node-gpu.js result: performance: load: 11 total: 877
2021-09-12 18:34:40 STATE: test-node-gpu.js passed: detect: samples/ai-upper.jpg default
2021-09-12 18:34:40 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.69,"keypoints":10}
2021-09-12 18:34:40 DATA: test-node-gpu.js result: performance: load: 11 total: 984
2021-09-12 18:34:42 STATE: test-node-gpu.js passed: detect: samples/ai-body.jpg default
2021-09-12 18:34:42 DATA: test-node-gpu.js result: face: 0 body: 1 hand: 0 gesture: 0 object: 1 person: 0 {} {"score":0.72,"class":"person"} {"score":0.92,"keypoints":4}
2021-09-12 18:34:42 DATA: test-node-gpu.js result: performance: load: 287 total: 3052
2021-09-12 18:34:42 STATE: test-node-gpu.js passed: detect: samples/ai-upper.jpg default
2021-09-12 18:34:42 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 2 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.69,"keypoints":4}
2021-09-12 18:34:42 DATA: test-node-gpu.js result: performance: load: 287 total: 3053
2021-09-12 18:34:42 STATE: test-node-gpu.js passed: detect: samples/ai-face.jpg default
2021-09-12 18:34:42 DATA: test-node-gpu.js result: face: 2 body: 1 hand: 0 gesture: 9 object: 1 person: 2 {"age":23.6,"gender":"female"} {"score":0.82,"class":"person"} {"score":0.47,"keypoints":4}
2021-09-12 18:34:42 DATA: test-node-gpu.js result: performance: load: 287 total: 3324
2021-09-12 18:34:42 INFO: test-node-gpu.js test complete: 13334 ms
2021-09-12 18:34:42 INFO: test-node-wasm.js start
2021-09-12 18:34:42 STATE: test-node-wasm.js passed: model server: http://localhost:10030/models/
2021-09-12 18:34:42 STATE: test-node-wasm.js passed: create human
2021-09-12 18:34:42 INFO: test-node-wasm.js human version: 2.2.0
2021-09-12 18:34:42 INFO: test-node-wasm.js platform: linux x64 agent: NodeJS v16.5.0
2021-09-12 18:34:42 INFO: test-node-wasm.js tfjs version: 3.9.0
2021-09-12 18:34:43 STATE: test-node-wasm.js passed: set backend: wasm
2021-09-12 18:34:43 STATE: test-node-wasm.js passed: load models
2021-09-12 18:34:43 STATE: test-node-wasm.js result: defined models: 14 loaded models: 7
2021-09-12 18:34:43 STATE: test-node-wasm.js passed: warmup: none default
2021-09-12 18:34:43 ERROR: test-node-wasm.js failed: warmup: face default
2021-09-12 18:34:43 ERROR: test-node-wasm.js failed: warmup: body default
2021-09-12 18:34:43 INFO: test-node-wasm.js test body variants
2021-09-12 18:34:45 STATE: test-node-wasm.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-12 18:34:46 STATE: test-node-wasm.js passed: detect: samples/ai-body.jpg posenet
2021-09-12 18:34:46 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"age":28.5,"gender":"female"} {} {"score":0.96,"keypoints":16}
2021-09-12 18:34:46 DATA: test-node-wasm.js result: performance: load: 526 total: 1537
2021-09-12 18:34:48 STATE: test-node-wasm.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-12 18:34:49 STATE: test-node-wasm.js passed: detect: samples/ai-body.jpg movenet
2021-09-12 18:34:49 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"age":28.5,"gender":"female"} {} {"score":0.92,"keypoints":17}
2021-09-12 18:34:49 DATA: test-node-wasm.js result: performance: load: 526 total: 948
2021-09-12 18:34:49 STATE: test-node-wasm.js passed: detect: random default
2021-09-12 18:34:49 DATA: test-node-wasm.js result: face: 0 body: 1 hand: 0 gesture: 1 object: 0 person: 0 {} {} {"score":0.92,"keypoints":17}
2021-09-12 18:34:49 DATA: test-node-wasm.js result: performance: load: 526 total: 98
2021-09-12 18:34:49 INFO: test-node-wasm.js test: first instance
2021-09-12 18:34:49 STATE: test-node-wasm.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-12 18:34:50 STATE: test-node-wasm.js passed: detect: samples/ai-upper.jpg default
2021-09-12 18:34:50 DATA: test-node-wasm.js result: face: 0 body: 1 hand: 0 gesture: 1 object: 0 person: 0 {} {} {"score":0.69,"keypoints":10}
2021-09-12 18:34:50 DATA: test-node-wasm.js result: performance: load: 526 total: 139
2021-09-12 18:34:50 INFO: test-node-wasm.js test: second instance
2021-09-12 18:34:50 STATE: test-node-wasm.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-12 18:34:50 STATE: test-node-wasm.js passed: detect: samples/ai-upper.jpg default
2021-09-12 18:34:50 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"age":28.5,"gender":"female"} {} {"score":0.69,"keypoints":10}
2021-09-12 18:34:50 DATA: test-node-wasm.js result: performance: load: 3 total: 432
2021-09-12 18:34:50 INFO: test-node-wasm.js test: concurrent
2021-09-12 18:34:51 STATE: test-node-wasm.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-09-12 18:34:51 STATE: test-node-wasm.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-09-12 18:34:52 STATE: test-node-wasm.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-12 18:34:54 STATE: test-node-wasm.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-12 18:34:54 STATE: test-node-wasm.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-12 18:34:55 STATE: test-node-wasm.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-12 18:34:55 STATE: test-node-wasm.js passed: detect: samples/ai-face.jpg default
2021-09-12 18:34:55 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 6 object: 0 person: 1 {"age":28.5,"gender":"female"} {} {"score":0.92,"keypoints":10}
2021-09-12 18:34:55 DATA: test-node-wasm.js result: performance: load: 3 total: 849
2021-09-12 18:34:56 STATE: test-node-wasm.js passed: detect: samples/ai-body.jpg default
2021-09-12 18:34:56 DATA: test-node-wasm.js result: face: 0 body: 1 hand: 0 gesture: 1 object: 0 person: 0 {} {} {"score":0.92,"keypoints":10}
2021-09-12 18:34:56 DATA: test-node-wasm.js result: performance: load: 3 total: 849
2021-09-12 18:34:56 STATE: test-node-wasm.js passed: detect: samples/ai-upper.jpg default
2021-09-12 18:34:56 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"age":28.5,"gender":"female"} {} {"score":0.69,"keypoints":10}
2021-09-12 18:34:56 DATA: test-node-wasm.js result: performance: load: 3 total: 914
2021-09-12 18:34:57 STATE: test-node-wasm.js passed: detect: samples/ai-body.jpg default
2021-09-12 18:34:57 DATA: test-node-wasm.js result: face: 0 body: 1 hand: 0 gesture: 1 object: 0 person: 0 {} {} {"score":0.92,"keypoints":10}
2021-09-12 18:34:57 DATA: test-node-wasm.js result: performance: load: 526 total: 2551
2021-09-12 18:34:57 STATE: test-node-wasm.js passed: detect: samples/ai-upper.jpg default
2021-09-12 18:34:57 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"age":29.5,"gender":"female"} {} {"score":0.69,"keypoints":10}
2021-09-12 18:34:57 DATA: test-node-wasm.js result: performance: load: 526 total: 2608
2021-09-12 18:34:57 STATE: test-node-wasm.js passed: detect: samples/ai-face.jpg default
2021-09-12 18:34:57 DATA: test-node-wasm.js result: face: 2 body: 1 hand: 0 gesture: 9 object: 0 person: 2 {"age":23.6,"gender":"female"} {} {"score":0.47,"keypoints":4}
2021-09-12 18:34:57 DATA: test-node-wasm.js result: performance: load: 526 total: 2816
2021-09-12 18:34:57 INFO: test-node-wasm.js test complete: 15083 ms
2021-09-12 18:34:58 INFO: status: {"passed":80,"failed":2}
2021-09-13 13:23:01 INFO: @vladmandic/human version 2.2.0
2021-09-13 13:23:01 INFO: User: vlado Platform: linux Arch: x64 Node: v16.5.0
2021-09-13 13:23:01 INFO: tests: ["test-node.js","test-node-gpu.js","test-node-wasm.js"]
2021-09-13 13:23:01 INFO:
2021-09-13 13:23:01 INFO: test-node.js start
2021-09-13 13:23:01 STATE: test-node.js passed: create human
2021-09-13 13:23:01 INFO: test-node.js human version: 2.2.0
2021-09-13 13:23:01 INFO: test-node.js platform: linux x64 agent: NodeJS v16.5.0
2021-09-13 13:23:01 INFO: test-node.js tfjs version: 3.9.0
2021-09-13 13:23:01 STATE: test-node.js passed: set backend: tensorflow
2021-09-13 13:23:01 STATE: test-node.js tensors 573
2021-09-13 13:23:01 STATE: test-node.js passed: load models
2021-09-13 13:23:01 STATE: test-node.js result: defined models: 14 loaded models: 3
2021-09-13 13:23:01 STATE: test-node.js passed: warmup: none default
2021-09-13 13:23:01 STATE: test-node.js event: image
2021-09-13 13:23:02 STATE: test-node.js event: detect
2021-09-13 13:23:02 STATE: test-node.js event: warmup
2021-09-13 13:23:02 STATE: test-node.js passed: warmup: face default
2021-09-13 13:23:02 DATA: test-node.js result: face: 1 body: 0 hand: 0 gesture: 3 object: 0 person: 1 {"score":1,"age":23.6,"gender":"female"} {} {}
2021-09-13 13:23:02 DATA: test-node.js result: performance: load: 126 total: 447
2021-09-13 13:23:02 STATE: test-node.js event: image
2021-09-13 13:23:02 STATE: test-node.js event: detect
2021-09-13 13:23:02 STATE: test-node.js event: warmup
2021-09-13 13:23:02 STATE: test-node.js passed: warmup: body default
2021-09-13 13:23:02 DATA: test-node.js result: face: 1 body: 0 hand: 0 gesture: 2 object: 0 person: 1 {"score":1,"age":29.5,"gender":"female"} {} {}
2021-09-13 13:23:02 DATA: test-node.js result: performance: load: 126 total: 367
2021-09-13 13:23:02 INFO: test-node.js test default
2021-09-13 13:23:03 STATE: test-node.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-13 13:23:03 STATE: test-node.js event: image
2021-09-13 13:23:03 STATE: test-node.js event: detect
2021-09-13 13:23:03 STATE: test-node.js passed: detect: samples/ai-body.jpg default
2021-09-13 13:23:03 DATA: test-node.js result: face: 1 body: 0 hand: 0 gesture: 2 object: 0 person: 1 {"score":1,"age":29.5,"gender":"female"} {} {}
2021-09-13 13:23:03 DATA: test-node.js result: performance: load: 126 total: 287
2021-09-13 13:23:03 INFO: test-node.js test body variants
2021-09-13 13:23:04 STATE: test-node.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-13 13:23:04 STATE: test-node.js event: image
2021-09-13 13:23:04 STATE: test-node.js event: detect
2021-09-13 13:23:04 STATE: test-node.js passed: detect: samples/ai-body.jpg posenet
2021-09-13 13:23:04 DATA: test-node.js result: face: 1 body: 0 hand: 0 gesture: 2 object: 0 person: 1 {"score":1,"age":29.5,"gender":"female"} {} {}
2021-09-13 13:23:04 DATA: test-node.js result: performance: load: 126 total: 188
2021-09-13 13:23:05 STATE: test-node.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-13 13:23:05 STATE: test-node.js event: image
2021-09-13 13:23:05 STATE: test-node.js event: detect
2021-09-13 13:23:05 STATE: test-node.js passed: detect: samples/ai-body.jpg movenet
2021-09-13 13:23:05 DATA: test-node.js result: face: 1 body: 0 hand: 0 gesture: 2 object: 0 person: 1 {"score":1,"age":29.5,"gender":"female"} {} {}
2021-09-13 13:23:05 DATA: test-node.js result: performance: load: 126 total: 149
2021-09-13 13:23:05 STATE: test-node.js event: image
2021-09-13 13:23:05 STATE: test-node.js event: detect
2021-09-13 13:23:05 STATE: test-node.js passed: detect: random default
2021-09-13 13:23:05 DATA: test-node.js result: face: 0 body: 0 hand: 0 gesture: 0 object: 0 person: 0 {} {} {}
2021-09-13 13:23:05 DATA: test-node.js result: performance: load: 126 total: 79
2021-09-13 13:23:05 INFO: test-node.js test: first instance
2021-09-13 13:23:06 STATE: test-node.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-13 13:23:06 STATE: test-node.js event: image
2021-09-13 13:23:06 STATE: test-node.js event: detect
2021-09-13 13:23:06 STATE: test-node.js passed: detect: samples/ai-upper.jpg default
2021-09-13 13:23:06 DATA: test-node.js result: face: 1 body: 0 hand: 0 gesture: 2 object: 0 person: 1 {"score":1,"age":29.5,"gender":"female"} {} {}
2021-09-13 13:23:06 DATA: test-node.js result: performance: load: 126 total: 348
2021-09-13 13:23:06 INFO: test-node.js test: second instance
2021-09-13 13:23:06 STATE: test-node.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-13 13:23:07 STATE: test-node.js passed: detect: samples/ai-upper.jpg default
2021-09-13 13:23:07 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"score":1,"age":29.5,"gender":"female"} {} {"score":0.69,"keypoints":10}
2021-09-13 13:23:07 DATA: test-node.js result: performance: load: 50 total: 292
2021-09-13 13:23:07 INFO: test-node.js test: concurrent
2021-09-13 13:23:07 STATE: test-node.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-09-13 13:23:07 STATE: test-node.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-09-13 13:23:08 STATE: test-node.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-13 13:23:08 STATE: test-node.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-13 13:23:09 STATE: test-node.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-13 13:23:09 STATE: test-node.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-13 13:23:09 STATE: test-node.js event: image
2021-09-13 13:23:09 STATE: test-node.js event: image
2021-09-13 13:23:09 STATE: test-node.js event: image
2021-09-13 13:23:10 STATE: test-node.js passed: detect: samples/ai-body.jpg default
2021-09-13 13:23:10 DATA: test-node.js result: face: 0 body: 1 hand: 0 gesture: 1 object: 0 person: 0 {} {} {"score":0.69,"keypoints":10}
2021-09-13 13:23:10 DATA: test-node.js result: performance: load: 50 total: 585
2021-09-13 13:23:10 STATE: test-node.js passed: detect: samples/ai-upper.jpg default
2021-09-13 13:23:10 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"score":1,"age":29.5,"gender":"female"} {} {"score":0.69,"keypoints":10}
2021-09-13 13:23:10 DATA: test-node.js result: performance: load: 50 total: 668
2021-09-13 13:23:10 STATE: test-node.js passed: detect: samples/ai-face.jpg default
2021-09-13 13:23:10 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 5 object: 0 person: 1 {"score":0.42,"age":29.5,"gender":"female"} {} {"score":0.47,"keypoints":4}
2021-09-13 13:23:10 DATA: test-node.js result: performance: load: 50 total: 779
2021-09-13 13:23:10 STATE: test-node.js event: detect
2021-09-13 13:23:10 STATE: test-node.js passed: detect: samples/ai-body.jpg default
2021-09-13 13:23:10 DATA: test-node.js result: face: 0 body: 0 hand: 0 gesture: 0 object: 0 person: 0 {} {} {}
2021-09-13 13:23:10 DATA: test-node.js result: performance: load: 126 total: 1111
2021-09-13 13:23:10 STATE: test-node.js event: detect
2021-09-13 13:23:10 STATE: test-node.js passed: detect: samples/ai-upper.jpg default
2021-09-13 13:23:10 DATA: test-node.js result: face: 1 body: 0 hand: 0 gesture: 2 object: 0 person: 1 {"score":1,"age":29.5,"gender":"female"} {} {}
2021-09-13 13:23:10 DATA: test-node.js result: performance: load: 126 total: 1335
2021-09-13 13:23:10 STATE: test-node.js event: detect
2021-09-13 13:23:10 STATE: test-node.js passed: detect: samples/ai-face.jpg default
2021-09-13 13:23:10 DATA: test-node.js result: face: 2 body: 0 hand: 0 gesture: 9 object: 0 person: 2 {"score":1,"age":23.6,"gender":"female"} {} {}
2021-09-13 13:23:10 DATA: test-node.js result: performance: load: 126 total: 1416
2021-09-13 13:23:10 STATE: test-node.js passeed: no memory leak
2021-09-13 13:23:10 INFO: test-node.js events: {"image":10,"detect":10,"warmup":2}
2021-09-13 13:23:10 INFO: test-node.js test complete: 9089 ms
2021-09-13 13:23:10 INFO:
2021-09-13 13:23:10 INFO: test-node-gpu.js start
2021-09-13 13:23:11 WARN: test-node-gpu.js stderr: 2021-09-13 13:23:11.218646: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory
2021-09-13 13:23:11 WARN: test-node-gpu.js stderr: 2021-09-13 13:23:11.265489: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory
2021-09-13 13:23:11 WARN: test-node-gpu.js stderr: 2021-09-13 13:23:11.265541: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (wyse): /proc/driver/nvidia/version does not exist
2021-09-13 13:23:11 STATE: test-node-gpu.js passed: create human
2021-09-13 13:23:11 INFO: test-node-gpu.js human version: 2.2.0
2021-09-13 13:23:11 INFO: test-node-gpu.js platform: linux x64 agent: NodeJS v16.5.0
2021-09-13 13:23:11 INFO: test-node-gpu.js tfjs version: 3.9.0
2021-09-13 13:23:11 STATE: test-node-gpu.js passed: set backend: tensorflow
2021-09-13 13:23:11 STATE: test-node-gpu.js tensors 1456
2021-09-13 13:23:11 STATE: test-node-gpu.js passed: load models
2021-09-13 13:23:11 STATE: test-node-gpu.js result: defined models: 14 loaded models: 7
2021-09-13 13:23:11 STATE: test-node-gpu.js passed: warmup: none default
2021-09-13 13:23:11 STATE: test-node-gpu.js event: image
2021-09-13 13:23:12 STATE: test-node-gpu.js event: detect
2021-09-13 13:23:12 STATE: test-node-gpu.js event: warmup
2021-09-13 13:23:12 STATE: test-node-gpu.js passed: warmup: face default
2021-09-13 13:23:12 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"score":1,"age":23.6,"gender":"female"} {"score":0.82,"class":"person"} {"score":0.42,"keypoints":4}
2021-09-13 13:23:12 DATA: test-node-gpu.js result: performance: load: 285 total: 1003
2021-09-13 13:23:12 STATE: test-node-gpu.js event: image
2021-09-13 13:23:13 STATE: test-node-gpu.js event: detect
2021-09-13 13:23:13 STATE: test-node-gpu.js event: warmup
2021-09-13 13:23:13 STATE: test-node-gpu.js passed: warmup: body default
2021-09-13 13:23:13 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"score":1,"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.92,"keypoints":17}
2021-09-13 13:23:13 DATA: test-node-gpu.js result: performance: load: 285 total: 1026
2021-09-13 13:23:13 INFO: test-node-gpu.js test default
2021-09-13 13:23:14 STATE: test-node-gpu.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-13 13:23:14 STATE: test-node-gpu.js event: image
2021-09-13 13:23:15 STATE: test-node-gpu.js event: detect
2021-09-13 13:23:15 STATE: test-node-gpu.js passed: detect: samples/ai-body.jpg default
2021-09-13 13:23:15 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"score":1,"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.92,"keypoints":17}
2021-09-13 13:23:15 DATA: test-node-gpu.js result: performance: load: 285 total: 578
2021-09-13 13:23:15 INFO: test-node-gpu.js test body variants
2021-09-13 13:23:15 STATE: test-node-gpu.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-13 13:23:15 STATE: test-node-gpu.js event: image
2021-09-13 13:23:16 STATE: test-node-gpu.js event: detect
2021-09-13 13:23:16 STATE: test-node-gpu.js passed: detect: samples/ai-body.jpg posenet
2021-09-13 13:23:16 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"score":1,"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.96,"keypoints":16}
2021-09-13 13:23:16 DATA: test-node-gpu.js result: performance: load: 285 total: 256
2021-09-13 13:23:17 STATE: test-node-gpu.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-13 13:23:17 STATE: test-node-gpu.js event: image
2021-09-13 13:23:17 STATE: test-node-gpu.js event: detect
2021-09-13 13:23:17 STATE: test-node-gpu.js passed: detect: samples/ai-body.jpg movenet
2021-09-13 13:23:17 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"score":1,"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.92,"keypoints":17}
2021-09-13 13:23:17 DATA: test-node-gpu.js result: performance: load: 285 total: 188
2021-09-13 13:23:17 STATE: test-node-gpu.js event: image
2021-09-13 13:23:18 STATE: test-node-gpu.js event: detect
2021-09-13 13:23:18 STATE: test-node-gpu.js passed: detect: random default
2021-09-13 13:23:18 DATA: test-node-gpu.js result: face: 0 body: 1 hand: 0 gesture: 0 object: 0 person: 0 {} {} {"score":0,"keypoints":0}
2021-09-13 13:23:18 DATA: test-node-gpu.js result: performance: load: 285 total: 601
2021-09-13 13:23:18 INFO: test-node-gpu.js test: first instance
2021-09-13 13:23:18 STATE: test-node-gpu.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-13 13:23:18 STATE: test-node-gpu.js event: image
2021-09-13 13:23:19 STATE: test-node-gpu.js event: detect
2021-09-13 13:23:19 STATE: test-node-gpu.js passed: detect: samples/ai-upper.jpg default
2021-09-13 13:23:19 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"score":1,"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.69,"keypoints":10}
2021-09-13 13:23:19 DATA: test-node-gpu.js result: performance: load: 285 total: 882
2021-09-13 13:23:19 INFO: test-node-gpu.js test: second instance
2021-09-13 13:23:19 STATE: test-node-gpu.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-13 13:23:19 STATE: test-node-gpu.js passed: detect: samples/ai-upper.jpg default
2021-09-13 13:23:19 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"score":1,"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.69,"keypoints":10}
2021-09-13 13:23:19 DATA: test-node-gpu.js result: performance: load: 5 total: 364
2021-09-13 13:23:19 INFO: test-node-gpu.js test: concurrent
2021-09-13 13:23:19 STATE: test-node-gpu.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-09-13 13:23:19 STATE: test-node-gpu.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-09-13 13:23:20 STATE: test-node-gpu.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-13 13:23:21 STATE: test-node-gpu.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-13 13:23:22 STATE: test-node-gpu.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-13 13:23:22 STATE: test-node-gpu.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-13 13:23:22 STATE: test-node-gpu.js event: image
2021-09-13 13:23:22 STATE: test-node-gpu.js event: image
2021-09-13 13:23:22 STATE: test-node-gpu.js event: image
2021-09-13 13:23:24 STATE: test-node-gpu.js passed: detect: samples/ai-body.jpg default
2021-09-13 13:23:24 DATA: test-node-gpu.js result: face: 0 body: 1 hand: 0 gesture: 1 object: 1 person: 0 {} {"score":0.71,"class":"person"} {"score":0.69,"keypoints":10}
2021-09-13 13:23:24 DATA: test-node-gpu.js result: performance: load: 5 total: 1886
2021-09-13 13:23:24 STATE: test-node-gpu.js passed: detect: samples/ai-face.jpg default
2021-09-13 13:23:24 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 6 object: 1 person: 1 {"score":0.42,"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.69,"keypoints":10}
2021-09-13 13:23:24 DATA: test-node-gpu.js result: performance: load: 5 total: 1893
2021-09-13 13:23:24 STATE: test-node-gpu.js passed: detect: samples/ai-upper.jpg default
2021-09-13 13:23:24 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"score":1,"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.69,"keypoints":10}
2021-09-13 13:23:24 DATA: test-node-gpu.js result: performance: load: 5 total: 1893
2021-09-13 13:23:24 STATE: test-node-gpu.js event: detect
2021-09-13 13:23:24 STATE: test-node-gpu.js passed: detect: samples/ai-body.jpg default
2021-09-13 13:23:24 DATA: test-node-gpu.js result: face: 0 body: 1 hand: 0 gesture: 1 object: 1 person: 0 {} {"score":0.72,"class":"person"} {"score":0.92,"keypoints":10}
2021-09-13 13:23:24 DATA: test-node-gpu.js result: performance: load: 285 total: 2630
2021-09-13 13:23:25 STATE: test-node-gpu.js event: detect
2021-09-13 13:23:25 STATE: test-node-gpu.js passed: detect: samples/ai-upper.jpg default
2021-09-13 13:23:25 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"score":1,"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.69,"keypoints":10}
2021-09-13 13:23:25 DATA: test-node-gpu.js result: performance: load: 285 total: 2849
2021-09-13 13:23:25 STATE: test-node-gpu.js event: detect
2021-09-13 13:23:25 STATE: test-node-gpu.js passed: detect: samples/ai-face.jpg default
2021-09-13 13:23:25 DATA: test-node-gpu.js result: face: 2 body: 1 hand: 0 gesture: 10 object: 1 person: 2 {"score":1,"age":23.6,"gender":"female"} {"score":0.82,"class":"person"} {"score":0.47,"keypoints":10}
2021-09-13 13:23:25 DATA: test-node-gpu.js result: performance: load: 285 total: 2930
2021-09-13 13:23:25 STATE: test-node-gpu.js passeed: no memory leak
2021-09-13 13:23:25 INFO: test-node-gpu.js events: {"image":10,"detect":10,"warmup":2}
2021-09-13 13:23:25 INFO: test-node-gpu.js test complete: 13900 ms
2021-09-13 13:23:25 INFO:
2021-09-13 13:23:25 INFO: test-node-wasm.js start
2021-09-13 13:23:25 STATE: test-node-wasm.js passed: model server: https://vladmandic.github.io/human/models/
2021-09-13 13:23:25 STATE: test-node-wasm.js passed: create human
2021-09-13 13:23:25 INFO: test-node-wasm.js human version: 2.2.0
2021-09-13 13:23:25 INFO: test-node-wasm.js platform: linux x64 agent: NodeJS v16.5.0
2021-09-13 13:23:25 INFO: test-node-wasm.js tfjs version: 3.9.0
2021-09-13 13:23:27 STATE: test-node-wasm.js passed: set backend: wasm
2021-09-13 13:23:27 STATE: test-node-wasm.js tensors 1189
2021-09-13 13:23:27 STATE: test-node-wasm.js passed: load models
2021-09-13 13:23:27 STATE: test-node-wasm.js result: defined models: 14 loaded models: 6
2021-09-13 13:23:27 STATE: test-node-wasm.js passed: warmup: none default
2021-09-13 13:23:27 STATE: test-node-wasm.js event: image
2021-09-13 13:23:28 STATE: test-node-wasm.js event: detect
2021-09-13 13:23:28 STATE: test-node-wasm.js event: warmup
2021-09-13 13:23:28 STATE: test-node-wasm.js passed: warmup: face default
2021-09-13 13:23:28 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"score":1,"age":23.6,"gender":"female"} {} {"score":0.47,"keypoints":4}
2021-09-13 13:23:28 DATA: test-node-wasm.js result: performance: load: 1483 total: 1112
2021-09-13 13:23:31 STATE: test-node-wasm.js event: image
2021-09-13 13:23:32 STATE: test-node-wasm.js event: detect
2021-09-13 13:23:32 STATE: test-node-wasm.js event: warmup
2021-09-13 13:23:32 STATE: test-node-wasm.js passed: warmup: body default
2021-09-13 13:23:32 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"score":1,"age":28.5,"gender":"female"} {} {"score":0.92,"keypoints":17}
2021-09-13 13:23:32 DATA: test-node-wasm.js result: performance: load: 1483 total: 2773
2021-09-13 13:23:32 INFO: test-node-wasm.js test default
2021-09-13 13:23:34 STATE: test-node-wasm.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-13 13:23:36 STATE: test-node-wasm.js event: image
2021-09-13 13:23:36 STATE: test-node-wasm.js event: detect
2021-09-13 13:23:36 STATE: test-node-wasm.js passed: detect: samples/ai-body.jpg default
2021-09-13 13:23:36 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"score":1,"age":28.5,"gender":"female"} {} {"score":0.92,"keypoints":17}
2021-09-13 13:23:36 DATA: test-node-wasm.js result: performance: load: 1483 total: 2276
2021-09-13 13:23:36 INFO: test-node-wasm.js test body variants
2021-09-13 13:23:38 STATE: test-node-wasm.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-13 13:23:40 STATE: test-node-wasm.js event: image
2021-09-13 13:23:40 STATE: test-node-wasm.js event: detect
2021-09-13 13:23:40 STATE: test-node-wasm.js passed: detect: samples/ai-body.jpg posenet
2021-09-13 13:23:40 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"score":1,"age":28.5,"gender":"female"} {} {"score":0.96,"keypoints":16}
2021-09-13 13:23:40 DATA: test-node-wasm.js result: performance: load: 1483 total: 1821
2021-09-13 13:23:41 STATE: test-node-wasm.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-13 13:23:43 STATE: test-node-wasm.js event: image
2021-09-13 13:23:43 STATE: test-node-wasm.js event: detect
2021-09-13 13:23:43 STATE: test-node-wasm.js passed: detect: samples/ai-body.jpg movenet
2021-09-13 13:23:43 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"score":1,"age":28.5,"gender":"female"} {} {"score":0.92,"keypoints":17}
2021-09-13 13:23:43 DATA: test-node-wasm.js result: performance: load: 1483 total: 1806
2021-09-13 13:23:45 STATE: test-node-wasm.js event: image
2021-09-13 13:23:45 STATE: test-node-wasm.js event: detect
2021-09-13 13:23:45 STATE: test-node-wasm.js passed: detect: random default
2021-09-13 13:23:45 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"score":1,"age":28.5,"gender":"female"} {} {"score":0.92,"keypoints":17}
2021-09-13 13:23:45 DATA: test-node-wasm.js result: performance: load: 1483 total: 1659
2021-09-13 13:23:45 INFO: test-node-wasm.js test: first instance
2021-09-13 13:23:46 STATE: test-node-wasm.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-13 13:23:47 STATE: test-node-wasm.js event: image
2021-09-13 13:23:47 STATE: test-node-wasm.js event: detect
2021-09-13 13:23:47 STATE: test-node-wasm.js passed: detect: samples/ai-upper.jpg default
2021-09-13 13:23:47 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"score":1,"age":28.5,"gender":"female"} {} {"score":0.92,"keypoints":17}
2021-09-13 13:23:47 DATA: test-node-wasm.js result: performance: load: 1483 total: 1751
2021-09-13 13:23:47 INFO: test-node-wasm.js test: second instance
2021-09-13 13:23:48 STATE: test-node-wasm.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-13 13:23:50 STATE: test-node-wasm.js passed: detect: samples/ai-upper.jpg default
2021-09-13 13:23:50 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"score":1,"age":28.5,"gender":"female"} {} {"score":0.92,"keypoints":17}
2021-09-13 13:23:50 DATA: test-node-wasm.js result: performance: load: 5 total: 2299
2021-09-13 13:23:50 INFO: test-node-wasm.js test: concurrent
2021-09-13 13:23:50 STATE: test-node-wasm.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-09-13 13:23:50 STATE: test-node-wasm.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-09-13 13:23:52 STATE: test-node-wasm.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-13 13:23:53 STATE: test-node-wasm.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-09-13 13:23:54 STATE: test-node-wasm.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-13 13:23:55 STATE: test-node-wasm.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-09-13 13:23:56 STATE: test-node-wasm.js event: image
2021-09-13 13:24:00 STATE: test-node-wasm.js event: image
2021-09-13 13:24:03 STATE: test-node-wasm.js event: image
2021-09-13 13:24:05 STATE: test-node-wasm.js passed: detect: samples/ai-face.jpg default
2021-09-13 13:24:05 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"score":1,"age":28.5,"gender":"female"} {} {"score":0.92,"keypoints":17}
2021-09-13 13:24:05 DATA: test-node-wasm.js result: performance: load: 5 total: 10293
2021-09-13 13:24:05 STATE: test-node-wasm.js passed: detect: samples/ai-body.jpg default
2021-09-13 13:24:05 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"score":1,"age":28.5,"gender":"female"} {} {"score":0.92,"keypoints":17}
2021-09-13 13:24:05 DATA: test-node-wasm.js result: performance: load: 5 total: 10293
2021-09-13 13:24:05 STATE: test-node-wasm.js event: detect
2021-09-13 13:24:05 STATE: test-node-wasm.js event: detect
2021-09-13 13:24:05 STATE: test-node-wasm.js event: detect
2021-09-13 13:24:05 STATE: test-node-wasm.js passed: detect: samples/ai-face.jpg default
2021-09-13 13:24:05 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"score":1,"age":28.5,"gender":"female"} {} {"score":0.92,"keypoints":17}
2021-09-13 13:24:05 DATA: test-node-wasm.js result: performance: load: 1483 total: 10296
2021-09-13 13:24:05 STATE: test-node-wasm.js passed: detect: samples/ai-body.jpg default
2021-09-13 13:24:05 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"score":1,"age":28.5,"gender":"female"} {} {"score":0.92,"keypoints":17}
2021-09-13 13:24:05 DATA: test-node-wasm.js result: performance: load: 1483 total: 10296
2021-09-13 13:24:05 STATE: test-node-wasm.js passed: detect: samples/ai-upper.jpg default
2021-09-13 13:24:05 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"score":1,"age":28.5,"gender":"female"} {} {"score":0.92,"keypoints":17}
2021-09-13 13:24:05 DATA: test-node-wasm.js result: performance: load: 1483 total: 10296
2021-09-13 13:24:05 STATE: test-node-wasm.js passed: detect: samples/ai-upper.jpg default
2021-09-13 13:24:05 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"score":1,"age":28.5,"gender":"female"} {} {"score":0.92,"keypoints":17}
2021-09-13 13:24:05 DATA: test-node-wasm.js result: performance: load: 5 total: 10297
2021-09-13 13:24:05 STATE: test-node-wasm.js passeed: no memory leak
2021-09-13 13:24:05 INFO: test-node-wasm.js events: {"image":10,"detect":10,"warmup":2}
2021-09-13 13:24:05 INFO: test-node-wasm.js test complete: 39567 ms
2021-09-13 13:24:05 INFO:
2021-09-13 13:24:05 INFO: status: {"passed":88,"failed":0}
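The "no memory leak" entries in the runs above come from a tensor-count check added in this commit; a minimal sketch of that kind of check, assuming an initialized human instance, a loaded input tensor, and the same log helper (names are illustrative, not the exact test code):

// count live tfjs tensors before and after repeated detection;
// a stable count means detect() disposed every intermediate tensor
const before = human.tf.engine().state.numTensors;
for (let i = 0; i < 10; i++) await human.detect(input);
const after = human.tf.engine().state.numTensors;
if (after - before === 0) log.state(test, 'passed: no memory leak');
else log.error(test, 'failed: memory leak', after - before, 'tensors');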
@ -851,7 +851,7 @@
<a name="segmentation-1" class="tsd-anchor"></a>
<h3>segmentation</h3>
<ul class="tsd-signatures tsd-kind-method tsd-parent-kind-class">
<li class="tsd-signature tsd-kind-icon">segmentation<span class="tsd-signature-symbol">(</span>input<span class="tsd-signature-symbol">: </span><a href="../index.html#Input" class="tsd-signature-type" data-tsd-kind="Type alias">Input</a>, background<span class="tsd-signature-symbol">?: </span><a href="../index.html#Input" class="tsd-signature-type" data-tsd-kind="Type alias">Input</a><span class="tsd-signature-symbol">)</span><span class="tsd-signature-symbol">: </span><span class="tsd-signature-type">Promise</span><span class="tsd-signature-symbol"><</span><span class="tsd-signature-type">null</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">HTMLCanvasElement</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">OffscreenCanvas</span><span class="tsd-signature-symbol">></span></li>
<li class="tsd-signature tsd-kind-icon">segmentation<span class="tsd-signature-symbol">(</span>input<span class="tsd-signature-symbol">: </span><a href="../index.html#Input" class="tsd-signature-type" data-tsd-kind="Type alias">Input</a>, background<span class="tsd-signature-symbol">?: </span><a href="../index.html#Input" class="tsd-signature-type" data-tsd-kind="Type alias">Input</a><span class="tsd-signature-symbol">)</span><span class="tsd-signature-symbol">: </span><span class="tsd-signature-type">null</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">Promise</span><span class="tsd-signature-symbol"><</span><span class="tsd-signature-type">null</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">HTMLCanvasElement</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">OffscreenCanvas</span><span class="tsd-signature-symbol">></span></li>
</ul>
<ul class="tsd-descriptions">
<li class="tsd-description">

@ -876,7 +876,7 @@
<h5><span class="tsd-flag ts-flagOptional">Optional</span> background: <a href="../index.html#Input" class="tsd-signature-type" data-tsd-kind="Type alias">Input</a></h5>
</li>
</ul>
<h4 class="tsd-returns-title">Returns <span class="tsd-signature-type">Promise</span><span class="tsd-signature-symbol"><</span><span class="tsd-signature-type">null</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">HTMLCanvasElement</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">OffscreenCanvas</span><span class="tsd-signature-symbol">></span></h4>
<h4 class="tsd-returns-title">Returns <span class="tsd-signature-type">null</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">Promise</span><span class="tsd-signature-symbol"><</span><span class="tsd-signature-type">null</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">HTMLCanvasElement</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">OffscreenCanvas</span><span class="tsd-signature-symbol">></span></h4>
<p>Canvas</p>
</li>
</ul>
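Per the updated signature above, segmentation() may now return null directly instead of always returning a Promise; awaiting the call handles both shapes, since await on a plain null still yields null. A usage sketch inside an async function (element id and variable names are assumed for illustration):

// run person segmentation and attach the returned canvas, if any
const input = document.getElementById('webcam'); // any Input value
const canvas = await human.segmentation(input); // HTMLCanvasElement | OffscreenCanvas | null
if (canvas) document.body.appendChild(canvas);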
|
|
|
@ -189,7 +189,7 @@
|
|||
<section class="tsd-panel tsd-member tsd-kind-type-alias">
|
||||
<a name="Input" class="tsd-anchor"></a>
|
||||
<h3>Input</h3>
|
||||
<div class="tsd-signature tsd-kind-icon">Input<span class="tsd-signature-symbol">:</span> <span class="tsd-signature-type">Tensor</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-symbol">typeof </span><span class="tsd-signature-type">Image</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">ImageData</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">ImageBitmap</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">HTMLImageElement</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">HTMLMediaElement</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">HTMLVideoElement</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">HTMLCanvasElement</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">OffscreenCanvas</span></div>
|
||||
<div class="tsd-signature tsd-kind-icon">Input<span class="tsd-signature-symbol">:</span> <span class="tsd-signature-type">Tensor</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">ImageData</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">ImageBitmap</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">HTMLImageElement</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">HTMLMediaElement</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">HTMLVideoElement</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">HTMLCanvasElement</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">OffscreenCanvas</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-symbol">typeof </span><span class="tsd-signature-type">Image</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-symbol">typeof </span><span class="tsd-signature-type">env.env.Canvas</span></div>
<aside class="tsd-sources">
<ul>
<li>Defined in <a href="https://github.com/vladmandic/human/blob/main/src/human.ts#L41">human.ts:41</a></li>
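The Input alias above now lists every accepted source explicitly and, in the new signature, adds typeof Image and typeof env.env.Canvas so that Node canvas/image polyfills type-check. A minimal consumer sketch, assuming the public Human API exported by this repository; the option defaults are assumptions:

// Hedged sketch: detect() accepts any member of the Input union.
import Human from '@vladmandic/human';

const human = new Human();
async function analyze(source: HTMLVideoElement | HTMLImageElement) {
  // both element types are members of the Input union accepted by detect()
  const result = await human.detect(source);
  console.log('faces:', result.face.length);
}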
@@ -244,7 +244,7 @@
<div class="tsd-signature tsd-kind-icon">env<span class="tsd-signature-symbol">:</span> <span class="tsd-signature-type">Env</span><span class="tsd-signature-symbol"> = ...</span></div>
<aside class="tsd-sources">
<ul>
<li>Defined in <a href="https://github.com/vladmandic/human/blob/main/src/env.ts#L31">env.ts:31</a></li>
<li>Defined in <a href="https://github.com/vladmandic/human/blob/main/src/env.ts#L33">env.ts:33</a></li>
</ul>
</aside>
</section>
@@ -126,10 +126,10 @@
<section class="tsd-panel tsd-member tsd-kind-property tsd-parent-kind-interface">
<a name="backend" class="tsd-anchor"></a>
<h3>backend</h3>
<div class="tsd-signature tsd-kind-icon">backend<span class="tsd-signature-symbol">:</span> <span class="tsd-signature-type">string</span></div>
<div class="tsd-signature tsd-kind-icon">backend<span class="tsd-signature-symbol">:</span> <span class="tsd-signature-type">""</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"cpu"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"wasm"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"webgl"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"humangl"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"tensorflow"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"webgpu"</span></div>
<aside class="tsd-sources">
<ul>
<li>Defined in <a href="https://github.com/vladmandic/human/blob/main/src/config.ts#L190">config.ts:190</a></li>
<li>Defined in <a href="https://github.com/vladmandic/human/blob/main/src/config.ts#L189">config.ts:189</a></li>
</ul>
</aside>
<div class="tsd-comment tsd-typography">
@@ -290,10 +290,10 @@
<section class="tsd-panel tsd-member tsd-kind-property tsd-parent-kind-interface">
<a name="warmup" class="tsd-anchor"></a>
<h3>warmup</h3>
<div class="tsd-signature tsd-kind-icon">warmup<span class="tsd-signature-symbol">:</span> <span class="tsd-signature-type">string</span></div>
<div class="tsd-signature tsd-kind-icon">warmup<span class="tsd-signature-symbol">:</span> <span class="tsd-signature-type">"none"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"face"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"full"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"body"</span></div>
<aside class="tsd-sources">
<ul>
<li>Defined in <a href="https://github.com/vladmandic/human/blob/main/src/config.ts#L206">config.ts:206</a></li>
<li>Defined in <a href="https://github.com/vladmandic/human/blob/main/src/config.ts#L205">config.ts:205</a></li>
</ul>
</aside>
<div class="tsd-comment tsd-typography">
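Both backend and warmup are narrowed here from plain string to literal unions, so a misconfigured value surfaces at compile time instead of at runtime. A minimal sketch, assuming the Human constructor accepts a partial config as elsewhere in this API:

// Any value outside the unions (e.g. backend: 'webgl2') is now a TypeScript error.
import Human from '@vladmandic/human';

const human = new Human({
  backend: 'humangl', // '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu'
  warmup: 'face',     // 'none' | 'face' | 'full' | 'body'
});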
@@ -162,7 +162,7 @@
<section class="tsd-panel tsd-member tsd-kind-property tsd-parent-kind-interface">
<a name="landmarks" class="tsd-anchor"></a>
<h3>landmarks</h3>
<div class="tsd-signature tsd-kind-icon">landmarks<span class="tsd-signature-symbol">:</span> <span class="tsd-signature-type">Record</span><span class="tsd-signature-symbol"><</span><span class="tsd-signature-type">"index"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"middle"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"pinky"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"ring"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"thumb"</span><span class="tsd-signature-symbol">, </span><span class="tsd-signature-symbol">{ </span>curl<span class="tsd-signature-symbol">: </span><span class="tsd-signature-type">"none"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"half"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"full"</span><span class="tsd-signature-symbol">; </span>direction<span class="tsd-signature-symbol">: </span><span class="tsd-signature-type">"verticalUp"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"verticalDown"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"horizontalLeft"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"horizontalRight"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"diagonalUpRight"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"diagonalUpLeft"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"diagonalDownRight"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"diagonalDownLeft"</span><span class="tsd-signature-symbol"> }</span><span class="tsd-signature-symbol">></span></div>
<div class="tsd-signature tsd-kind-icon">landmarks<span class="tsd-signature-symbol">:</span> <span class="tsd-signature-type">Record</span><span class="tsd-signature-symbol"><</span><span class="tsd-signature-type">"index"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"middle"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"pinky"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"ring"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"thumb"</span><span class="tsd-signature-symbol">, </span><span class="tsd-signature-symbol">{ </span>curl<span class="tsd-signature-symbol">: </span><span class="tsd-signature-type">"none"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"full"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"half"</span><span class="tsd-signature-symbol">; </span>direction<span class="tsd-signature-symbol">: </span><span class="tsd-signature-type">"verticalUp"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"verticalDown"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"horizontalLeft"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"horizontalRight"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"diagonalUpRight"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"diagonalUpLeft"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"diagonalDownRight"</span><span class="tsd-signature-symbol"> | </span><span class="tsd-signature-type">"diagonalDownLeft"</span><span class="tsd-signature-symbol"> }</span><span class="tsd-signature-symbol">></span></div>
<aside class="tsd-sources">
<ul>
<li>Defined in <a href="https://github.com/vladmandic/human/blob/main/src/result.ts#L107">result.ts:107</a></li>
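The two landmarks signatures above differ only in the order TypeScript prints the curl members; both constrain each finger record to literal unions. A self-contained sketch of a value matching this shape:

type Finger = 'index' | 'middle' | 'pinky' | 'ring' | 'thumb';
type Curl = 'none' | 'half' | 'full';
type Direction = 'verticalUp' | 'verticalDown' | 'horizontalLeft' | 'horizontalRight' | 'diagonalUpRight' | 'diagonalUpLeft' | 'diagonalDownRight' | 'diagonalDownLeft';

// An open palm with all fingers extended upward; the values are illustrative only.
const landmarks: Record<Finger, { curl: Curl; direction: Direction }> = {
  thumb: { curl: 'none', direction: 'diagonalUpLeft' },
  index: { curl: 'none', direction: 'verticalUp' },
  middle: { curl: 'none', direction: 'verticalUp' },
  ring: { curl: 'none', direction: 'verticalUp' },
  pinky: { curl: 'none', direction: 'verticalUp' },
};
console.log(landmarks.index.direction);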
@@ -2635,3 +2635,4 @@ declare var OptimizerConstructors: {
};
};
export { add2 as add, backend_util_exports as backend_util, browser_exports as browser, exports_constraints_exports as constraints, dist_exports as data, device_util_exports as device_util, fused_ops_exports as fused, gather_nd_util_exports as gather_util, gpgpu_util_exports as gpgpu_util, exports_initializers_exports as initializers, io_exports as io, isFinite2 as isFinite, isNaN2 as isNaN, kernel_impls_exports as kernel_impls, exports_layers_exports as layers, log5 as log, math_exports as math, exports_metrics_exports as metrics, exports_models_exports as models, ones2 as ones, print2 as print, exports_regularizers_exports as regularizers, round2 as round, scatter_nd_util_exports as scatter_util, serialization_exports as serialization, shared_exports as shared, slice_util_exports as slice_util, sum2 as sum, tanh2 as tanh, tensor_util_exports as tensor_util, test_util_exports as test_util, util_exports as util, version16 as version, version11 as version_converter, version9 as version_core, version13 as version_cpu, version10 as version_layers, version15 as version_wasm, version14 as version_webgl, webgl_util_exports as webgl_util };
//# sourceMappingURL=tfjs.esm.d.ts.map
File diff suppressed because one or more lines are too long
@@ -1,5 +1,5 @@
import { Config } from '../config';
import { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';
import type { Tensor, GraphModel } from '../tfjs/types';
export declare class BlazeFaceModel {
model: GraphModel;
anchorsData: [number, number][];
@@ -8,16 +8,20 @@ export declare class BlazeFaceModel {
config: Config;
constructor(model: any, config: Config);
getBoundingBoxes(inputImage: Tensor, userConfig: Config): Promise<{
boxes: never[];
scaleFactor?: never;
} | {
boxes: {
box: {
startPoint: Tensor;
endPoint: Tensor;
};
landmarks: Tensor;
anchor: number[];
anchor: [number, number] | undefined;
confidence: number;
}[];
scaleFactor: number[];
} | null>;
}>;
}
export declare function load(config: Config): Promise<BlazeFaceModel>;
//# sourceMappingURL=blazeface.d.ts.map
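getBoundingBoxes now always resolves to an object: on invalid input it returns { boxes: [] } (typed boxes: never[]) instead of null, so callers branch on length rather than null-checking. A hedged consumer sketch against the declaration above; the load call and the defaults import are assumptions from the typings elsewhere in this diff:

import { load } from './blazeface';
import type { Tensor } from '../tfjs/types';
import { defaults } from '../config';

async function run(input: Tensor) {
  const model = await load(defaults);
  const result = await model.getBoundingBoxes(input, defaults);
  if (result.boxes.length === 0) return; // the empty result replaces the former null
  for (const detection of result.boxes) console.log(detection.confidence, result.scaleFactor);
}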
@@ -0,0 +1 @@
{"version":3,"file":"blazeface.d.ts","sourceRoot":"","sources":["../../../src/blazeface/blazeface.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EAAE,MAAM,EAAE,MAAM,WAAW,CAAC;AACxC,OAAO,KAAK,EAAE,MAAM,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AAmBxD,qBAAa,cAAc;IACzB,KAAK,EAAE,UAAU,CAAC;IAClB,WAAW,EAAE,CAAC,MAAM,EAAE,MAAM,CAAC,EAAE,CAAC;IAChC,OAAO,EAAE,MAAM,CAAC;IAChB,SAAS,EAAE,MAAM,CAAC;IAClB,MAAM,EAAE,MAAM,CAAC;gBAEH,KAAK,KAAA,EAAE,MAAM,EAAE,MAAM;IAQ3B,gBAAgB,CAAC,UAAU,EAAE,MAAM,EAAE,UAAU,EAAE,MAAM;;;;;iBA4BxB;gBAAE,UAAU,EAAE,MAAM,CAAC;gBAAC,QAAQ,EAAE,MAAM,CAAA;aAAE;uBAAa,MAAM;oBAAU,CAAC,MAAM,EAAE,MAAM,CAAC,GAAG,SAAS;wBAAc,MAAM;;;;CAoB3J;AAED,wBAAsB,IAAI,CAAC,MAAM,EAAE,MAAM,2BAMxC"}
@@ -2,12 +2,12 @@ export declare function scaleBoxCoordinates(box: any, factor: any): {
startPoint: number[];
endPoint: number[];
};
export declare function getBoxSize(box: any): number[];
export declare function getBoxCenter(box: any): any[];
export declare function getBoxSize(box: any): [number, number];
export declare function getBoxCenter(box: any): [number, number];
export declare function cutBoxFromImageAndResize(box: any, image: any, cropSize: any): any;
export declare function enlargeBox(box: any, factor?: number): {
startPoint: number[];
endPoint: any[];
endPoint: number[];
landmarks: any;
};
export declare function squarifyBox(box: any): {
@@ -25,3 +25,4 @@ export declare const createBox: (startEndTensor: any) => {
startPoint: any;
endPoint: any;
};
//# sourceMappingURL=box.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"box.d.ts","sourceRoot":"","sources":["../../../src/blazeface/box.ts"],"names":[],"mappings":"AAEA,wBAAgB,mBAAmB,CAAC,GAAG,KAAA,EAAE,MAAM,KAAA;;;EAI9C;AAED,wBAAgB,UAAU,CAAC,GAAG,KAAA,GAAG,CAAC,MAAM,EAAE,MAAM,CAAC,CAKhD;AAED,wBAAgB,YAAY,CAAC,GAAG,KAAA,GAAG,CAAC,MAAM,EAAE,MAAM,CAAC,CAKlD;AAED,wBAAgB,wBAAwB,CAAC,GAAG,KAAA,EAAE,KAAK,KAAA,EAAE,QAAQ,KAAA,OAU5D;AAED,wBAAgB,UAAU,CAAC,GAAG,KAAA,EAAE,MAAM,SAAM;;;;EAO3C;AAED,wBAAgB,WAAW,CAAC,GAAG,KAAA;;;;EAQ9B;AAED,wBAAgB,6BAA6B,CAAC,SAAS,KAAA;;;;EAMtD;AAED,eAAO,MAAM,UAAU,kBAGtB,CAAC;AAEF,eAAO,MAAM,SAAS;;;CAGpB,CAAC"}
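getBoxSize and getBoxCenter now return [number, number] tuples instead of loosely typed arrays, which makes fixed-arity destructuring safe. A small usage sketch; the box literal is illustrative only, since the functions still accept any:

import { getBoxSize, getBoxCenter } from './box';

const box = { startPoint: [10, 20], endPoint: [110, 220] };
const [width, height] = getBoxSize(box);      // tuple: exactly two numbers
const [centerX, centerY] = getBoxCenter(box); // likewise
console.log(width, height, centerX, centerY);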
@@ -47,3 +47,4 @@ export declare const VTX7: number[];
export declare const UV68: number[][];
export declare const UV33: number[][];
export declare const UV7: number[][];
//# sourceMappingURL=coords.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"coords.d.ts","sourceRoot":"","sources":["../../../src/blazeface/coords.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,gBAAgB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAqC5B,CAAC;AAEF,eAAO,MAAM,wBAAwB;;;GAUpC,CAAC;AAEF,eAAO,MAAM,KAAK,YAqdjB,CAAC;AAEF,eAAO,MAAM,MAAM,UAoF+H,CAAC;AAEnJ,eAAO,MAAM,KAAK,UAOuI,CAAC;AAE1J,eAAO,MAAM,KAAK,UASjB,CAAC;AAEF,eAAO,MAAM,IAAI,UAA8B,CAAC;AAEhD,eAAO,MAAM,KAAK,UAOjB,CAAC;AAEF,eAAO,MAAM,KAAK,UAAyJ,CAAC;AAE5K,eAAO,MAAM,IAAI,UAAkC,CAAC;AAEpD,eAAO,MAAM,IAAI,YAA6B,CAAC;AAE/C,eAAO,MAAM,IAAI,YAA6B,CAAC;AAE/C,eAAO,MAAM,GAAG,YAA4B,CAAC"}
@@ -1,10 +1,11 @@
/**
* FaceMesh & BlazeFace Module entry point
*/
import { GraphModel, Tensor } from '../tfjs/types';
import { FaceResult } from '../result';
import { Config } from '../config';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { FaceResult } from '../result';
import type { Config } from '../config';
export declare function predict(input: Tensor, config: Config): Promise<FaceResult[]>;
export declare function load(config: any): Promise<[unknown, GraphModel | null, GraphModel | null]>;
export declare const triangulation: number[];
export declare const uvmap: number[][];
//# sourceMappingURL=facemesh.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"facemesh.d.ts","sourceRoot":"","sources":["../../../src/blazeface/facemesh.ts"],"names":[],"mappings":"AAAA;;GAEG;AAOH,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,EAAE,MAAM,eAAe,CAAC;AACxD,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,WAAW,CAAC;AAC5C,OAAO,KAAK,EAAE,MAAM,EAAE,MAAM,WAAW,CAAC;AAKxC,wBAAsB,OAAO,CAAC,KAAK,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,UAAU,EAAE,CAAC,CAyClF;AAED,wBAAsB,IAAI,CAAC,MAAM,KAAA,GAAG,OAAO,CAAC,CAAC,OAAO,EAAE,UAAU,GAAG,IAAI,EAAE,UAAU,GAAG,IAAI,CAAC,CAAC,CAsB3F;AAED,eAAO,MAAM,aAAa,UAAgB,CAAC;AAC3C,eAAO,MAAM,KAAK,YAAe,CAAC"}
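The recurring change across these declaration files replaces value imports with import type, which the compiler erases entirely from the emitted JavaScript; only imports actually needed at runtime remain. A generic sketch of the pattern, assuming the debug field from this package's Config interface:

import type { Config } from '../config'; // type-only: erased at compile time
import { defaults } from '../config';    // value import: kept at runtime

function withDebug(config: Config): Config {
  return { ...config, debug: true };
}
console.log(withDebug(defaults).debug);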
@@ -1,12 +1,12 @@
import { GraphModel } from '../tfjs/types';
import { BlazeFaceModel } from './blazeface';
import type { GraphModel } from '../tfjs/types';
import type { BlazeFaceModel } from './blazeface';
export declare class Pipeline {
storedBoxes: Array<{
startPoint: number[];
endPoint: number[];
landmarks: Array<number>;
confidence: number;
faceConfidence?: number;
faceConfidence?: number | undefined;
}>;
boundingBoxDetector: BlazeFaceModel;
meshDetector: GraphModel;
@@ -26,7 +26,7 @@ export declare class Pipeline {
endPoint: number[];
landmarks: any;
};
boxSize: number[];
boxSize: [number, number];
crop: any;
};
getEyeCoords(eyeData: any, eyeBox: any, eyeBoxSize: any, flip?: boolean): {
@@ -45,3 +45,4 @@ export declare class Pipeline {
image: any;
}[] | null>;
}
//# sourceMappingURL=facepipeline.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"facepipeline.d.ts","sourceRoot":"","sources":["../../../src/blazeface/facepipeline.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EAAU,UAAU,EAAE,MAAM,eAAe,CAAC;AACxD,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAoDlD,qBAAa,QAAQ;IACnB,WAAW,EAAE,KAAK,CAAC;QAAE,UAAU,EAAE,MAAM,EAAE,CAAC;QAAC,QAAQ,EAAE,MAAM,EAAE,CAAC;QAAC,SAAS,EAAE,KAAK,CAAC,MAAM,CAAC,CAAC;QAAC,UAAU,EAAE,MAAM,CAAC;QAAC,cAAc,CAAC,EAAE,MAAM,GAAG,SAAS,CAAA;KAAE,CAAC,CAAC;IACpJ,mBAAmB,EAAE,cAAc,CAAC;IACpC,YAAY,EAAE,UAAU,CAAC;IACzB,SAAS,EAAE,UAAU,CAAC;IACtB,OAAO,EAAE,MAAM,CAAC;IAChB,QAAQ,EAAE,MAAM,CAAC;IACjB,QAAQ,EAAE,MAAM,CAAC;IACjB,WAAW,EAAE,MAAM,CAAC;IACpB,OAAO,EAAE,MAAM,CAAC;IAChB,aAAa,EAAE,MAAM,CAAC;gBAEV,mBAAmB,KAAA,EAAE,YAAY,KAAA,EAAE,SAAS,KAAA;IAcxD,kBAAkB,CAAC,SAAS,KAAA,EAAE,GAAG,KAAA,EAAE,KAAK,KAAA,EAAE,cAAc,KAAA;IAmBxD,gCAAgC,CAAC,SAAS,KAAA;IAO1C,SAAS,CAAC,SAAS,KAAA,EAAE,IAAI,KAAA,EAAE,mBAAmB,KAAA,EAAE,mBAAmB,KAAA,EAAE,IAAI,UAAQ;;;;;;;;;IAiBjF,YAAY,CAAC,OAAO,KAAA,EAAE,MAAM,KAAA,EAAE,UAAU,KAAA,EAAE,IAAI,UAAQ;;;;IAgBtD,qBAAqB,CAAC,SAAS,KAAA,EAAE,UAAU,KAAA,EAAE,SAAS,KAAA;IAgBtD,mBAAmB,CAAC,MAAM,KAAA,EAAE,GAAG,KAAA,EAAE,KAAK,KAAA;IAgBhC,WAAW,CAAC,SAAS,KAAA,EAAE,IAAI,KAAA;IA+B3B,OAAO,CAAC,KAAK,KAAA,EAAE,MAAM,KAAA;;;;;;;;CA+H5B"}
@@ -20,3 +20,4 @@ export declare function invertTransformMatrix(matrix: any): any[][];
export declare function rotatePoint(homogeneousCoordinate: any, rotationMatrix: any): number[];
export declare function xyDistanceBetweenPoints(a: any, b: any): number;
export declare function generateAnchors(inputSize: any): [number, number][];
//# sourceMappingURL=util.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"util.d.ts","sourceRoot":"","sources":["../../../src/blazeface/util.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,eAAe,YAAoC,CAAC;AACjE;;;GAGG;AACH,wBAAgB,gBAAgB,CAAC,KAAK,KAAA,UAErC;AAED;;;;GAIG;AACH,wBAAgB,eAAe,CAAC,MAAM,KAAA,EAAE,MAAM,KAAA,UAG7C;AAED,wBAAgB,YAAY,CAAC,GAAG,KAAA,UAE/B;AAED,wBAAgB,sBAAsB,CAAC,CAAC,KAAA,EAAE,CAAC,KAAA,WAE1C;AAED,wBAAgB,GAAG,CAAC,EAAE,KAAA,EAAE,EAAE,KAAA,UAMzB;AAED,wBAAgB,kBAAkB,CAAC,GAAG,KAAA,EAAE,WAAW,KAAA,YAMlD;AAED,wBAAgB,yBAAyB,CAAC,IAAI,KAAA,EAAE,IAAI,KAAA,cAUnD;AAED,wBAAgB,mBAAmB,CAAC,QAAQ,KAAA,EAAE,MAAM,KAAA,cAQnD;AAED,wBAAgB,qBAAqB,CAAC,MAAM,KAAA,WAY3C;AAED,wBAAgB,WAAW,CAAC,qBAAqB,KAAA,EAAE,cAAc,KAAA,YAKhE;AAED,wBAAgB,uBAAuB,CAAC,CAAC,KAAA,EAAE,CAAC,KAAA,UAE3C;AAED,wBAAgB,eAAe,CAAC,SAAS,KAAA,sBAmBxC"}
@@ -1,2 +1,3 @@
export declare const full: string[];
export declare const upper: string[];
//# sourceMappingURL=annotations.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"annotations.d.ts","sourceRoot":"","sources":["../../../src/blazepose/annotations.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,IAAI,UAwChB,CAAC;AAEF,eAAO,MAAM,KAAK,UAgCjB,CAAC"}
@@ -1,8 +1,9 @@
/**
* BlazePose Module
*/
import { Tensor, GraphModel } from '../tfjs/types';
import { BodyResult } from '../result';
import { Config } from '../config';
import type { Tensor, GraphModel } from '../tfjs/types';
import type { BodyResult } from '../result';
import type { Config } from '../config';
export declare function load(config: Config): Promise<GraphModel>;
export declare function predict(image: Tensor, config: Config): Promise<BodyResult[]>;
//# sourceMappingURL=blazepose.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"blazepose.d.ts","sourceRoot":"","sources":["../../../src/blazepose/blazepose.ts"],"names":[],"mappings":"AAAA;;GAEG;AAOH,OAAO,KAAK,EAAE,MAAM,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AACxD,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,WAAW,CAAC;AAC5C,OAAO,KAAK,EAAE,MAAM,EAAE,MAAM,WAAW,CAAC;AAIxC,wBAAsB,IAAI,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,UAAU,CAAC,CAS9D;AAED,wBAAsB,OAAO,CAAC,KAAK,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,UAAU,EAAE,CAAC,CA4ClF"}
@@ -171,7 +171,7 @@ export interface GestureConfig {
*/
export interface Config {
/** Backend used for TFJS operations */
backend: string;
backend: '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu';
/** Path to *.wasm files if backend is set to `wasm` */
wasmPath: string;
/** Print debug statements to console */
@@ -182,7 +182,7 @@ export interface Config {
* - warmup pre-initializes all models for faster inference but can take significant time on startup
* - only used for `webgl` and `humangl` backends
*/
warmup: string;
warmup: 'none' | 'face' | 'full' | 'body';
/** Base model path (typically starting with file://, http:// or https://) for all models
* - individual modelPath values are relative to this path
*/
@@ -214,3 +214,4 @@ export interface Config {
*/
declare const config: Config;
export { config as defaults };
//# sourceMappingURL=config.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"config.d.ts","sourceRoot":"","sources":["../../src/config.ts"],"names":[],"mappings":"AAGA,MAAM,WAAW,kBAAkB;IACjC,SAAS,EAAE,MAAM,CAAC;IAClB,QAAQ,EAAE,OAAO,CAAC;IAClB,WAAW,EAAE,MAAM,CAAC;IACpB,UAAU,EAAE,MAAM,CAAC;IACnB,aAAa,EAAE,MAAM,CAAC;IACtB,YAAY,EAAE,MAAM,CAAC;IACrB,MAAM,EAAE,OAAO,CAAC;CACjB;AAED,MAAM,WAAW,cAAc;IAC7B,OAAO,EAAE,OAAO,CAAC;IACjB,SAAS,EAAE,MAAM,CAAC;CACnB;AAED,MAAM,WAAW,cAAc;IAC7B,OAAO,EAAE,OAAO,CAAC;IACjB,SAAS,EAAE,MAAM,CAAC;CACnB;AAED,MAAM,WAAW,qBAAqB;IACpC,OAAO,EAAE,OAAO,CAAC;IACjB,SAAS,EAAE,MAAM,CAAC;IAClB,UAAU,EAAE,MAAM,CAAC;IACnB,aAAa,EAAE,MAAM,CAAC;CACvB;AAED,MAAM,WAAW,iBAAiB;IAChC,OAAO,EAAE,OAAO,CAAC;IACjB,aAAa,EAAE,MAAM,CAAC;IACtB,UAAU,EAAE,MAAM,CAAC;IACnB,SAAS,EAAE,MAAM,CAAC;CACnB;AAED;;;;;;;;;;EAUE;AACF,MAAM,WAAW,UAAU;IACzB,OAAO,EAAE,OAAO,CAAC;IACjB,QAAQ,EAAE,OAAO,CAAC,kBAAkB,CAAC,CAAC;IACtC,IAAI,EAAE,OAAO,CAAC,cAAc,CAAC,CAAC;IAC9B,IAAI,EAAE,OAAO,CAAC,cAAc,CAAC,CAAC;IAC9B,WAAW,EAAE,OAAO,CAAC,qBAAqB,CAAC,CAAC;IAC5C,OAAO,EAAE,OAAO,CAAC,iBAAiB,CAAC,CAAC;CACrC;AAED;;;;;EAKE;AACF,MAAM,WAAW,UAAU;IACzB,OAAO,EAAE,OAAO,CAAC;IACjB,SAAS,EAAE,MAAM,CAAC;IAClB,WAAW,EAAE,MAAM,CAAC;IACpB,aAAa,EAAE,MAAM,CAAC;IACtB,UAAU,EAAE,MAAM,CAAC;CACpB;AAED;;;;;;;;EAQE;AACF,MAAM,WAAW,UAAU;IACzB,OAAO,EAAE,OAAO,CAAC;IACjB,QAAQ,EAAE,OAAO,CAAC;IAClB,UAAU,EAAE,MAAM,CAAC;IACnB,aAAa,EAAE,MAAM,CAAC;IACtB,YAAY,EAAE,MAAM,CAAC;IACrB,WAAW,EAAE,MAAM,CAAC;IACpB,SAAS,EAAE,OAAO,CAAC;IACnB,QAAQ,EAAE;QACR,SAAS,CAAC,EAAE,MAAM,CAAC;KACpB,CAAC;IACF,QAAQ,EAAE;QACR,SAAS,CAAC,EAAE,MAAM,CAAC;KACpB,CAAC;CACH;AAED;;;;;;EAME;AACF,MAAM,WAAW,YAAY;IAC3B,OAAO,EAAE,OAAO,CAAC;IACjB,SAAS,EAAE,MAAM,CAAC;IAClB,aAAa,EAAE,MAAM,CAAC;IACtB,YAAY,EAAE,MAAM,CAAC;IACrB,WAAW,EAAE,MAAM,CAAC;IACpB,UAAU,EAAE,MAAM,CAAC;CACpB;AAED;;;;;;;;EAQE;AACF,MAAM,WAAW,kBAAkB;IACjC,OAAO,EAAE,OAAO,CAAC;IACjB,SAAS,EAAE,MAAM,CAAC;CACnB;AAED;;EAEE;AACF,MAAM,WAAW,YAAY;IAC3B,OAAO,EAAE,OAAO,CAAC;IACjB;;;;MAIE;IACF,KAAK,EAAE,MAAM,CAAC;IACd;;;;MAIE;IACF,MAAM,EAAE,MAAM,CAAC;IACf,kDAAkD;IAClD,MAAM,EAAE,OAAO,CAAC;IAChB,iCAAiC;IACjC,IAAI,EAAE,OAAO,CAAC;IACd,wCAAwC;IACxC,UAAU,EAAE,MAAM,CAAC;IACnB,2DAA2D;IAC3D,QAAQ,EAAE,MAAM,CAAC;IACjB,yDAAyD;IACzD,SAAS,EAAE,MAAM,CAAC;IAClB,sDAAsD;IACtD,IAAI,EAAE,MAAM,CAAA;IACZ,+DAA+D;IAC/D,UAAU,EAAE,MAAM,CAAC;IACnB,4DAA4D;IAC5D,GAAG,EAAE,MAAM,CAAC;IACZ,qBAAqB;IACrB,QAAQ,EAAE,OAAO,CAAC;IAClB,yBAAyB;IACzB,KAAK,EAAE,OAAO,CAAC;IACf,2BAA2B;IAC3B,OAAO,EAAE,OAAO,CAAC;IACjB,8BAA8B;IAC9B,UAAU,EAAE,OAAO,CAAC;IACpB,+BAA+B;IAC/B,WAAW,EAAE,OAAO,CAAC;IACrB,mCAAmC;IACnC,QAAQ,EAAE,OAAO,CAAC;IAClB,iEAAiE;IACjE,QAAQ,EAAE,MAAM,CAAC;CAClB;AAED,kCAAkC;AAClC,MAAM,WAAW,aAAa;IAC5B,OAAO,EAAE,OAAO,CAAC;CAClB;AAED;;;;;GAKG;AACH,MAAM,WAAW,MAAM;IACrB,uCAAuC;IACvC,OAAO,EAAE,EAAE,GAAG,KAAK,GAAG,MAAM,GAAG,OAAO,GAAG,SAAS,GAAG,YAAY,GAAG,QAAQ,CAAC;IAG7E,uDAAuD;IACvD,QAAQ,EAAE,MAAM,CAAC;IAEjB,wCAAwC;IACxC,KAAK,EAAE,OAAO,CAAC;IAEf,uEAAuE;IACvE,KAAK,EAAE,OAAO,CAAC;IAEf;;;MAGE;IACF,MAAM,EAAE,MAAM,GAAG,MAAM,GAAG,MAAM,GAAG,MAAM,CAAC;IAG1C;;MAEE;IACF,aAAa,EAAE,MAAM,CAAC;IAEtB;;;MAGE;IACF,gBAAgB,EAAE,MAAM,CAAC;IAEzB;;;MAGE;IACF,SAAS,EAAE,OAAO,CAAC;IAEnB;;MAEE;IACF,MAAM,EAAE,OAAO,CAAC,YAAY,CAAC,CAAC;IAG9B,OAAO,EAAE,OAAO,CAAC,aAAa,CAAC,CAAC;IAEhC,IAAI,EAAE,OAAO,CAAC,UAAU,CAAC,CAAC;IAE1B,IAAI,EAAE,OAAO,CAAC,UAAU,CAAC,CAAC;IAE1B,IAAI,EAAE,OAAO,CAAC,UAAU,CAAC,CAAC;IAE1B,MAAM,EAAE,OAAO,CAAC,YAAY,CAAC,CAAC;IAE9B,YAAY,EAAE,OAAO,CAAC,kBAAkB,CAAC,CAAC;CAC3C;AAED;;;GAGG;AACH,QAAA,MAAM,MAAM,EAAE,MA0Jb,CAAC;AACF,OAAO,EAAE,MAAM,IAAI,QAAQ,EAAE,CAAC"}
@@ -50,3 +50,4 @@ export declare function object(inCanvas: HTMLCanvasElement, result: Array<Object
export declare function person(inCanvas: HTMLCanvasElement, result: Array<PersonResult>, drawOptions?: DrawOptions): Promise<void>;
export declare function canvas(inCanvas: HTMLCanvasElement, outCanvas: HTMLCanvasElement): Promise<void>;
export declare function all(inCanvas: HTMLCanvasElement, result: Result, drawOptions?: DrawOptions): Promise<[void, void, void, void, void] | null>;
//# sourceMappingURL=draw.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"draw.d.ts","sourceRoot":"","sources":["../../../src/draw/draw.ts"],"names":[],"mappings":"AAAA;;GAEG;AAIH,OAAO,KAAK,EAAE,MAAM,EAAE,UAAU,EAAE,UAAU,EAAE,UAAU,EAAE,YAAY,EAAE,aAAa,EAAE,YAAY,EAAE,MAAM,WAAW,CAAC;AAEvH;;;;;;;;;;;;;;;;;;;GAmBG;AACH,MAAM,WAAW,WAAW;IAC1B,KAAK,EAAE,MAAM,CAAC;IACd,UAAU,EAAE,MAAM,CAAC;IACnB,WAAW,EAAE,MAAM,CAAC;IACpB,IAAI,EAAE,MAAM,CAAC;IACb,UAAU,EAAE,MAAM,CAAC;IACnB,SAAS,EAAE,MAAM,CAAC;IAClB,SAAS,EAAE,MAAM,CAAC;IAClB,SAAS,EAAE,MAAM,CAAC;IAClB,UAAU,EAAE,OAAO,CAAC;IACpB,UAAU,EAAE,OAAO,CAAC;IACpB,SAAS,EAAE,OAAO,CAAC;IACnB,YAAY,EAAE,OAAO,CAAC;IACtB,QAAQ,EAAE,OAAO,CAAC;IAClB,YAAY,EAAE,OAAO,CAAC;IACtB,QAAQ,EAAE,OAAO,CAAC;IAClB,SAAS,EAAE,OAAO,CAAC;IACnB,cAAc,EAAE,OAAO,CAAC;CACzB;AAED,eAAO,MAAM,OAAO,EAAE,WAkBrB,CAAC;AA2EF,wBAAsB,OAAO,CAAC,QAAQ,EAAE,iBAAiB,EAAE,MAAM,EAAE,KAAK,CAAC,aAAa,CAAC,EAAE,WAAW,CAAC,EAAE,WAAW,iBAuBjH;AAED,wBAAsB,IAAI,CAAC,QAAQ,EAAE,iBAAiB,EAAE,MAAM,EAAE,KAAK,CAAC,UAAU,CAAC,EAAE,WAAW,CAAC,EAAE,WAAW,iBAoG3G;AAED,wBAAsB,IAAI,CAAC,QAAQ,EAAE,iBAAiB,EAAE,MAAM,EAAE,KAAK,CAAC,UAAU,CAAC,EAAE,WAAW,CAAC,EAAE,WAAW,iBA4G3G;AAED,wBAAsB,IAAI,CAAC,QAAQ,EAAE,iBAAiB,EAAE,MAAM,EAAE,KAAK,CAAC,UAAU,CAAC,EAAE,WAAW,CAAC,EAAE,WAAW,iBA+D3G;AAED,wBAAsB,MAAM,CAAC,QAAQ,EAAE,iBAAiB,EAAE,MAAM,EAAE,KAAK,CAAC,YAAY,CAAC,EAAE,WAAW,CAAC,EAAE,WAAW,iBAuB/G;AAED,wBAAsB,MAAM,CAAC,QAAQ,EAAE,iBAAiB,EAAE,MAAM,EAAE,KAAK,CAAC,YAAY,CAAC,EAAE,WAAW,CAAC,EAAE,WAAW,iBAwB/G;AAED,wBAAsB,MAAM,CAAC,QAAQ,EAAE,iBAAiB,EAAE,SAAS,EAAE,iBAAiB,iBAKrF;AAED,wBAAsB,GAAG,CAAC,QAAQ,EAAE,iBAAiB,EAAE,MAAM,EAAE,MAAM,EAAE,WAAW,CAAC,EAAE,WAAW,kDA4B/F"}
@@ -1,8 +1,9 @@
/**
* EfficientPose Module
*/
import { BodyResult } from '../result';
import { GraphModel, Tensor } from '../tfjs/types';
import { Config } from '../config';
import type { BodyResult } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
export declare function load(config: Config): Promise<GraphModel>;
export declare function predict(image: Tensor, config: Config): Promise<BodyResult[]>;
//# sourceMappingURL=efficientpose.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"efficientpose.d.ts","sourceRoot":"","sources":["../../../src/efficientpose/efficientpose.ts"],"names":[],"mappings":"AAAA;;GAEG;AAIH,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,WAAW,CAAC;AAC5C,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,EAAE,MAAM,eAAe,CAAC;AACxD,OAAO,KAAK,EAAE,MAAM,EAAE,MAAM,WAAW,CAAC;AAcxC,wBAAsB,IAAI,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,UAAU,CAAC,CAO9D;AAmBD,wBAAsB,OAAO,CAAC,KAAK,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,UAAU,EAAE,CAAC,CAkElF"}
@@ -1,7 +1,8 @@
/**
* Emotion Module
*/
import { Config } from '../config';
import { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';
import type { Tensor, GraphModel } from '../tfjs/types';
export declare function load(config: Config): Promise<GraphModel>;
export declare function predict(image: Tensor, config: Config, idx: any, count: any): Promise<unknown>;
//# sourceMappingURL=emotion.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"emotion.d.ts","sourceRoot":"","sources":["../../../src/emotion/emotion.ts"],"names":[],"mappings":"AAAA;;GAEG;AAGH,OAAO,KAAK,EAAE,MAAM,EAAE,MAAM,WAAW,CAAC;AACxC,OAAO,KAAK,EAAE,MAAM,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AAaxD,wBAAsB,IAAI,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,UAAU,CAAC,CAO9D;AAED,wBAAsB,OAAO,CAAC,KAAK,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,GAAG,KAAA,EAAE,KAAK,KAAA,oBAuCtE"}
@@ -24,7 +24,10 @@ export interface Env {
adapter: undefined | string;
};
kernels: string[];
Canvas: undefined;
Image: undefined;
}
export declare const env: Env;
export declare function cpuinfo(): void;
export declare function get(): Promise<void>;
//# sourceMappingURL=env.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"env.d.ts","sourceRoot":"","sources":["../../src/env.ts"],"names":[],"mappings":"AAEA,MAAM,WAAW,GAAG;IAClB,OAAO,EAAE,SAAS,GAAG,OAAO,CAAC;IAC7B,IAAI,EAAE,SAAS,GAAG,OAAO,CAAC;IAC1B,MAAM,EAAE,SAAS,GAAG,OAAO,CAAC;IAC5B,QAAQ,EAAE,SAAS,GAAG,MAAM,CAAC;IAC7B,KAAK,EAAE,SAAS,GAAG,MAAM,CAAC;IAC1B,QAAQ,EAAE,MAAM,EAAE,CAAC;IACnB,IAAI,EAAE;QACJ,OAAO,EAAE,SAAS,GAAG,MAAM,CAAC;QAC5B,QAAQ,EAAE,SAAS,GAAG,OAAO,CAAC;KAC/B,CAAC;IACF,IAAI,EAAE;QACJ,SAAS,EAAE,SAAS,GAAG,OAAO,CAAC;QAC/B,IAAI,EAAE,SAAS,GAAG,OAAO,CAAC;QAC1B,WAAW,EAAE,SAAS,GAAG,OAAO,CAAC;KAClC,CAAC;IACF,KAAK,EAAE;QACL,SAAS,EAAE,SAAS,GAAG,OAAO,CAAC;QAC/B,OAAO,EAAE,SAAS,GAAG,MAAM,CAAC;QAC5B,QAAQ,EAAE,SAAS,GAAG,MAAM,CAAC;KAC9B,CAAC;IACF,MAAM,EAAE;QACN,SAAS,EAAE,SAAS,GAAG,OAAO,CAAC;QAC/B,OAAO,EAAE,SAAS,GAAG,MAAM,CAAC;KAC7B,CAAC;IACF,OAAO,EAAE,MAAM,EAAE,CAAC;IAClB,MAAM,EAAE,SAAS,CAAC;IAClB,KAAK,EAAE,SAAS,CAAC;CAClB;AAED,eAAO,MAAM,GAAG,EAAE,GA4BjB,CAAC;AAEF,wBAAgB,OAAO,SAmBtB;AAED,wBAAsB,GAAG,kBAgDxB"}
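The Env interface gains Canvas and Image slots, declared as undefined by default, so that a canvas implementation can be injected when running outside the browser. A hedged sketch, assuming the npm 'canvas' package as the polyfill source; since the declared slot type is undefined, the assignment needs a cast:

import { Canvas, Image } from 'canvas'; // assumption: the Node 'canvas' package
import { env } from './env';            // path assumed relative to these typings

// Inject Node implementations into the slots declared above.
(env as { Canvas: unknown; Image: unknown }).Canvas = Canvas;
(env as { Canvas: unknown; Image: unknown }).Image = Image;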
@@ -2,6 +2,7 @@
* Module that analyzes person age
* Obsolete
*/
import { FaceResult } from './result';
import { Tensor } from './tfjs/types';
import type { FaceResult } from './result';
import type { Tensor } from './tfjs/types';
export declare const detectFace: (parent: any, input: Tensor) => Promise<FaceResult[]>;
//# sourceMappingURL=face.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"face.d.ts","sourceRoot":"","sources":["../../src/face.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAOH,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,UAAU,CAAC;AAC3C,OAAO,KAAK,EAAE,MAAM,EAAE,MAAM,cAAc,CAAC;AAwI3C,eAAO,MAAM,UAAU,uBAAiD,MAAM,KAAG,QAAQ,UAAU,EAAE,CAmHpG,CAAC"}
@@ -3,8 +3,8 @@
* Returns Age, Gender, Descriptor
* Implements Face similarity function
*/
import { Tensor, GraphModel } from '../tfjs/types';
import { Config } from '../config';
import type { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';
declare type DB = Array<{
name: string;
source: string;
@@ -21,3 +21,4 @@ export declare function match(embedding: Array<number>, db: DB, threshold?: numb
export declare function enhance(input: any): Tensor;
export declare function predict(image: Tensor, config: Config, idx: any, count: any): Promise<unknown>;
export {};
//# sourceMappingURL=faceres.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"faceres.d.ts","sourceRoot":"","sources":["../../../src/faceres/faceres.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAIH,OAAO,KAAK,EAAE,MAAM,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AACxD,OAAO,KAAK,EAAE,MAAM,EAAE,MAAM,WAAW,CAAC;AAaxC,aAAK,EAAE,GAAG,KAAK,CAAC;IAAE,IAAI,EAAE,MAAM,CAAC;IAAC,MAAM,EAAE,MAAM,CAAC;IAAC,SAAS,EAAE,MAAM,EAAE,CAAA;CAAE,CAAC,CAAC;AAEvE,wBAAsB,IAAI,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,UAAU,CAAC,CAQ9D;AAED,wBAAgB,UAAU,CAAC,UAAU,EAAE,KAAK,CAAC,MAAM,CAAC,EAAE,UAAU,EAAE,KAAK,CAAC,MAAM,CAAC,EAAE,KAAK,SAAI,GAAG,MAAM,CAWlG;AAED,wBAAgB,KAAK,CAAC,SAAS,EAAE,KAAK,CAAC,MAAM,CAAC,EAAE,EAAE,EAAE,EAAE,EAAE,SAAS,SAAI;;;;;EAUpE;AAED,wBAAgB,OAAO,CAAC,KAAK,KAAA,GAAG,MAAM,CAmDrC;AAED,wBAAsB,OAAO,CAAC,KAAK,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,GAAG,KAAA,EAAE,KAAK,KAAA,oBA8CtE"}
@@ -55,3 +55,4 @@ declare const FingerDirection: {
getName: (value: any) => any;
};
export { Finger, FingerCurl, FingerDirection };
//# sourceMappingURL=description.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"description.d.ts","sourceRoot":"","sources":["../../../src/fingerpose/description.ts"],"names":[],"mappings":"AAAA,QAAA,MAAM,MAAM;;;;;;;;;;;;;;;;;;;;;;;CAwBX,CAAC;AAEF,QAAA,MAAM,UAAU;;;;;;;;;;CAMf,CAAC;AAEF,QAAA,MAAM,eAAe;;;;;;;;;;;;;;;;;;;;CAWpB,CAAC;AAEF,OAAO,EAAE,MAAM,EAAE,UAAU,EAAE,eAAe,EAAE,CAAC"}
@@ -2,3 +2,4 @@ export declare function estimate(landmarks: any): {
curls: number[];
directions: number[];
};
//# sourceMappingURL=estimator.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"estimator.d.ts","sourceRoot":"","sources":["../../../src/fingerpose/estimator.ts"],"names":[],"mappings":"AAqKA,wBAAgB,QAAQ,CAAC,SAAS,KAAA;;;EA0CjC"}
@@ -3,3 +3,4 @@ export declare function match(keypoints: any): {
name: string;
confidence: number;
}[];
//# sourceMappingURL=fingerpose.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"fingerpose.d.ts","sourceRoot":"","sources":["../../../src/fingerpose/fingerpose.ts"],"names":[],"mappings":"AAQA,wBAAgB,OAAO,CAAC,SAAS,KAAA,MAWhC;AAED,wBAAgB,KAAK,CAAC,SAAS,KAAA;UAEF,MAAM;gBAAc,MAAM;IAOtD"}
@@ -10,3 +10,4 @@ export default class Gesture {
setWeight(finger: any, weight: any): void;
matchAgainst(detectedCurls: any, detectedDirections: any): number;
}
//# sourceMappingURL=gesture.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"gesture.d.ts","sourceRoot":"","sources":["../../../src/fingerpose/gesture.ts"],"names":[],"mappings":"AAAA,MAAM,CAAC,OAAO,OAAO,OAAO;IAC1B,IAAI,MAAC;IACL,KAAK,MAAC;IACN,UAAU,MAAC;IACX,OAAO,MAAC;IACR,eAAe,MAAC;gBAEJ,IAAI,KAAA;IAShB,OAAO,CAAC,MAAM,KAAA,EAAE,IAAI,KAAA,EAAE,UAAU,KAAA;IAKhC,YAAY,CAAC,MAAM,KAAA,EAAE,QAAQ,KAAA,EAAE,UAAU,KAAA;IAKzC,SAAS,CAAC,MAAM,KAAA,EAAE,MAAM,KAAA;IAOxB,YAAY,CAAC,aAAa,KAAA,EAAE,kBAAkB,KAAA;CAyC/C"}
@@ -1,3 +1,4 @@
import Gesture from './gesture';
declare const _default: Gesture[];
export default _default;
//# sourceMappingURL=gestures.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"gestures.d.ts","sourceRoot":"","sources":["../../../src/fingerpose/gestures.ts"],"names":[],"mappings":"AACA,OAAO,OAAO,MAAM,WAAW,CAAC;;AAqChC,wBAAmC"}
@@ -1,7 +1,7 @@
/**
* Gesture detection module
*/
import { GestureResult } from '../result';
import type { GestureResult } from '../result';
/**
* @typedef FaceGesture
*/
@@ -22,3 +22,4 @@ export declare const body: (res: any) => GestureResult[];
export declare const face: (res: any) => GestureResult[];
export declare const iris: (res: any) => GestureResult[];
export declare const hand: (res: any) => GestureResult[];
//# sourceMappingURL=gesture.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"gesture.d.ts","sourceRoot":"","sources":["../../../src/gesture/gesture.ts"],"names":[],"mappings":"AAAA;;GAEG;AAEH,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,WAAW,CAAC;AAG/C;;GAEG;AACH,oBAAY,WAAW,GACrB,UAAU,MAAM,GAAG,QAAQ,GAAG,OAAO,EAAE,GACrC,SAAS,MAAM,GAAG,OAAO,MAAM,GAC/B,SAAS,MAAM,QAAQ,GACvB,QAAQ,IAAI,GAAG,MAAM,EAAE,CAAC;AAE5B;;GAEG;AACH,oBAAY,WAAW,GACrB,eAAe,GACb,WAAW,MAAM,GAAG,OAAO,GAAG,IAAI,GAAG,MAAM,EAAE,GAC7C,gBAAgB,CAAC;AAErB;;GAEG;AACH,oBAAY,WAAW,GACrB,WAAW,MAAM,GAAG,OAAO,EAAE,GAC3B,SAAS,MAAM,GAAG,OAAO,OAAO,GAChC,WAAW,CAAC;AAEhB;;GAEG;AACH,oBAAY,WAAW,GACrB,GAAG,OAAO,GAAG,OAAO,GAAG,QAAQ,GAAG,MAAM,GAAG,OAAO,UAAU,GAC1D,GAAG,OAAO,GAAG,OAAO,GAAG,QAAQ,GAAG,MAAM,GAAG,OAAO,KAAK,GACvD,SAAS,GACT,WAAW,CAAC;AAEhB,eAAO,MAAM,IAAI,gBAAU,aAAa,EAkBvC,CAAC;AAEF,eAAO,MAAM,IAAI,gBAAU,aAAa,EAmBvC,CAAC;AAEF,eAAO,MAAM,IAAI,gBAAU,aAAa,EAoCvC,CAAC;AAEF,eAAO,MAAM,IAAI,gBAAU,aAAa,EAkBvC,CAAC"}
@@ -2,3 +2,4 @@ export declare const anchors: {
x: number;
y: number;
}[];
//# sourceMappingURL=anchors.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"anchors.d.ts","sourceRoot":"","sources":["../../../src/handpose/anchors.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,OAAO;;;GAi4FnB,CAAC"}
@@ -22,3 +22,4 @@ export declare function shiftBox(box: any, shiftFactor: any): {
endPoint: any[];
palmLandmarks: any;
};
//# sourceMappingURL=box.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"box.d.ts","sourceRoot":"","sources":["../../../src/handpose/box.ts"],"names":[],"mappings":"AAEA,wBAAgB,UAAU,CAAC,GAAG,KAAA,YAK7B;AAED,wBAAgB,YAAY,CAAC,GAAG,KAAA,SAK/B;AAED,wBAAgB,wBAAwB,CAAC,GAAG,KAAA,EAAE,KAAK,KAAA,EAAE,QAAQ,KAAA,OAU5D;AAED,wBAAgB,mBAAmB,CAAC,GAAG,KAAA,EAAE,MAAM,KAAA;;;;;EAQ9C;AAED,wBAAgB,UAAU,CAAC,GAAG,KAAA,EAAE,MAAM,SAAM;;;;EAO3C;AAED,wBAAgB,WAAW,CAAC,GAAG,KAAA;;;;EAQ9B;AAED,wBAAgB,QAAQ,CAAC,GAAG,KAAA,EAAE,WAAW,KAAA;;;;EASxC"}
@@ -1,4 +1,4 @@
import { Tensor, GraphModel } from '../tfjs/types';
import type { Tensor, GraphModel } from '../tfjs/types';
export declare class HandDetector {
model: GraphModel;
anchors: number[][];
@@ -21,3 +21,4 @@ export declare class HandDetector {
confidence: number;
}[]>;
}
//# sourceMappingURL=handdetector.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"handdetector.d.ts","sourceRoot":"","sources":["../../../src/handpose/handdetector.ts"],"names":[],"mappings":"AAGA,OAAO,KAAK,EAAE,MAAM,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AAExD,qBAAa,YAAY;IACvB,KAAK,EAAE,UAAU,CAAC;IAClB,OAAO,EAAE,MAAM,EAAE,EAAE,CAAC;IACpB,aAAa,EAAE,MAAM,CAAC;IACtB,SAAS,EAAE,MAAM,CAAC;IAClB,eAAe,EAAE,MAAM,CAAC;IACxB,qBAAqB,EAAE,MAAM,CAAC;gBAElB,KAAK,KAAA;IASjB,cAAc,CAAC,KAAK,KAAA;IAYpB,kBAAkB,CAAC,gBAAgB,KAAA,EAAE,KAAK,KAAA;IAOpC,QAAQ,CAAC,KAAK,KAAA,EAAE,MAAM,KAAA;aAUA,MAAM;uBAAiB,MAAM;oBAAc,MAAM;;IAWvE,kBAAkB,CAAC,KAAK,KAAA,EAAE,MAAM,KAAA,GAAG,OAAO,CAAC;QAAE,UAAU,EAAE,MAAM,EAAE,CAAC;QAAC,QAAQ,EAAE,MAAM,EAAE,CAAC;QAAC,aAAa,EAAE,MAAM,EAAE,CAAC;QAAC,UAAU,EAAE,MAAM,CAAA;KAAE,EAAE,CAAC;CAmB9I"}
@@ -1,5 +1,5 @@
import * as detector from './handdetector';
import { GraphModel } from '../tfjs/types';
import type * as detector from './handdetector';
import type { GraphModel } from '../tfjs/types';
export declare class HandPipeline {
handDetector: detector.HandDetector;
handPoseModel: GraphModel;
@@ -29,7 +29,7 @@ export declare class HandPipeline {
};
transformRawCoords(rawCoords: any, box2: any, angle: any, rotationMatrix: any): any;
estimateHands(image: any, config: any): Promise<{
landmarks?: number[] | undefined;
landmarks?: number[];
confidence: number;
box: {
topLeft: number[];
@@ -37,3 +37,4 @@ export declare class HandPipeline {
};
}[]>;
}
//# sourceMappingURL=handpipeline.d.ts.map