mirror of https://github.com/vladmandic/human
add meet and selfie models
parent b04d39e9d3
commit 0deb783bc7

TODO.md (2 lines changed)
@@ -11,7 +11,7 @@ N/A
 ## In Progress
 
 - Switch to TypeScript 4.3
-- Add hints to Demo app
+- Implement segmentation model
 
 ## Known Issues
 
@@ -27,9 +27,18 @@ onmessage = async (msg) => {
     result.error = err.message;
     log('worker thread error:', err.message);
   }
-  // must strip canvas from return value as it cannot be transfered from worker thread
-  if (result.canvas) result.canvas = null;
+  if (result.canvas) { // convert canvas to imageData and send it by reference
+    const ctx = result.canvas.getContext('2d');
+    const img = ctx?.getImageData(0, 0, result.canvas.width, result.canvas.height);
+    result.canvas = null; // must strip original canvas from return value as it cannot be transfered from worker thread
+    // @ts-ignore tslint wrong type matching for worker
+    if (img) postMessage({ result, image: img.data.buffer, width: msg.data.width, height: msg.data.height }, [img?.data.buffer]);
+    // @ts-ignore tslint wrong type matching for worker
+    else postMessage({ result });
+  } else {
     // @ts-ignore tslint wrong type matching for worker
     postMessage({ result });
+  }
   busy = false;
};
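The hunk above switches the demo worker from dropping the processed canvas to shipping it back as raw pixel data with a transferred ArrayBuffer. Below is a minimal standalone sketch of that pattern; `detect` and the message shape are illustrative assumptions, not the project's exact API:

```js
// worker.js — zero-copy result passing from a worker (sketch, not Human's API)
onmessage = async (msg) => {
  const result = await detect(msg.data); // hypothetical detection call
  if (result.canvas) {
    const ctx = result.canvas.getContext('2d');
    const img = ctx.getImageData(0, 0, result.canvas.width, result.canvas.height);
    result.canvas = null; // canvas objects cannot be structured-cloned out of a worker
    // listing img.data.buffer in the transfer list moves the pixel buffer
    // to the main thread without copying it
    postMessage({ result, image: img.data.buffer, width: img.width, height: img.height }, [img.data.buffer]);
  } else {
    postMessage({ result });
  }
};
```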
@@ -38,19 +38,21 @@ const userConfig = {
     enabled: false,
     flip: false,
   },
-  face: { enabled: true,
+  face: { enabled: false,
     detector: { return: true },
     mesh: { enabled: true },
     iris: { enabled: false },
     description: { enabled: false },
     emotion: { enabled: false },
   },
-  hand: { enabled: false },
-  // body: { enabled: true, modelPath: 'posenet.json' },
-  // body: { enabled: true, modelPath: 'blazepose.json' },
-  body: { enabled: false },
   object: { enabled: false },
   gesture: { enabled: true },
+  hand: { enabled: false },
+  body: { enabled: false },
+  // body: { enabled: true, modelPath: 'posenet.json' },
+  // body: { enabled: true, modelPath: 'blazepose.json' },
+  // segmentation: { enabled: true, modelPath: 'meet.json' },
+  // segmentation: { enabled: true, modelPath: 'selfie.json' },
   */
 };
 
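For reference, a sketch of what enabling the new commented-out option would look like; `meet.json` and `selfie.json` are the two model files this commit adds, and the rest of the config shape is abbreviated:

```js
// minimal sketch: turning on segmentation in the demo userConfig
const userConfig = {
  segmentation: { enabled: true, modelPath: 'selfie.json' }, // or 'meet.json'
};
```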
@@ -267,11 +269,13 @@ async function drawResults(input) {
   if (ui.buffered) {
     ui.drawThread = requestAnimationFrame(() => drawResults(input));
   } else {
+    if (ui.drawThread) {
       log('stopping buffered refresh');
-      if (ui.drawThread) cancelAnimationFrame(ui.drawThread);
+      cancelAnimationFrame(ui.drawThread);
       ui.drawThread = null;
     }
   }
+}
 
 // setup webcam
 let initialCameraAccess = true;
@@ -350,6 +354,8 @@ async function setupCamera() {
   video.onloadeddata = () => {
     if (settings.width > settings.height) canvas.style.width = '100vw';
     else canvas.style.height = '100vh';
+    canvas.width = video.videoWidth;
+    canvas.height = video.videoHeight;
     ui.menuWidth.input.setAttribute('value', video.videoWidth);
     ui.menuHeight.input.setAttribute('value', video.videoHeight);
     if (live) video.play();
@@ -400,6 +406,16 @@ function webWorker(input, image, canvas, timestamp) {
     }
     if (document.getElementById('gl-bench')) document.getElementById('gl-bench').style.display = ui.bench ? 'block' : 'none';
     lastDetectedResult = msg.data.result;
+
+    if (msg.data.image) {
+      lastDetectedResult.canvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(msg.data.width, msg.data.height) : document.createElement('canvas');
+      lastDetectedResult.canvas.width = msg.data.width;
+      lastDetectedResult.canvas.height = msg.data.height;
+      const ctx = lastDetectedResult.canvas.getContext('2d');
+      const imageData = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
+      ctx.putImageData(imageData, 0, 0);
+    }
+
     ui.framesDetect++;
     if (!ui.drawThread) drawResults(input);
     // eslint-disable-next-line no-use-before-define
File diff suppressed because one or more lines are too long (5 files)
@@ -213,6 +213,10 @@ var config = {
     iouThreshold: 0.4,
     maxDetected: 10,
     skipFrames: 19
+  },
+  segmentation: {
+    enabled: false,
+    modelPath: "selfie.json"
   }
 };
 
@@ -238,7 +242,7 @@ function info() {
 }
 
 // src/human.ts
-var tf20 = __toModule(require_tfjs_esm());
+var tf21 = __toModule(require_tfjs_esm());
 
 // src/tfjs/backend.ts
 var tf = __toModule(require_tfjs_esm());
@@ -312,13 +316,6 @@ function register() {
 var tf8 = __toModule(require_tfjs_esm());
 
 // src/blazeface/facemesh.ts
-var facemesh_exports = {};
-__export(facemesh_exports, {
-  load: () => load2,
-  predict: () => predict,
-  triangulation: () => triangulation,
-  uvmap: () => uvmap
-});
 var tf5 = __toModule(require_tfjs_esm());
 
 // src/blazeface/blazeface.ts
@@ -343,16 +340,16 @@ function getBoxCenter(box6) {
     box6.startPoint[1] + (box6.endPoint[1] - box6.startPoint[1]) / 2
   ];
 }
-function cutBoxFromImageAndResize(box6, image15, cropSize) {
-  const h = image15.shape[1];
-  const w = image15.shape[2];
+function cutBoxFromImageAndResize(box6, image16, cropSize) {
+  const h = image16.shape[1];
+  const w = image16.shape[2];
   const boxes = [[
     box6.startPoint[1] / h,
     box6.startPoint[0] / w,
     box6.endPoint[1] / h,
     box6.endPoint[0] / w
   ]];
-  return tf2.image.cropAndResize(image15, boxes, [0], cropSize);
+  return tf2.image.cropAndResize(image16, boxes, [0], cropSize);
 }
 function enlargeBox(box6, factor = 1.5) {
   const center = getBoxCenter(box6);
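Aside: the boxes fed to tf.image.cropAndResize are normalized [y1, x1, y2, x2] values in the 0..1 range, which is why the function above divides y coordinates by the tensor height and x coordinates by the width. A small self-contained sketch with illustrative values:

```js
const tf = require('@tensorflow/tfjs'); // assumption: tfjs is installed
// crop the region from (x=50, y=20) to (x=150, y=120) out of a 1x200x300x3 batch
const image = tf.zeros([1, 200, 300, 3]);
const h = 200, w = 300;
const boxes = [[20 / h, 50 / w, 120 / h, 150 / w]]; // [y1, x1, y2, x2], normalized
const crop = tf.image.cropAndResize(image, boxes, [0], [64, 64]); // shape [1, 64, 64, 3]
```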
@@ -486,11 +483,11 @@ function decodeBounds(boxOutputs, anchors3, inputSize) {
   return tf3.concat2d([startNormalized, endNormalized], concatAxis);
 }
 var BlazeFaceModel = class {
-  constructor(model9, config3) {
-    this.model = model9;
-    this.anchorsData = generateAnchors(model9.inputs[0].shape[1]);
+  constructor(model10, config3) {
+    this.model = model10;
+    this.anchorsData = generateAnchors(model10.inputs[0].shape[1]);
     this.anchors = tf3.tensor2d(this.anchorsData);
-    this.inputSize = model9.inputs[0].shape[2];
+    this.inputSize = model10.inputs[0].shape[2];
     this.config = config3;
   }
   async getBoundingBoxes(inputImage) {
@@ -539,12 +536,12 @@ var BlazeFaceModel = class {
   }
 };
 async function load(config3) {
-  const model9 = await tf3.loadGraphModel(join(config3.modelBasePath, config3.face.detector.modelPath), { fromTFHub: config3.face.detector.modelPath.includes("tfhub.dev") });
-  const blazeFace = new BlazeFaceModel(model9, config3);
-  if (!model9 || !model9.modelUrl)
+  const model10 = await tf3.loadGraphModel(join(config3.modelBasePath, config3.face.detector.modelPath), { fromTFHub: config3.face.detector.modelPath.includes("tfhub.dev") });
+  const blazeFace = new BlazeFaceModel(model10, config3);
+  if (!model10 || !model10.modelUrl)
     log("load model failed:", config3.face.detector.modelPath);
   else if (config3.debug)
-    log("load model:", model9.modelUrl);
+    log("load model:", model10.modelUrl);
   return blazeFace;
 }
 
@@ -4159,11 +4156,6 @@ var triangulation = TRI468;
 var uvmap = UV468;
 
 // src/emotion/emotion.ts
-var emotion_exports = {};
-__export(emotion_exports, {
-  load: () => load3,
-  predict: () => predict2
-});
 var tf6 = __toModule(require_tfjs_esm());
 var annotations = ["angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"];
 var model;
@@ -4182,7 +4174,7 @@ async function load3(config3) {
     log("cached model:", model.modelUrl);
   return model;
 }
-async function predict2(image15, config3, idx, count2) {
+async function predict2(image16, config3, idx, count2) {
   if (!model)
     return null;
   if (skipped < config3.face.emotion.skipFrames && config3.skipFrame && lastCount === count2 && last[idx] && last[idx].length > 0) {
@@ -4191,7 +4183,7 @@ async function predict2(image15, config3, idx, count2) {
   }
   skipped = 0;
   return new Promise(async (resolve) => {
-    const resize = tf6.image.resizeBilinear(image15, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
+    const resize = tf6.image.resizeBilinear(image16, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
     const [red, green, blue] = tf6.split(resize, 3, 3);
     resize.dispose();
     const redNorm = tf6.mul(red, rgb[0]);
@@ -4225,14 +4217,6 @@ async function predict2(image15, config3, idx, count2) {
 }
 
 // src/faceres/faceres.ts
-var faceres_exports = {};
-__export(faceres_exports, {
-  enhance: () => enhance,
-  load: () => load4,
-  match: () => match,
-  predict: () => predict3,
-  similarity: () => similarity
-});
 var tf7 = __toModule(require_tfjs_esm());
 var model2;
 var last2 = [];
@@ -4275,7 +4259,7 @@ function match(embedding, db, threshold = 0) {
   return best;
 }
 function enhance(input) {
-  const image15 = tf7.tidy(() => {
+  const image16 = tf7.tidy(() => {
     const tensor2 = input.image || input.tensor || input;
     if (!(tensor2 instanceof tf7.Tensor))
       return null;
@@ -4286,9 +4270,9 @@ function enhance(input) {
     const norm = crop.mul(255);
     return norm;
   });
-  return image15;
+  return image16;
 }
-async function predict3(image15, config3, idx, count2) {
+async function predict3(image16, config3, idx, count2) {
   var _a, _b;
   if (!model2)
     return null;
@@ -4298,7 +4282,7 @@ async function predict3(image15, config3, idx, count2) {
   }
   skipped2 = 0;
   return new Promise(async (resolve) => {
-    const enhanced = enhance(image15);
+    const enhanced = enhance(image16);
     let resT;
     const obj = {
       age: 0,
@@ -4513,11 +4497,6 @@ var detectFace = async (parent, input) => {
 };
 
 // src/posenet/posenet.ts
-var posenet_exports = {};
-__export(posenet_exports, {
-  load: () => load5,
-  predict: () => predict4
-});
 var tf9 = __toModule(require_tfjs_esm());
 
 // src/posenet/keypoints.ts
@@ -4862,11 +4841,6 @@ async function load5(config3) {
 }
 
 // src/handpose/handpose.ts
-var handpose_exports = {};
-__export(handpose_exports, {
-  load: () => load6,
-  predict: () => predict5
-});
 var tf13 = __toModule(require_tfjs_esm());
 
 // src/handpose/handdetector.ts
@@ -4886,16 +4860,16 @@ function getBoxCenter2(box6) {
     box6.startPoint[1] + (box6.endPoint[1] - box6.startPoint[1]) / 2
   ];
 }
-function cutBoxFromImageAndResize2(box6, image15, cropSize) {
-  const h = image15.shape[1];
-  const w = image15.shape[2];
+function cutBoxFromImageAndResize2(box6, image16, cropSize) {
+  const h = image16.shape[1];
+  const w = image16.shape[2];
   const boxes = [[
     box6.startPoint[1] / h,
     box6.startPoint[0] / w,
     box6.endPoint[1] / h,
     box6.endPoint[0] / w
   ]];
-  return tf10.image.cropAndResize(image15, boxes, [0], cropSize);
+  return tf10.image.cropAndResize(image16, boxes, [0], cropSize);
 }
 function scaleBoxCoordinates2(box6, factor) {
   const startPoint = [box6.startPoint[0] * factor[0], box6.startPoint[1] * factor[1]];
@@ -7874,9 +7848,9 @@ var anchors = [
 
 // src/handpose/handdetector.ts
 var HandDetector = class {
-  constructor(model9) {
+  constructor(model10) {
     var _a;
-    this.model = model9;
+    this.model = model10;
     this.anchors = anchors.map((anchor) => [anchor.x, anchor.y]);
     this.anchorsTensor = tf11.tensor2d(this.anchors);
     this.inputSize = (_a = this.model) == null ? void 0 : _a.inputs[0].shape[2];
@@ -7930,9 +7904,9 @@ var HandDetector = class {
   async estimateHandBounds(input, config3) {
     const inputHeight = input.shape[1];
     const inputWidth = input.shape[2];
-    const image15 = tf11.tidy(() => input.resizeBilinear([this.inputSize, this.inputSize]).div(127.5).sub(1));
-    const predictions = await this.getBoxes(image15, config3);
-    image15.dispose();
+    const image16 = tf11.tidy(() => input.resizeBilinear([this.inputSize, this.inputSize]).div(127.5).sub(1));
+    const predictions = await this.getBoxes(image16, config3);
+    image16.dispose();
     const hands = [];
     if (!predictions || predictions.length === 0)
       return hands;
@@ -8077,11 +8051,11 @@ var HandPipeline = class {
       Math.trunc(coord[2])
     ]);
   }
-  async estimateHands(image15, config3) {
+  async estimateHands(image16, config3) {
     let useFreshBox = false;
     let boxes;
     if (this.skipped === 0 || this.skipped > config3.hand.skipFrames || !config3.hand.landmarks || !config3.skipFrame) {
-      boxes = await this.handDetector.estimateHandBounds(image15, config3);
+      boxes = await this.handDetector.estimateHandBounds(image16, config3);
       this.skipped = 0;
     }
     if (config3.skipFrame)
@@ -8100,8 +8074,8 @@ var HandPipeline = class {
     if (config3.hand.landmarks) {
       const angle = config3.hand.rotation ? computeRotation2(currentBox.palmLandmarks[palmLandmarksPalmBase], currentBox.palmLandmarks[palmLandmarksMiddleFingerBase]) : 0;
       const palmCenter = getBoxCenter2(currentBox);
-      const palmCenterNormalized = [palmCenter[0] / image15.shape[2], palmCenter[1] / image15.shape[1]];
-      const rotatedImage = config3.hand.rotation && tf12.ENV.flags.IS_BROWSER ? tf12.image.rotateWithOffset(image15, angle, 0, palmCenterNormalized) : image15.clone();
+      const palmCenterNormalized = [palmCenter[0] / image16.shape[2], palmCenter[1] / image16.shape[1]];
+      const rotatedImage = config3.hand.rotation && tf12.ENV.flags.IS_BROWSER ? tf12.image.rotateWithOffset(image16, angle, 0, palmCenterNormalized) : image16.clone();
       const rotationMatrix = buildRotationMatrix2(-angle, palmCenter);
       const newBox = useFreshBox ? this.getBoxForPalmLandmarks(currentBox.palmLandmarks, rotationMatrix) : currentBox;
       const croppedInput = cutBoxFromImageAndResize2(newBox, rotatedImage, [this.inputSize, this.inputSize]);
@@ -8232,11 +8206,6 @@ async function load6(config3) {
 }
 
 // src/blazepose/blazepose.ts
-var blazepose_exports = {};
-__export(blazepose_exports, {
-  load: () => load7,
-  predict: () => predict6
-});
 var tf14 = __toModule(require_tfjs_esm());
 
 // src/blazepose/annotations.ts
@@ -8330,14 +8299,14 @@ async function load7(config3) {
     log("cached model:", model4["modelUrl"]);
   return model4;
 }
-async function predict6(image15, config3) {
+async function predict6(image16, config3) {
   var _a;
   if (!model4)
     return [];
   if (!config3.body.enabled)
     return [];
-  const imgSize = { width: image15.shape[2] || 0, height: image15.shape[1] || 0 };
-  const resize = tf14.image.resizeBilinear(image15, [model4["width"], model4["height"]], false);
+  const imgSize = { width: image16.shape[2] || 0, height: image16.shape[1] || 0 };
+  const resize = tf14.image.resizeBilinear(image16, [model4["width"], model4["height"]], false);
   const normalize = tf14.div(resize, [255]);
   resize.dispose();
   const resT = await model4.predict(normalize);
@@ -8413,7 +8382,7 @@ function max2d(inputs, minScore) {
     return [0, 0, newScore];
   });
 }
-async function predict7(image15, config3) {
+async function predict7(image16, config3) {
   if (skipped3 < config3.body.skipFrames && config3.skipFrame && Object.keys(keypoints).length > 0) {
     skipped3++;
     return [{ id: 0, score, box: box4, boxRaw, keypoints }];
@@ -8423,7 +8392,7 @@ async function predict7(image15, config3) {
   const tensor2 = tf15.tidy(() => {
     if (!model5.inputs[0].shape)
       return null;
-    const resize = tf15.image.resizeBilinear(image15, [model5.inputs[0].shape[2], model5.inputs[0].shape[1]], false);
+    const resize = tf15.image.resizeBilinear(image16, [model5.inputs[0].shape[2], model5.inputs[0].shape[1]], false);
     const enhance2 = tf15.mul(resize, 2);
     const norm = enhance2.sub(1);
     return norm;
@@ -8434,10 +8403,10 @@ async function predict7(image15, config3) {
   tensor2.dispose();
   if (resT) {
     keypoints.length = 0;
-    const squeeze3 = resT.squeeze();
+    const squeeze4 = resT.squeeze();
     tf15.dispose(resT);
-    const stack2 = squeeze3.unstack(2);
-    tf15.dispose(squeeze3);
+    const stack2 = squeeze4.unstack(2);
+    tf15.dispose(squeeze4);
     for (let id = 0; id < stack2.length; id++) {
       const [x2, y2, partScore] = max2d(stack2[id], config3.body.minConfidence);
       if (score > config3.body.minConfidence) {
@@ -8449,8 +8418,8 @@ async function predict7(image15, config3) {
           y2 / model5.inputs[0].shape[1]
         ],
         position: [
-          Math.round(image15.shape[2] * x2 / model5.inputs[0].shape[2]),
-          Math.round(image15.shape[1] * y2 / model5.inputs[0].shape[1])
+          Math.round(image16.shape[2] * x2 / model5.inputs[0].shape[2]),
+          Math.round(image16.shape[1] * y2 / model5.inputs[0].shape[1])
         ]
       });
     }
@@ -8498,7 +8467,7 @@ async function load9(config3) {
     log("cached model:", model6["modelUrl"]);
   return model6;
 }
-async function predict8(image15, config3) {
+async function predict8(image16, config3) {
   if (skipped4 < config3.body.skipFrames && config3.skipFrame && Object.keys(keypoints2).length > 0) {
     skipped4++;
     return [{ id: 0, score: score2, box: box5, boxRaw: boxRaw2, keypoints: keypoints2 }];
@@ -8508,7 +8477,7 @@ async function predict8(image15, config3) {
   const tensor2 = tf16.tidy(() => {
     if (!model6.inputs[0].shape)
       return null;
-    const resize = tf16.image.resizeBilinear(image15, [model6.inputs[0].shape[2], model6.inputs[0].shape[1]], false);
+    const resize = tf16.image.resizeBilinear(image16, [model6.inputs[0].shape[2], model6.inputs[0].shape[1]], false);
     const cast2 = tf16.cast(resize, "int32");
     return cast2;
   });
@@ -8532,8 +8501,8 @@ async function predict8(image15, config3) {
           kpt3[id][0]
         ],
         position: [
-          Math.round((image15.shape[2] || 0) * kpt3[id][1]),
-          Math.round((image15.shape[1] || 0) * kpt3[id][0])
+          Math.round((image16.shape[2] || 0) * kpt3[id][1]),
+          Math.round((image16.shape[1] || 0) * kpt3[id][0])
         ]
       });
     }
@@ -8561,11 +8530,6 @@ async function predict8(image15, config3) {
 }
 
 // src/object/nanodet.ts
-var nanodet_exports = {};
-__export(nanodet_exports, {
-  load: () => load10,
-  predict: () => predict9
-});
 var tf17 = __toModule(require_tfjs_esm());
 
 // src/object/labels.ts
@@ -8733,15 +8697,15 @@ async function process2(res, inputSize, outputShape, config3) {
   results = results.filter((a, idx) => nmsIdx.includes(idx)).sort((a, b) => b.score - a.score);
   return results;
 }
-async function predict9(image15, config3) {
+async function predict9(image16, config3) {
   if (skipped5 < config3.object.skipFrames && config3.skipFrame && last3.length > 0) {
     skipped5++;
     return last3;
   }
   skipped5 = 0;
   return new Promise(async (resolve) => {
-    const outputSize = [image15.shape[2], image15.shape[1]];
-    const resize = tf17.image.resizeBilinear(image15, [model7.inputSize, model7.inputSize], false);
+    const outputSize = [image16.shape[2], image16.shape[1]];
+    const resize = tf17.image.resizeBilinear(image16, [model7.inputSize, model7.inputSize], false);
     const norm = resize.div(255);
     const transpose = norm.transpose([0, 3, 1, 2]);
     norm.dispose();
@@ -8757,11 +8721,6 @@ async function predict9(image15, config3) {
 }
 
 // src/object/centernet.ts
-var centernet_exports = {};
-__export(centernet_exports, {
-  load: () => load11,
-  predict: () => predict10
-});
 var tf18 = __toModule(require_tfjs_esm());
 var model8;
 var last4 = [];
@@ -9100,8 +9059,8 @@ function GLImageFilter(params) {
     gl.uniform1f(_currentProgram.uniform.flipY, flipY ? -1 : 1);
     gl.drawArrays(gl.TRIANGLES, 0, 6);
   };
-  this.apply = function(image15) {
-    _resize(image15.width, image15.height);
+  this.apply = function(image16) {
+    _resize(image16.width, image16.height);
     _drawCount = 0;
     if (!_sourceTexture)
       _sourceTexture = gl.createTexture();
@@ -9110,7 +9069,7 @@ function GLImageFilter(params) {
     gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
     gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
     gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
-    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image15);
+    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image16);
     if (_filterChain.length === 0) {
       _draw();
       return _canvas;
@@ -9768,14 +9727,14 @@ function process4(input, config3) {
       const shape = [outCanvas.height, outCanvas.width, 3];
       pixels = tf19.tensor3d(outCanvas.data, shape, "int32");
     } else if (outCanvas instanceof ImageData) {
-      pixels = tf19.browser.fromPixels(outCanvas);
+      pixels = tf19.browser ? tf19.browser.fromPixels(outCanvas) : null;
     } else if (config3.backend === "webgl" || config3.backend === "humangl") {
      const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
       tempCanvas.width = targetWidth;
       tempCanvas.height = targetHeight;
       const tempCtx = tempCanvas.getContext("2d");
       tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
-      pixels = tf19.browser.fromPixels(tempCanvas);
+      pixels = tf19.browser ? tf19.browser.fromPixels(tempCanvas) : null;
     } else {
       const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
       tempCanvas.width = targetWidth;
@@ -9783,13 +9742,15 @@ function process4(input, config3) {
       const tempCtx = tempCanvas.getContext("2d");
       tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
       const data = tempCtx == null ? void 0 : tempCtx.getImageData(0, 0, targetWidth, targetHeight);
-      pixels = tf19.browser.fromPixels(data);
+      pixels = tf19.browser ? tf19.browser.fromPixels(data) : null;
     }
+    if (pixels) {
       const casted = pixels.toFloat();
       tensor2 = casted.expandDims(0);
       pixels.dispose();
       casted.dispose();
     }
+  }
   const canvas2 = config3.filter.return ? outCanvas : null;
   return { tensor: tensor2, canvas: canvas2 };
 }
@@ -9945,10 +9906,10 @@ async function face2(inCanvas2, result, drawOptions) {
     if (f.iris)
       labels2.push(`distance: ${f.iris}`);
     if (f.emotion && f.emotion.length > 0) {
-      const emotion2 = f.emotion.map((a) => `${Math.trunc(100 * a.score)}% ${a.emotion}`);
-      if (emotion2.length > 3)
-        emotion2.length = 3;
-      labels2.push(emotion2.join(" "));
+      const emotion3 = f.emotion.map((a) => `${Math.trunc(100 * a.score)}% ${a.emotion}`);
+      if (emotion3.length > 3)
+        emotion3.length = 3;
+      labels2.push(emotion3.join(" "));
     }
     if (f.rotation && f.rotation.angle && f.rotation.gaze) {
       if (f.rotation.angle.roll)
@@ -10376,6 +10337,7 @@ function calc(newResult) {
   var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u;
   const elapsed = Date.now() - newResult.timestamp;
   const bufferedFactor = elapsed < 1e3 ? 8 - Math.log(elapsed) : 1;
+  bufferedResult.canvas = newResult.canvas;
   if (!bufferedResult.body || newResult.body.length !== bufferedResult.body.length) {
     bufferedResult.body = JSON.parse(JSON.stringify(newResult.body));
   } else {
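The interpolation weight used in calc above decays with result age: `bufferedFactor = elapsed < 1000 ? 8 - Math.log(elapsed) : 1`, so fresh results are smoothed heavily and results older than one second pass through unchanged. A quick numeric check (values rounded):

```js
const factor = (elapsed) => (elapsed < 1000 ? 8 - Math.log(elapsed) : 1);
factor(10);   // ≈ 5.70 — very fresh result, heavy smoothing
factor(100);  // ≈ 3.39
factor(900);  // ≈ 1.20 — almost stale, little smoothing
factor(2000); // 1 — older than 1s, new result used as-is
```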
@@ -10454,6 +10416,60 @@ function calc(newResult) {
   return bufferedResult;
 }
+
+// src/segmentation/segmentation.ts
+var tf20 = __toModule(require_tfjs_esm());
+var model9;
+async function load12(config3) {
+  if (!model9) {
+    model9 = await tf20.loadGraphModel(join(config3.modelBasePath, config3.segmentation.modelPath));
+    if (!model9 || !model9["modelUrl"])
+      log("load model failed:", config3.segmentation.modelPath);
+    else if (config3.debug)
+      log("load model:", model9["modelUrl"]);
+  } else if (config3.debug)
+    log("cached model:", model9["modelUrl"]);
+  return model9;
+}
+async function predict11(input, config3) {
+  var _a, _b, _c, _d;
+  if (!config3.segmentation.enabled || !input.tensor || !input.canvas)
+    return false;
+  if (!model9 || !model9.inputs[0].shape)
+    return false;
+  const resizeInput = tf20.image.resizeBilinear(input.tensor, [model9.inputs[0].shape[1], model9.inputs[0].shape[2]], false);
+  const norm = resizeInput.div(255);
+  const res = model9.predict(norm);
+  tf20.dispose(resizeInput);
+  tf20.dispose(norm);
+  const overlay = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(input.canvas.width, input.canvas.height) : document.createElement("canvas");
+  overlay.width = input.canvas.width;
+  overlay.height = input.canvas.height;
+  const squeeze4 = tf20.squeeze(res, 0);
+  let resizeOutput;
+  if (squeeze4.shape[2] === 2) {
+    const softmax = squeeze4.softmax();
+    const [bg, fg] = tf20.unstack(softmax, 2);
+    tf20.dispose(softmax);
+    const expand = fg.expandDims(2);
+    tf20.dispose(bg);
+    tf20.dispose(fg);
+    resizeOutput = tf20.image.resizeBilinear(expand, [(_a = input.tensor) == null ? void 0 : _a.shape[1], (_b = input.tensor) == null ? void 0 : _b.shape[2]]);
+    tf20.dispose(expand);
+  } else {
+    resizeOutput = tf20.image.resizeBilinear(squeeze4, [(_c = input.tensor) == null ? void 0 : _c.shape[1], (_d = input.tensor) == null ? void 0 : _d.shape[2]]);
+  }
+  if (tf20.browser)
+    await tf20.browser.toPixels(resizeOutput, overlay);
+  tf20.dispose(resizeOutput);
+  tf20.dispose(squeeze4);
+  tf20.dispose(res);
+  const ctx = input.canvas.getContext("2d");
+  ctx.globalCompositeOperation = "darken";
+  await (ctx == null ? void 0 : ctx.drawImage(overlay, 0, 0));
+  ctx.globalCompositeOperation = "source-in";
+  return true;
+}
 
 // src/sample.ts
 var face3 = `
 /9j/4AAQSkZJRgABAQEAYABgAAD/4QBoRXhpZgAATU0AKgAAAAgABAEaAAUAAAABAAAAPgEbAAUA
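In the new module above, predict11 resizes the input tensor to the model's input shape, runs the graph model, and converts the output to a foreground mask: a two-channel output is treated as [background, foreground] scores (softmax, keep the foreground plane), a single-channel output is used directly; the mask is then rendered to an overlay canvas and composited onto the input canvas. A self-contained sketch of just the mask math, with illustrative shapes:

```js
const tf = require('@tensorflow/tfjs'); // assumption: tfjs is installed
const res = tf.randomUniform([1, 96, 160, 2]);  // stand-in for the model output
const [inputHeight, inputWidth] = [480, 640];   // original input dimensions
const squeezed = tf.squeeze(res, 0);            // [96, 160, 2]
const probs = squeezed.softmax();               // per-pixel class probabilities
const [bg, fg] = tf.unstack(probs, 2);          // background / foreground planes
const mask = fg.expandDims(2);                  // [96, 160, 1] foreground mask
const resized = tf.image.resizeBilinear(mask, [inputHeight, inputWidth]);
// in a browser, tf.browser.toPixels(resized, overlayCanvas) would render the
// mask, which predict11 then draws over the input canvas
```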
@@ -11205,7 +11221,7 @@ var Human = class {
     return null;
   if (!input)
     return "input is not defined";
-  if (this.tf.ENV.flags.IS_NODE && !(input instanceof tf20.Tensor))
+  if (this.tf.ENV.flags.IS_NODE && !(input instanceof tf21.Tensor))
     return "input must be a tensor";
   try {
     this.tf.getBackend();
@@ -11348,8 +11364,8 @@ var Human = class {
     if (!img)
       return null;
     let res;
-    if (typeof tf20["node"] !== "undefined") {
-      const data = tf20["node"].decodeJpeg(img);
+    if (typeof tf21["node"] !== "undefined") {
+      const data = tf21["node"].decodeJpeg(img);
       const expanded = data.expandDims(0);
       this.tf.dispose(data);
       res = await this.detect(expanded, this.config);
@@ -11361,7 +11377,7 @@ var Human = class {
       return res;
     });
     this.config = mergeDeep(config, userConfig || {});
-    this.tf = tf20;
+    this.tf = tf21;
     this.draw = draw_exports;
     this.version = version;
     this.state = "idle";
@@ -11384,18 +11400,10 @@ var Human = class {
       embedding: null,
       nanodet: null,
       centernet: null,
-      faceres: null
+      faceres: null,
+      segmentation: null
     };
     this.image = (input) => process4(input, this.config);
-    this.classes = {
-      facemesh: facemesh_exports,
-      emotion: emotion_exports,
-      faceres: faceres_exports,
-      body: this.config.body.modelPath.includes("posenet") ? posenet_exports : blazepose_exports,
-      hand: handpose_exports,
-      nanodet: nanodet_exports,
-      centernet: centernet_exports
-    };
     this.faceTriangulation = triangulation;
     this.faceUVMap = uvmap;
     this.sysinfo = info();
@@ -11443,7 +11451,8 @@ var Human = class {
         this.models.movenet,
         this.models.nanodet,
         this.models.centernet,
-        this.models.faceres
+        this.models.faceres,
+        this.models.segmentation
       ] = await Promise.all([
         this.models.face || (this.config.face.enabled ? load2(this.config) : null),
         this.models.emotion || (this.config.face.enabled && this.config.face.emotion.enabled ? load3(this.config) : null),
@@ -11454,7 +11463,8 @@ var Human = class {
         this.models.movenet || (this.config.body.enabled && this.config.body.modelPath.includes("movenet") ? load9(this.config) : null),
         this.models.nanodet || (this.config.object.enabled && this.config.object.modelPath.includes("nanodet") ? load10(this.config) : null),
         this.models.centernet || (this.config.object.enabled && this.config.object.modelPath.includes("centernet") ? load11(this.config) : null),
-        this.models.faceres || (this.config.face.enabled && this.config.face.description.enabled ? load4(this.config) : null)
+        this.models.faceres || (this.config.face.enabled && this.config.face.description.enabled ? load4(this.config) : null),
+        this.models.segmentation || (this.config.segmentation.enabled ? load12(this.config) : null)
       ]);
     } else {
       if (this.config.face.enabled && !this.models.face)
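Both load paths gate each model behind its config flag, so the segmentation weights are only fetched when config.segmentation.enabled is set; the parallel branch simply folds the new load12 into the existing Promise.all. A compact sketch of the pattern (the loader names mirror the bundle but are illustrative, not its actual exports):

```js
// minimal sketch: conditional, parallel, cache-aware model loading
async function loadModels(config, models) {
  [models.faceres, models.segmentation] = await Promise.all([
    models.faceres || (config.face.enabled ? loadFaceRes(config) : null),                    // hypothetical loader
    models.segmentation || (config.segmentation.enabled ? loadSegmentation(config) : null), // hypothetical loader
  ]);
  return models;
}
```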
@@ -11477,6 +11487,8 @@ var Human = class {
         this.models.centernet = await load11(this.config);
       if (this.config.face.enabled && this.config.face.description.enabled && !this.models.faceres)
         this.models.faceres = await load4(this.config);
+      if (this.config.segmentation.enabled && !this.models.segmentation)
+        this.models.segmentation = await load12(this.config);
     }
     if (__privateGet(this, _firstRun)) {
       if (this.config.debug)
@@ -11611,6 +11623,16 @@ var Human = class {
         else if (this.performance.gesture)
           delete this.performance.gesture;
       }
+      if (this.config.segmentation.enabled) {
+        this.analyze("Start Segmentation:");
+        this.state = "run:segmentation";
+        timeStamp = now();
+        await predict11(process5, this.config);
+        elapsedTime = Math.trunc(now() - timeStamp);
+        if (elapsedTime > 0)
+          this.performance.segmentation = elapsedTime;
+        this.analyze("End Segmentation:");
+      }
       this.performance.total = Math.trunc(now() - timeStart);
       this.state = "idle";
       this.result = {
@@ -11627,7 +11649,7 @@ var Human = class {
       return join2(faceRes, bodyRes, handRes, gestureRes, (_a = process5 == null ? void 0 : process5.tensor) == null ? void 0 : _a.shape);
     }
   };
-  tf20.dispose(process5.tensor);
+  tf21.dispose(process5.tensor);
   resolve(this.result);
 });
 }
@@ -214,6 +214,10 @@ var config = {
     iouThreshold: 0.4,
     maxDetected: 10,
     skipFrames: 19
+  },
+  segmentation: {
+    enabled: false,
+    modelPath: "selfie.json"
   }
 };
 
@@ -239,7 +243,7 @@ function info() {
 }
 
 // src/human.ts
-var tf20 = __toModule(require_tfjs_esm());
+var tf21 = __toModule(require_tfjs_esm());
 
 // src/tfjs/backend.ts
 var tf = __toModule(require_tfjs_esm());
@@ -313,13 +317,6 @@ function register() {
 var tf8 = __toModule(require_tfjs_esm());
 
 // src/blazeface/facemesh.ts
-var facemesh_exports = {};
-__export(facemesh_exports, {
-  load: () => load2,
-  predict: () => predict,
-  triangulation: () => triangulation,
-  uvmap: () => uvmap
-});
 var tf5 = __toModule(require_tfjs_esm());
 
 // src/blazeface/blazeface.ts
@@ -344,16 +341,16 @@ function getBoxCenter(box6) {
     box6.startPoint[1] + (box6.endPoint[1] - box6.startPoint[1]) / 2
   ];
 }
-function cutBoxFromImageAndResize(box6, image15, cropSize) {
-  const h = image15.shape[1];
-  const w = image15.shape[2];
+function cutBoxFromImageAndResize(box6, image16, cropSize) {
+  const h = image16.shape[1];
+  const w = image16.shape[2];
   const boxes = [[
     box6.startPoint[1] / h,
     box6.startPoint[0] / w,
     box6.endPoint[1] / h,
     box6.endPoint[0] / w
   ]];
-  return tf2.image.cropAndResize(image15, boxes, [0], cropSize);
+  return tf2.image.cropAndResize(image16, boxes, [0], cropSize);
 }
 function enlargeBox(box6, factor = 1.5) {
   const center = getBoxCenter(box6);
@@ -487,11 +484,11 @@ function decodeBounds(boxOutputs, anchors3, inputSize) {
   return tf3.concat2d([startNormalized, endNormalized], concatAxis);
 }
 var BlazeFaceModel = class {
-  constructor(model9, config3) {
-    this.model = model9;
-    this.anchorsData = generateAnchors(model9.inputs[0].shape[1]);
+  constructor(model10, config3) {
+    this.model = model10;
+    this.anchorsData = generateAnchors(model10.inputs[0].shape[1]);
     this.anchors = tf3.tensor2d(this.anchorsData);
-    this.inputSize = model9.inputs[0].shape[2];
+    this.inputSize = model10.inputs[0].shape[2];
     this.config = config3;
   }
   async getBoundingBoxes(inputImage) {
@@ -540,12 +537,12 @@ var BlazeFaceModel = class {
   }
 };
 async function load(config3) {
-  const model9 = await tf3.loadGraphModel(join(config3.modelBasePath, config3.face.detector.modelPath), { fromTFHub: config3.face.detector.modelPath.includes("tfhub.dev") });
-  const blazeFace = new BlazeFaceModel(model9, config3);
-  if (!model9 || !model9.modelUrl)
+  const model10 = await tf3.loadGraphModel(join(config3.modelBasePath, config3.face.detector.modelPath), { fromTFHub: config3.face.detector.modelPath.includes("tfhub.dev") });
+  const blazeFace = new BlazeFaceModel(model10, config3);
+  if (!model10 || !model10.modelUrl)
     log("load model failed:", config3.face.detector.modelPath);
   else if (config3.debug)
-    log("load model:", model9.modelUrl);
+    log("load model:", model10.modelUrl);
   return blazeFace;
 }
 
@@ -4160,11 +4157,6 @@ var triangulation = TRI468;
 var uvmap = UV468;
 
 // src/emotion/emotion.ts
-var emotion_exports = {};
-__export(emotion_exports, {
-  load: () => load3,
-  predict: () => predict2
-});
 var tf6 = __toModule(require_tfjs_esm());
 var annotations = ["angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"];
 var model;
@@ -4183,7 +4175,7 @@ async function load3(config3) {
     log("cached model:", model.modelUrl);
   return model;
 }
-async function predict2(image15, config3, idx, count2) {
+async function predict2(image16, config3, idx, count2) {
   if (!model)
     return null;
   if (skipped < config3.face.emotion.skipFrames && config3.skipFrame && lastCount === count2 && last[idx] && last[idx].length > 0) {
@@ -4192,7 +4184,7 @@ async function predict2(image15, config3, idx, count2) {
   }
   skipped = 0;
   return new Promise(async (resolve) => {
-    const resize = tf6.image.resizeBilinear(image15, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
+    const resize = tf6.image.resizeBilinear(image16, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
     const [red, green, blue] = tf6.split(resize, 3, 3);
     resize.dispose();
     const redNorm = tf6.mul(red, rgb[0]);
@@ -4226,14 +4218,6 @@ async function predict2(image15, config3, idx, count2) {
 }
 
 // src/faceres/faceres.ts
-var faceres_exports = {};
-__export(faceres_exports, {
-  enhance: () => enhance,
-  load: () => load4,
-  match: () => match,
-  predict: () => predict3,
-  similarity: () => similarity
-});
 var tf7 = __toModule(require_tfjs_esm());
 var model2;
 var last2 = [];
@@ -4276,7 +4260,7 @@ function match(embedding, db, threshold = 0) {
   return best;
 }
 function enhance(input) {
-  const image15 = tf7.tidy(() => {
+  const image16 = tf7.tidy(() => {
     const tensor2 = input.image || input.tensor || input;
     if (!(tensor2 instanceof tf7.Tensor))
       return null;
@@ -4287,9 +4271,9 @@ function enhance(input) {
     const norm = crop.mul(255);
     return norm;
   });
-  return image15;
+  return image16;
 }
-async function predict3(image15, config3, idx, count2) {
+async function predict3(image16, config3, idx, count2) {
   var _a, _b;
   if (!model2)
     return null;
@@ -4299,7 +4283,7 @@ async function predict3(image15, config3, idx, count2) {
   }
   skipped2 = 0;
   return new Promise(async (resolve) => {
-    const enhanced = enhance(image15);
+    const enhanced = enhance(image16);
     let resT;
     const obj = {
       age: 0,
@@ -4514,11 +4498,6 @@ var detectFace = async (parent, input) => {
 };
 
 // src/posenet/posenet.ts
-var posenet_exports = {};
-__export(posenet_exports, {
-  load: () => load5,
-  predict: () => predict4
-});
 var tf9 = __toModule(require_tfjs_esm());
 
 // src/posenet/keypoints.ts
@@ -4863,11 +4842,6 @@ async function load5(config3) {
 }
 
 // src/handpose/handpose.ts
-var handpose_exports = {};
-__export(handpose_exports, {
-  load: () => load6,
-  predict: () => predict5
-});
 var tf13 = __toModule(require_tfjs_esm());
 
 // src/handpose/handdetector.ts
@@ -4887,16 +4861,16 @@ function getBoxCenter2(box6) {
     box6.startPoint[1] + (box6.endPoint[1] - box6.startPoint[1]) / 2
   ];
 }
-function cutBoxFromImageAndResize2(box6, image15, cropSize) {
-  const h = image15.shape[1];
-  const w = image15.shape[2];
+function cutBoxFromImageAndResize2(box6, image16, cropSize) {
+  const h = image16.shape[1];
+  const w = image16.shape[2];
   const boxes = [[
     box6.startPoint[1] / h,
     box6.startPoint[0] / w,
     box6.endPoint[1] / h,
     box6.endPoint[0] / w
   ]];
-  return tf10.image.cropAndResize(image15, boxes, [0], cropSize);
+  return tf10.image.cropAndResize(image16, boxes, [0], cropSize);
 }
 function scaleBoxCoordinates2(box6, factor) {
   const startPoint = [box6.startPoint[0] * factor[0], box6.startPoint[1] * factor[1]];
@@ -7875,9 +7849,9 @@ var anchors = [
 
 // src/handpose/handdetector.ts
 var HandDetector = class {
-  constructor(model9) {
+  constructor(model10) {
     var _a;
-    this.model = model9;
+    this.model = model10;
     this.anchors = anchors.map((anchor) => [anchor.x, anchor.y]);
     this.anchorsTensor = tf11.tensor2d(this.anchors);
     this.inputSize = (_a = this.model) == null ? void 0 : _a.inputs[0].shape[2];
@ -7931,9 +7905,9 @@ var HandDetector = class {
|
||||||
async estimateHandBounds(input, config3) {
|
async estimateHandBounds(input, config3) {
|
||||||
const inputHeight = input.shape[1];
|
const inputHeight = input.shape[1];
|
||||||
const inputWidth = input.shape[2];
|
const inputWidth = input.shape[2];
|
||||||
const image15 = tf11.tidy(() => input.resizeBilinear([this.inputSize, this.inputSize]).div(127.5).sub(1));
|
const image16 = tf11.tidy(() => input.resizeBilinear([this.inputSize, this.inputSize]).div(127.5).sub(1));
|
||||||
const predictions = await this.getBoxes(image15, config3);
|
const predictions = await this.getBoxes(image16, config3);
|
||||||
image15.dispose();
|
image16.dispose();
|
||||||
const hands = [];
|
const hands = [];
|
||||||
if (!predictions || predictions.length === 0)
|
if (!predictions || predictions.length === 0)
|
||||||
return hands;
|
return hands;
|
||||||
|
@ -8078,11 +8052,11 @@ var HandPipeline = class {
|
||||||
Math.trunc(coord[2])
|
Math.trunc(coord[2])
|
||||||
]);
|
]);
|
||||||
}
|
}
|
||||||
async estimateHands(image15, config3) {
|
async estimateHands(image16, config3) {
|
||||||
let useFreshBox = false;
|
let useFreshBox = false;
|
||||||
let boxes;
|
let boxes;
|
||||||
if (this.skipped === 0 || this.skipped > config3.hand.skipFrames || !config3.hand.landmarks || !config3.skipFrame) {
|
if (this.skipped === 0 || this.skipped > config3.hand.skipFrames || !config3.hand.landmarks || !config3.skipFrame) {
|
||||||
boxes = await this.handDetector.estimateHandBounds(image15, config3);
|
boxes = await this.handDetector.estimateHandBounds(image16, config3);
|
||||||
this.skipped = 0;
|
this.skipped = 0;
|
||||||
}
|
}
|
||||||
if (config3.skipFrame)
|
if (config3.skipFrame)
|
||||||
|
@ -8101,8 +8075,8 @@ var HandPipeline = class {
|
||||||
if (config3.hand.landmarks) {
|
if (config3.hand.landmarks) {
|
||||||
const angle = config3.hand.rotation ? computeRotation2(currentBox.palmLandmarks[palmLandmarksPalmBase], currentBox.palmLandmarks[palmLandmarksMiddleFingerBase]) : 0;
|
const angle = config3.hand.rotation ? computeRotation2(currentBox.palmLandmarks[palmLandmarksPalmBase], currentBox.palmLandmarks[palmLandmarksMiddleFingerBase]) : 0;
|
||||||
const palmCenter = getBoxCenter2(currentBox);
|
const palmCenter = getBoxCenter2(currentBox);
|
||||||
const palmCenterNormalized = [palmCenter[0] / image15.shape[2], palmCenter[1] / image15.shape[1]];
|
const palmCenterNormalized = [palmCenter[0] / image16.shape[2], palmCenter[1] / image16.shape[1]];
|
||||||
const rotatedImage = config3.hand.rotation && tf12.ENV.flags.IS_BROWSER ? tf12.image.rotateWithOffset(image15, angle, 0, palmCenterNormalized) : image15.clone();
|
const rotatedImage = config3.hand.rotation && tf12.ENV.flags.IS_BROWSER ? tf12.image.rotateWithOffset(image16, angle, 0, palmCenterNormalized) : image16.clone();
|
||||||
const rotationMatrix = buildRotationMatrix2(-angle, palmCenter);
|
const rotationMatrix = buildRotationMatrix2(-angle, palmCenter);
|
||||||
const newBox = useFreshBox ? this.getBoxForPalmLandmarks(currentBox.palmLandmarks, rotationMatrix) : currentBox;
|
const newBox = useFreshBox ? this.getBoxForPalmLandmarks(currentBox.palmLandmarks, rotationMatrix) : currentBox;
|
||||||
const croppedInput = cutBoxFromImageAndResize2(newBox, rotatedImage, [this.inputSize, this.inputSize]);
|
const croppedInput = cutBoxFromImageAndResize2(newBox, rotatedImage, [this.inputSize, this.inputSize]);
|
||||||
|
@ -8233,11 +8207,6 @@ async function load6(config3) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// src/blazepose/blazepose.ts
|
// src/blazepose/blazepose.ts
|
||||||
var blazepose_exports = {};
|
|
||||||
__export(blazepose_exports, {
|
|
||||||
load: () => load7,
|
|
||||||
predict: () => predict6
|
|
||||||
});
|
|
||||||
var tf14 = __toModule(require_tfjs_esm());
|
var tf14 = __toModule(require_tfjs_esm());
|
||||||
|
|
||||||
// src/blazepose/annotations.ts
|
// src/blazepose/annotations.ts
|
||||||
|
@ -8331,14 +8300,14 @@ async function load7(config3) {
|
||||||
log("cached model:", model4["modelUrl"]);
|
log("cached model:", model4["modelUrl"]);
|
||||||
return model4;
|
return model4;
|
||||||
}
|
}
|
||||||
async function predict6(image15, config3) {
|
async function predict6(image16, config3) {
|
||||||
var _a;
|
var _a;
|
||||||
if (!model4)
|
if (!model4)
|
||||||
return [];
|
return [];
|
||||||
if (!config3.body.enabled)
|
if (!config3.body.enabled)
|
||||||
return [];
|
return [];
|
||||||
const imgSize = { width: image15.shape[2] || 0, height: image15.shape[1] || 0 };
|
const imgSize = { width: image16.shape[2] || 0, height: image16.shape[1] || 0 };
|
||||||
const resize = tf14.image.resizeBilinear(image15, [model4["width"], model4["height"]], false);
|
const resize = tf14.image.resizeBilinear(image16, [model4["width"], model4["height"]], false);
|
||||||
const normalize = tf14.div(resize, [255]);
|
const normalize = tf14.div(resize, [255]);
|
||||||
resize.dispose();
|
resize.dispose();
|
||||||
const resT = await model4.predict(normalize);
|
const resT = await model4.predict(normalize);
|
||||||
|
@ -8414,7 +8383,7 @@ function max2d(inputs, minScore) {
|
||||||
return [0, 0, newScore];
|
return [0, 0, newScore];
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
async function predict7(image15, config3) {
|
async function predict7(image16, config3) {
|
||||||
if (skipped3 < config3.body.skipFrames && config3.skipFrame && Object.keys(keypoints).length > 0) {
|
if (skipped3 < config3.body.skipFrames && config3.skipFrame && Object.keys(keypoints).length > 0) {
|
||||||
skipped3++;
|
skipped3++;
|
||||||
return [{ id: 0, score, box: box4, boxRaw, keypoints }];
|
return [{ id: 0, score, box: box4, boxRaw, keypoints }];
|
||||||
|
@ -8424,7 +8393,7 @@ async function predict7(image15, config3) {
|
||||||
const tensor2 = tf15.tidy(() => {
|
const tensor2 = tf15.tidy(() => {
|
||||||
if (!model5.inputs[0].shape)
|
if (!model5.inputs[0].shape)
|
||||||
return null;
|
return null;
|
||||||
const resize = tf15.image.resizeBilinear(image15, [model5.inputs[0].shape[2], model5.inputs[0].shape[1]], false);
|
const resize = tf15.image.resizeBilinear(image16, [model5.inputs[0].shape[2], model5.inputs[0].shape[1]], false);
|
||||||
const enhance2 = tf15.mul(resize, 2);
|
const enhance2 = tf15.mul(resize, 2);
|
||||||
const norm = enhance2.sub(1);
|
const norm = enhance2.sub(1);
|
||||||
return norm;
|
return norm;
|
||||||
|
@ -8435,10 +8404,10 @@ async function predict7(image15, config3) {
|
||||||
tensor2.dispose();
|
tensor2.dispose();
|
||||||
if (resT) {
|
if (resT) {
|
||||||
keypoints.length = 0;
|
keypoints.length = 0;
|
||||||
const squeeze3 = resT.squeeze();
|
const squeeze4 = resT.squeeze();
|
||||||
tf15.dispose(resT);
|
tf15.dispose(resT);
|
||||||
const stack2 = squeeze3.unstack(2);
|
const stack2 = squeeze4.unstack(2);
|
||||||
tf15.dispose(squeeze3);
|
tf15.dispose(squeeze4);
|
||||||
for (let id = 0; id < stack2.length; id++) {
|
for (let id = 0; id < stack2.length; id++) {
|
||||||
const [x2, y2, partScore] = max2d(stack2[id], config3.body.minConfidence);
|
const [x2, y2, partScore] = max2d(stack2[id], config3.body.minConfidence);
|
||||||
if (score > config3.body.minConfidence) {
|
if (score > config3.body.minConfidence) {
|
||||||
|
@ -8450,8 +8419,8 @@ async function predict7(image15, config3) {
|
||||||
y2 / model5.inputs[0].shape[1]
|
y2 / model5.inputs[0].shape[1]
|
||||||
],
|
],
|
||||||
position: [
|
position: [
|
||||||
Math.round(image15.shape[2] * x2 / model5.inputs[0].shape[2]),
|
Math.round(image16.shape[2] * x2 / model5.inputs[0].shape[2]),
|
||||||
Math.round(image15.shape[1] * y2 / model5.inputs[0].shape[1])
|
Math.round(image16.shape[1] * y2 / model5.inputs[0].shape[1])
|
||||||
]
|
]
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
@ -8499,7 +8468,7 @@ async function load9(config3) {
|
||||||
log("cached model:", model6["modelUrl"]);
|
log("cached model:", model6["modelUrl"]);
|
||||||
return model6;
|
return model6;
|
||||||
}
|
}
|
||||||
async function predict8(image15, config3) {
|
async function predict8(image16, config3) {
|
||||||
if (skipped4 < config3.body.skipFrames && config3.skipFrame && Object.keys(keypoints2).length > 0) {
|
if (skipped4 < config3.body.skipFrames && config3.skipFrame && Object.keys(keypoints2).length > 0) {
|
||||||
skipped4++;
|
skipped4++;
|
||||||
return [{ id: 0, score: score2, box: box5, boxRaw: boxRaw2, keypoints: keypoints2 }];
|
return [{ id: 0, score: score2, box: box5, boxRaw: boxRaw2, keypoints: keypoints2 }];
|
||||||
|
@ -8509,7 +8478,7 @@ async function predict8(image15, config3) {
|
||||||
const tensor2 = tf16.tidy(() => {
|
const tensor2 = tf16.tidy(() => {
|
||||||
if (!model6.inputs[0].shape)
|
if (!model6.inputs[0].shape)
|
||||||
return null;
|
return null;
|
||||||
const resize = tf16.image.resizeBilinear(image15, [model6.inputs[0].shape[2], model6.inputs[0].shape[1]], false);
|
const resize = tf16.image.resizeBilinear(image16, [model6.inputs[0].shape[2], model6.inputs[0].shape[1]], false);
|
||||||
const cast2 = tf16.cast(resize, "int32");
|
const cast2 = tf16.cast(resize, "int32");
|
||||||
return cast2;
|
return cast2;
|
||||||
});
|
});
|
||||||
|
@ -8533,8 +8502,8 @@ async function predict8(image15, config3) {
|
||||||
kpt3[id][0]
|
kpt3[id][0]
|
||||||
],
|
],
|
||||||
position: [
|
position: [
|
||||||
Math.round((image15.shape[2] || 0) * kpt3[id][1]),
|
Math.round((image16.shape[2] || 0) * kpt3[id][1]),
|
||||||
Math.round((image15.shape[1] || 0) * kpt3[id][0])
|
Math.round((image16.shape[1] || 0) * kpt3[id][0])
|
||||||
]
|
]
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
@ -8562,11 +8531,6 @@ async function predict8(image15, config3) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// src/object/nanodet.ts
|
// src/object/nanodet.ts
|
||||||
var nanodet_exports = {};
|
|
||||||
__export(nanodet_exports, {
|
|
||||||
load: () => load10,
|
|
||||||
predict: () => predict9
|
|
||||||
});
|
|
||||||
var tf17 = __toModule(require_tfjs_esm());
|
var tf17 = __toModule(require_tfjs_esm());
|
||||||
|
|
||||||
// src/object/labels.ts
|
// src/object/labels.ts
|
||||||
|
@ -8734,15 +8698,15 @@ async function process2(res, inputSize, outputShape, config3) {
|
||||||
results = results.filter((a, idx) => nmsIdx.includes(idx)).sort((a, b) => b.score - a.score);
|
results = results.filter((a, idx) => nmsIdx.includes(idx)).sort((a, b) => b.score - a.score);
|
||||||
return results;
|
return results;
|
||||||
}
|
}
|
||||||
async function predict9(image15, config3) {
|
async function predict9(image16, config3) {
|
||||||
if (skipped5 < config3.object.skipFrames && config3.skipFrame && last3.length > 0) {
|
if (skipped5 < config3.object.skipFrames && config3.skipFrame && last3.length > 0) {
|
||||||
skipped5++;
|
skipped5++;
|
||||||
return last3;
|
return last3;
|
||||||
}
|
}
|
||||||
skipped5 = 0;
|
skipped5 = 0;
|
||||||
return new Promise(async (resolve) => {
|
return new Promise(async (resolve) => {
|
||||||
const outputSize = [image15.shape[2], image15.shape[1]];
|
const outputSize = [image16.shape[2], image16.shape[1]];
|
||||||
const resize = tf17.image.resizeBilinear(image15, [model7.inputSize, model7.inputSize], false);
|
const resize = tf17.image.resizeBilinear(image16, [model7.inputSize, model7.inputSize], false);
|
||||||
const norm = resize.div(255);
|
const norm = resize.div(255);
|
||||||
const transpose = norm.transpose([0, 3, 1, 2]);
|
const transpose = norm.transpose([0, 3, 1, 2]);
|
||||||
norm.dispose();
|
norm.dispose();
|
||||||
|
@ -8758,11 +8722,6 @@ async function predict9(image15, config3) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// src/object/centernet.ts
|
// src/object/centernet.ts
|
||||||
var centernet_exports = {};
|
|
||||||
__export(centernet_exports, {
|
|
||||||
load: () => load11,
|
|
||||||
predict: () => predict10
|
|
||||||
});
|
|
||||||
var tf18 = __toModule(require_tfjs_esm());
|
var tf18 = __toModule(require_tfjs_esm());
|
||||||
var model8;
|
var model8;
|
||||||
var last4 = [];
|
var last4 = [];
|
||||||
|
@ -9101,8 +9060,8 @@ function GLImageFilter(params) {
|
||||||
gl.uniform1f(_currentProgram.uniform.flipY, flipY ? -1 : 1);
|
gl.uniform1f(_currentProgram.uniform.flipY, flipY ? -1 : 1);
|
||||||
gl.drawArrays(gl.TRIANGLES, 0, 6);
|
gl.drawArrays(gl.TRIANGLES, 0, 6);
|
||||||
};
|
};
|
||||||
this.apply = function(image15) {
|
this.apply = function(image16) {
|
||||||
_resize(image15.width, image15.height);
|
_resize(image16.width, image16.height);
|
||||||
_drawCount = 0;
|
_drawCount = 0;
|
||||||
if (!_sourceTexture)
|
if (!_sourceTexture)
|
||||||
_sourceTexture = gl.createTexture();
|
_sourceTexture = gl.createTexture();
|
||||||
|
@ -9111,7 +9070,7 @@ function GLImageFilter(params) {
|
||||||
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
|
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
|
||||||
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
|
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
|
||||||
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
|
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
|
||||||
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image15);
|
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image16);
|
||||||
if (_filterChain.length === 0) {
|
if (_filterChain.length === 0) {
|
||||||
_draw();
|
_draw();
|
||||||
return _canvas;
|
return _canvas;
|
||||||
|
@ -9769,14 +9728,14 @@ function process4(input, config3) {
|
||||||
const shape = [outCanvas.height, outCanvas.width, 3];
|
const shape = [outCanvas.height, outCanvas.width, 3];
|
||||||
pixels = tf19.tensor3d(outCanvas.data, shape, "int32");
|
pixels = tf19.tensor3d(outCanvas.data, shape, "int32");
|
||||||
} else if (outCanvas instanceof ImageData) {
|
} else if (outCanvas instanceof ImageData) {
|
||||||
pixels = tf19.browser.fromPixels(outCanvas);
|
pixels = tf19.browser ? tf19.browser.fromPixels(outCanvas) : null;
|
||||||
} else if (config3.backend === "webgl" || config3.backend === "humangl") {
|
} else if (config3.backend === "webgl" || config3.backend === "humangl") {
|
||||||
const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
|
const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
|
||||||
tempCanvas.width = targetWidth;
|
tempCanvas.width = targetWidth;
|
||||||
tempCanvas.height = targetHeight;
|
tempCanvas.height = targetHeight;
|
||||||
const tempCtx = tempCanvas.getContext("2d");
|
const tempCtx = tempCanvas.getContext("2d");
|
||||||
tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
|
tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
|
||||||
pixels = tf19.browser.fromPixels(tempCanvas);
|
pixels = tf19.browser ? tf19.browser.fromPixels(tempCanvas) : null;
|
||||||
} else {
|
} else {
|
||||||
const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
|
const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
|
||||||
tempCanvas.width = targetWidth;
|
tempCanvas.width = targetWidth;
|
||||||
|
@ -9784,13 +9743,15 @@ function process4(input, config3) {
|
||||||
const tempCtx = tempCanvas.getContext("2d");
|
const tempCtx = tempCanvas.getContext("2d");
|
||||||
tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
|
tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
|
||||||
const data = tempCtx == null ? void 0 : tempCtx.getImageData(0, 0, targetWidth, targetHeight);
|
const data = tempCtx == null ? void 0 : tempCtx.getImageData(0, 0, targetWidth, targetHeight);
|
||||||
pixels = tf19.browser.fromPixels(data);
|
pixels = tf19.browser ? tf19.browser.fromPixels(data) : null;
|
||||||
}
|
}
|
||||||
|
if (pixels) {
|
||||||
const casted = pixels.toFloat();
|
const casted = pixels.toFloat();
|
||||||
tensor2 = casted.expandDims(0);
|
tensor2 = casted.expandDims(0);
|
||||||
pixels.dispose();
|
pixels.dispose();
|
||||||
casted.dispose();
|
casted.dispose();
|
||||||
}
|
}
|
||||||
|
}
|
||||||
const canvas2 = config3.filter.return ? outCanvas : null;
|
const canvas2 = config3.filter.return ? outCanvas : null;
|
||||||
return { tensor: tensor2, canvas: canvas2 };
|
return { tensor: tensor2, canvas: canvas2 };
|
||||||
}
|
}
|
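The process4 hunk above is a NodeJS-safety fix rather than a rename: every tf.browser.fromPixels call now falls back to null when tf.browser is unavailable, and the tensor conversion only runs inside the new if (pixels) guard. A minimal sketch of the same pattern, with the helper name toTensor and its arguments invented for illustration:

// sketch only, not part of the diff: tolerate runtimes without tf.browser
function toTensor(tf, source) {
  const pixels = tf.browser ? tf.browser.fromPixels(source) : null; // null instead of a crash under node
  if (!pixels) return null; // caller decides how to handle an unsupported runtime
  const casted = pixels.toFloat();
  const tensor = casted.expandDims(0); // add batch dimension: [1, height, width, channels]
  pixels.dispose();
  casted.dispose();
  return tensor;
}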
@@ -9946,10 +9907,10 @@ async function face2(inCanvas2, result, drawOptions) {
if (f.iris)
labels2.push(`distance: ${f.iris}`);
if (f.emotion && f.emotion.length > 0) {
-const emotion2 = f.emotion.map((a) => `${Math.trunc(100 * a.score)}% ${a.emotion}`);
-if (emotion2.length > 3)
-emotion2.length = 3;
-labels2.push(emotion2.join(" "));
+const emotion3 = f.emotion.map((a) => `${Math.trunc(100 * a.score)}% ${a.emotion}`);
+if (emotion3.length > 3)
+emotion3.length = 3;
+labels2.push(emotion3.join(" "));
}
if (f.rotation && f.rotation.angle && f.rotation.gaze) {
if (f.rotation.angle.roll)
@@ -10377,6 +10338,7 @@ function calc(newResult) {
var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u;
const elapsed = Date.now() - newResult.timestamp;
const bufferedFactor = elapsed < 1e3 ? 8 - Math.log(elapsed) : 1;
+bufferedResult.canvas = newResult.canvas;
if (!bufferedResult.body || newResult.body.length !== bufferedResult.body.length) {
bufferedResult.body = JSON.parse(JSON.stringify(newResult.body));
} else {
@@ -10455,6 +10417,60 @@ function calc(newResult) {
return bufferedResult;
}

+// src/segmentation/segmentation.ts
+var tf20 = __toModule(require_tfjs_esm());
+var model9;
+async function load12(config3) {
+if (!model9) {
+model9 = await tf20.loadGraphModel(join(config3.modelBasePath, config3.segmentation.modelPath));
+if (!model9 || !model9["modelUrl"])
+log("load model failed:", config3.segmentation.modelPath);
+else if (config3.debug)
+log("load model:", model9["modelUrl"]);
+} else if (config3.debug)
+log("cached model:", model9["modelUrl"]);
+return model9;
+}
+async function predict11(input, config3) {
+var _a, _b, _c, _d;
+if (!config3.segmentation.enabled || !input.tensor || !input.canvas)
+return false;
+if (!model9 || !model9.inputs[0].shape)
+return false;
+const resizeInput = tf20.image.resizeBilinear(input.tensor, [model9.inputs[0].shape[1], model9.inputs[0].shape[2]], false);
+const norm = resizeInput.div(255);
+const res = model9.predict(norm);
+tf20.dispose(resizeInput);
+tf20.dispose(norm);
+const overlay = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(input.canvas.width, input.canvas.height) : document.createElement("canvas");
+overlay.width = input.canvas.width;
+overlay.height = input.canvas.height;
+const squeeze4 = tf20.squeeze(res, 0);
+let resizeOutput;
+if (squeeze4.shape[2] === 2) {
+const softmax = squeeze4.softmax();
+const [bg, fg] = tf20.unstack(softmax, 2);
+tf20.dispose(softmax);
+const expand = fg.expandDims(2);
+tf20.dispose(bg);
+tf20.dispose(fg);
+resizeOutput = tf20.image.resizeBilinear(expand, [(_a = input.tensor) == null ? void 0 : _a.shape[1], (_b = input.tensor) == null ? void 0 : _b.shape[2]]);
+tf20.dispose(expand);
+} else {
+resizeOutput = tf20.image.resizeBilinear(squeeze4, [(_c = input.tensor) == null ? void 0 : _c.shape[1], (_d = input.tensor) == null ? void 0 : _d.shape[2]]);
+}
+if (tf20.browser)
+await tf20.browser.toPixels(resizeOutput, overlay);
+tf20.dispose(resizeOutput);
+tf20.dispose(squeeze4);
+tf20.dispose(res);
+const ctx = input.canvas.getContext("2d");
+ctx.globalCompositeOperation = "darken";
+await (ctx == null ? void 0 : ctx.drawImage(overlay, 0, 0));
+ctx.globalCompositeOperation = "source-in";
+return true;
+}
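This is the new bundled segmentation module (compiled from src/segmentation/segmentation.ts): load12 fetches the graph model named by config.segmentation.modelPath, and predict11 resizes the input tensor to the model's input shape, runs inference, converts the mask to pixels, and composites it onto the processed canvas via globalCompositeOperation. The shape check distinguishes the two new models: meet outputs a two-channel background/foreground pair that needs a softmax, while selfie already outputs a single-channel mask. A standalone sketch of that branch, assuming a tf namespace and a model output res of either shape:

// sketch: reduce either segmentation output to a single-channel foreground mask
function toMask(tf, res) {
  const squeezed = tf.squeeze(res, [0]); // drop batch dimension -> [height, width, channels]
  if (squeezed.shape[2] === 2) { // meet: [background, foreground] pair
    const probs = squeezed.softmax(); // normalize the two channels against each other
    const [bg, fg] = tf.unstack(probs, 2);
    const mask = fg.expandDims(2); // keep foreground probability as [height, width, 1]
    tf.dispose([squeezed, probs, bg, fg]);
    return mask;
  }
  return squeezed; // selfie: already [height, width, 1]
}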

// src/sample.ts
var face3 = `
/9j/4AAQSkZJRgABAQEAYABgAAD/4QBoRXhpZgAATU0AKgAAAAgABAEaAAUAAAABAAAAPgEbAAUA
@@ -11206,7 +11222,7 @@ var Human = class {
return null;
if (!input)
return "input is not defined";
-if (this.tf.ENV.flags.IS_NODE && !(input instanceof tf20.Tensor))
+if (this.tf.ENV.flags.IS_NODE && !(input instanceof tf21.Tensor))
return "input must be a tensor";
try {
this.tf.getBackend();
@@ -11349,8 +11365,8 @@ var Human = class {
if (!img)
return null;
let res;
-if (typeof tf20["node"] !== "undefined") {
-const data = tf20["node"].decodeJpeg(img);
+if (typeof tf21["node"] !== "undefined") {
+const data = tf21["node"].decodeJpeg(img);
const expanded = data.expandDims(0);
this.tf.dispose(data);
res = await this.detect(expanded, this.config);
@@ -11362,7 +11378,7 @@ var Human = class {
return res;
});
this.config = mergeDeep(config, userConfig || {});
-this.tf = tf20;
+this.tf = tf21;
this.draw = draw_exports;
this.version = version;
this.state = "idle";
@@ -11385,18 +11401,10 @@ var Human = class {
embedding: null,
nanodet: null,
centernet: null,
-faceres: null
+faceres: null,
+segmentation: null
};
this.image = (input) => process4(input, this.config);
-this.classes = {
-facemesh: facemesh_exports,
-emotion: emotion_exports,
-faceres: faceres_exports,
-body: this.config.body.modelPath.includes("posenet") ? posenet_exports : blazepose_exports,
-hand: handpose_exports,
-nanodet: nanodet_exports,
-centernet: centernet_exports
-};
this.faceTriangulation = triangulation;
this.faceUVMap = uvmap;
this.sysinfo = info();
@@ -11444,7 +11452,8 @@ var Human = class {
this.models.movenet,
this.models.nanodet,
this.models.centernet,
-this.models.faceres
+this.models.faceres,
+this.models.segmentation
] = await Promise.all([
this.models.face || (this.config.face.enabled ? load2(this.config) : null),
this.models.emotion || (this.config.face.enabled && this.config.face.emotion.enabled ? load3(this.config) : null),
@@ -11455,7 +11464,8 @@ var Human = class {
this.models.movenet || (this.config.body.enabled && this.config.body.modelPath.includes("movenet") ? load9(this.config) : null),
this.models.nanodet || (this.config.object.enabled && this.config.object.modelPath.includes("nanodet") ? load10(this.config) : null),
this.models.centernet || (this.config.object.enabled && this.config.object.modelPath.includes("centernet") ? load11(this.config) : null),
-this.models.faceres || (this.config.face.enabled && this.config.face.description.enabled ? load4(this.config) : null)
+this.models.faceres || (this.config.face.enabled && this.config.face.description.enabled ? load4(this.config) : null),
+this.models.segmentation || (this.config.segmentation.enabled ? load12(this.config) : null)
]);
} else {
if (this.config.face.enabled && !this.models.face)
@@ -11478,6 +11488,8 @@ var Human = class {
this.models.centernet = await load11(this.config);
if (this.config.face.enabled && this.config.face.description.enabled && !this.models.faceres)
this.models.faceres = await load4(this.config);
+if (this.config.segmentation.enabled && !this.models.segmentation)
+this.models.segmentation = await load12(this.config);
}
if (__privateGet(this, _firstRun)) {
if (this.config.debug)
@@ -11612,6 +11624,16 @@ var Human = class {
else if (this.performance.gesture)
delete this.performance.gesture;
}
+if (this.config.segmentation.enabled) {
+this.analyze("Start Segmentation:");
+this.state = "run:segmentation";
+timeStamp = now();
+await predict11(process5, this.config);
+elapsedTime = Math.trunc(now() - timeStamp);
+if (elapsedTime > 0)
+this.performance.segmentation = elapsedTime;
+this.analyze("End Segmentation:");
+}
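The block added above slots segmentation into the detect pipeline with the same per-stage pattern as face, body, hand, and object: set state, time the predict call, and record the elapsed milliseconds on this.performance. A hedged usage sketch for reading those timings after a detect call (stage names other than segmentation existed before this change; the logging loop is illustrative, not part of the library):

const result = await human.detect(input);
for (const [stage, ms] of Object.entries(human.performance)) {
  console.log(`${stage}: ${ms} ms`); // e.g. face, body, segmentation, total
}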
this.performance.total = Math.trunc(now() - timeStart);
this.state = "idle";
this.result = {
@@ -11628,7 +11650,7 @@ var Human = class {
return join2(faceRes, bodyRes, handRes, gestureRes, (_a = process5 == null ? void 0 : process5.tensor) == null ? void 0 : _a.shape);
}
};
-tf20.dispose(process5.tensor);
+tf21.dispose(process5.tensor);
resolve(this.result);
});
}

@@ -213,6 +213,10 @@ var config = {
iouThreshold: 0.4,
maxDetected: 10,
skipFrames: 19
+},
+segmentation: {
+enabled: false,
+modelPath: "selfie.json"
}
};
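The default configuration gains a segmentation section, disabled by default and pointing at the lighter selfie model. Since user options are merged over these defaults with mergeDeep (visible earlier in this diff), enabling the feature is a one-line override; a sketch, with meet.json as the alternative model added by this commit:

const human = new Human({ segmentation: { enabled: true, modelPath: 'meet.json' } });
const result = await human.detect(video); // the person mask is composited into the processed canvas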
||||||
|
|
||||||
|
@ -238,7 +242,7 @@ function info() {
|
||||||
}
|
}
|
||||||
|
|
||||||
// src/human.ts
|
// src/human.ts
|
||||||
var tf20 = __toModule(require_tfjs_esm());
|
var tf21 = __toModule(require_tfjs_esm());
|
||||||
|
|
||||||
// src/tfjs/backend.ts
|
// src/tfjs/backend.ts
|
||||||
var tf = __toModule(require_tfjs_esm());
|
var tf = __toModule(require_tfjs_esm());
|
||||||
|
@ -312,13 +316,6 @@ function register() {
|
||||||
var tf8 = __toModule(require_tfjs_esm());
|
var tf8 = __toModule(require_tfjs_esm());
|
||||||
|
|
||||||
// src/blazeface/facemesh.ts
|
// src/blazeface/facemesh.ts
|
||||||
var facemesh_exports = {};
|
|
||||||
__export(facemesh_exports, {
|
|
||||||
load: () => load2,
|
|
||||||
predict: () => predict,
|
|
||||||
triangulation: () => triangulation,
|
|
||||||
uvmap: () => uvmap
|
|
||||||
});
|
|
||||||
var tf5 = __toModule(require_tfjs_esm());
|
var tf5 = __toModule(require_tfjs_esm());
|
||||||
|
|
||||||
// src/blazeface/blazeface.ts
|
// src/blazeface/blazeface.ts
|
||||||
|
@ -343,16 +340,16 @@ function getBoxCenter(box6) {
|
||||||
box6.startPoint[1] + (box6.endPoint[1] - box6.startPoint[1]) / 2
|
box6.startPoint[1] + (box6.endPoint[1] - box6.startPoint[1]) / 2
|
||||||
];
|
];
|
||||||
}
|
}
|
||||||
function cutBoxFromImageAndResize(box6, image15, cropSize) {
|
function cutBoxFromImageAndResize(box6, image16, cropSize) {
|
||||||
const h = image15.shape[1];
|
const h = image16.shape[1];
|
||||||
const w = image15.shape[2];
|
const w = image16.shape[2];
|
||||||
const boxes = [[
|
const boxes = [[
|
||||||
box6.startPoint[1] / h,
|
box6.startPoint[1] / h,
|
||||||
box6.startPoint[0] / w,
|
box6.startPoint[0] / w,
|
||||||
box6.endPoint[1] / h,
|
box6.endPoint[1] / h,
|
||||||
box6.endPoint[0] / w
|
box6.endPoint[0] / w
|
||||||
]];
|
]];
|
||||||
return tf2.image.cropAndResize(image15, boxes, [0], cropSize);
|
return tf2.image.cropAndResize(image16, boxes, [0], cropSize);
|
||||||
}
|
}
|
||||||
function enlargeBox(box6, factor = 1.5) {
|
function enlargeBox(box6, factor = 1.5) {
|
||||||
const center = getBoxCenter(box6);
|
const center = getBoxCenter(box6);
|
||||||
|
@ -486,11 +483,11 @@ function decodeBounds(boxOutputs, anchors3, inputSize) {
|
||||||
return tf3.concat2d([startNormalized, endNormalized], concatAxis);
|
return tf3.concat2d([startNormalized, endNormalized], concatAxis);
|
||||||
}
|
}
|
||||||
var BlazeFaceModel = class {
|
var BlazeFaceModel = class {
|
||||||
constructor(model9, config3) {
|
constructor(model10, config3) {
|
||||||
this.model = model9;
|
this.model = model10;
|
||||||
this.anchorsData = generateAnchors(model9.inputs[0].shape[1]);
|
this.anchorsData = generateAnchors(model10.inputs[0].shape[1]);
|
||||||
this.anchors = tf3.tensor2d(this.anchorsData);
|
this.anchors = tf3.tensor2d(this.anchorsData);
|
||||||
this.inputSize = model9.inputs[0].shape[2];
|
this.inputSize = model10.inputs[0].shape[2];
|
||||||
this.config = config3;
|
this.config = config3;
|
||||||
}
|
}
|
||||||
async getBoundingBoxes(inputImage) {
|
async getBoundingBoxes(inputImage) {
|
||||||
|
@ -539,12 +536,12 @@ var BlazeFaceModel = class {
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
async function load(config3) {
|
async function load(config3) {
|
||||||
const model9 = await tf3.loadGraphModel(join(config3.modelBasePath, config3.face.detector.modelPath), { fromTFHub: config3.face.detector.modelPath.includes("tfhub.dev") });
|
const model10 = await tf3.loadGraphModel(join(config3.modelBasePath, config3.face.detector.modelPath), { fromTFHub: config3.face.detector.modelPath.includes("tfhub.dev") });
|
||||||
const blazeFace = new BlazeFaceModel(model9, config3);
|
const blazeFace = new BlazeFaceModel(model10, config3);
|
||||||
if (!model9 || !model9.modelUrl)
|
if (!model10 || !model10.modelUrl)
|
||||||
log("load model failed:", config3.face.detector.modelPath);
|
log("load model failed:", config3.face.detector.modelPath);
|
||||||
else if (config3.debug)
|
else if (config3.debug)
|
||||||
log("load model:", model9.modelUrl);
|
log("load model:", model10.modelUrl);
|
||||||
return blazeFace;
|
return blazeFace;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4159,11 +4156,6 @@ var triangulation = TRI468;
|
||||||
var uvmap = UV468;
|
var uvmap = UV468;
|
||||||
|
|
||||||
// src/emotion/emotion.ts
|
// src/emotion/emotion.ts
|
||||||
var emotion_exports = {};
|
|
||||||
__export(emotion_exports, {
|
|
||||||
load: () => load3,
|
|
||||||
predict: () => predict2
|
|
||||||
});
|
|
||||||
var tf6 = __toModule(require_tfjs_esm());
|
var tf6 = __toModule(require_tfjs_esm());
|
||||||
var annotations = ["angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"];
|
var annotations = ["angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"];
|
||||||
var model;
|
var model;
|
||||||
|
@ -4182,7 +4174,7 @@ async function load3(config3) {
|
||||||
log("cached model:", model.modelUrl);
|
log("cached model:", model.modelUrl);
|
||||||
return model;
|
return model;
|
||||||
}
|
}
|
||||||
async function predict2(image15, config3, idx, count2) {
|
async function predict2(image16, config3, idx, count2) {
|
||||||
if (!model)
|
if (!model)
|
||||||
return null;
|
return null;
|
||||||
if (skipped < config3.face.emotion.skipFrames && config3.skipFrame && lastCount === count2 && last[idx] && last[idx].length > 0) {
|
if (skipped < config3.face.emotion.skipFrames && config3.skipFrame && lastCount === count2 && last[idx] && last[idx].length > 0) {
|
||||||
|
@ -4191,7 +4183,7 @@ async function predict2(image15, config3, idx, count2) {
|
||||||
}
|
}
|
||||||
skipped = 0;
|
skipped = 0;
|
||||||
return new Promise(async (resolve) => {
|
return new Promise(async (resolve) => {
|
||||||
const resize = tf6.image.resizeBilinear(image15, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
|
const resize = tf6.image.resizeBilinear(image16, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
|
||||||
const [red, green, blue] = tf6.split(resize, 3, 3);
|
const [red, green, blue] = tf6.split(resize, 3, 3);
|
||||||
resize.dispose();
|
resize.dispose();
|
||||||
const redNorm = tf6.mul(red, rgb[0]);
|
const redNorm = tf6.mul(red, rgb[0]);
|
||||||
|
@ -4225,14 +4217,6 @@ async function predict2(image15, config3, idx, count2) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// src/faceres/faceres.ts
|
// src/faceres/faceres.ts
|
||||||
var faceres_exports = {};
|
|
||||||
__export(faceres_exports, {
|
|
||||||
enhance: () => enhance,
|
|
||||||
load: () => load4,
|
|
||||||
match: () => match,
|
|
||||||
predict: () => predict3,
|
|
||||||
similarity: () => similarity
|
|
||||||
});
|
|
||||||
var tf7 = __toModule(require_tfjs_esm());
|
var tf7 = __toModule(require_tfjs_esm());
|
||||||
var model2;
|
var model2;
|
||||||
var last2 = [];
|
var last2 = [];
|
||||||
|
@ -4275,7 +4259,7 @@ function match(embedding, db, threshold = 0) {
|
||||||
return best;
|
return best;
|
||||||
}
|
}
|
||||||
function enhance(input) {
|
function enhance(input) {
|
||||||
const image15 = tf7.tidy(() => {
|
const image16 = tf7.tidy(() => {
|
||||||
const tensor2 = input.image || input.tensor || input;
|
const tensor2 = input.image || input.tensor || input;
|
||||||
if (!(tensor2 instanceof tf7.Tensor))
|
if (!(tensor2 instanceof tf7.Tensor))
|
||||||
return null;
|
return null;
|
||||||
|
@ -4286,9 +4270,9 @@ function enhance(input) {
|
||||||
const norm = crop.mul(255);
|
const norm = crop.mul(255);
|
||||||
return norm;
|
return norm;
|
||||||
});
|
});
|
||||||
return image15;
|
return image16;
|
||||||
}
|
}
|
||||||
async function predict3(image15, config3, idx, count2) {
|
async function predict3(image16, config3, idx, count2) {
|
||||||
var _a, _b;
|
var _a, _b;
|
||||||
if (!model2)
|
if (!model2)
|
||||||
return null;
|
return null;
|
||||||
|
@ -4298,7 +4282,7 @@ async function predict3(image15, config3, idx, count2) {
|
||||||
}
|
}
|
||||||
skipped2 = 0;
|
skipped2 = 0;
|
||||||
return new Promise(async (resolve) => {
|
return new Promise(async (resolve) => {
|
||||||
const enhanced = enhance(image15);
|
const enhanced = enhance(image16);
|
||||||
let resT;
|
let resT;
|
||||||
const obj = {
|
const obj = {
|
||||||
age: 0,
|
age: 0,
|
||||||
|
@ -4513,11 +4497,6 @@ var detectFace = async (parent, input) => {
|
||||||
};
|
};
|
||||||
|
|
||||||
// src/posenet/posenet.ts
|
// src/posenet/posenet.ts
|
||||||
var posenet_exports = {};
|
|
||||||
__export(posenet_exports, {
|
|
||||||
load: () => load5,
|
|
||||||
predict: () => predict4
|
|
||||||
});
|
|
||||||
var tf9 = __toModule(require_tfjs_esm());
|
var tf9 = __toModule(require_tfjs_esm());
|
||||||
|
|
||||||
// src/posenet/keypoints.ts
|
// src/posenet/keypoints.ts
|
||||||
|
@ -4862,11 +4841,6 @@ async function load5(config3) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// src/handpose/handpose.ts
|
// src/handpose/handpose.ts
|
||||||
var handpose_exports = {};
|
|
||||||
__export(handpose_exports, {
|
|
||||||
load: () => load6,
|
|
||||||
predict: () => predict5
|
|
||||||
});
|
|
||||||
var tf13 = __toModule(require_tfjs_esm());
|
var tf13 = __toModule(require_tfjs_esm());
|
||||||
|
|
||||||
// src/handpose/handdetector.ts
|
// src/handpose/handdetector.ts
|
||||||
|
@ -4886,16 +4860,16 @@ function getBoxCenter2(box6) {
|
||||||
box6.startPoint[1] + (box6.endPoint[1] - box6.startPoint[1]) / 2
|
box6.startPoint[1] + (box6.endPoint[1] - box6.startPoint[1]) / 2
|
||||||
];
|
];
|
||||||
}
|
}
|
||||||
function cutBoxFromImageAndResize2(box6, image15, cropSize) {
|
function cutBoxFromImageAndResize2(box6, image16, cropSize) {
|
||||||
const h = image15.shape[1];
|
const h = image16.shape[1];
|
||||||
const w = image15.shape[2];
|
const w = image16.shape[2];
|
||||||
const boxes = [[
|
const boxes = [[
|
||||||
box6.startPoint[1] / h,
|
box6.startPoint[1] / h,
|
||||||
box6.startPoint[0] / w,
|
box6.startPoint[0] / w,
|
||||||
box6.endPoint[1] / h,
|
box6.endPoint[1] / h,
|
||||||
box6.endPoint[0] / w
|
box6.endPoint[0] / w
|
||||||
]];
|
]];
|
||||||
return tf10.image.cropAndResize(image15, boxes, [0], cropSize);
|
return tf10.image.cropAndResize(image16, boxes, [0], cropSize);
|
||||||
}
|
}
|
||||||
function scaleBoxCoordinates2(box6, factor) {
|
function scaleBoxCoordinates2(box6, factor) {
|
||||||
const startPoint = [box6.startPoint[0] * factor[0], box6.startPoint[1] * factor[1]];
|
const startPoint = [box6.startPoint[0] * factor[0], box6.startPoint[1] * factor[1]];
|
||||||
|
@ -7874,9 +7848,9 @@ var anchors = [
|
||||||
|
|
||||||
// src/handpose/handdetector.ts
|
// src/handpose/handdetector.ts
|
||||||
var HandDetector = class {
|
var HandDetector = class {
|
||||||
constructor(model9) {
|
constructor(model10) {
|
||||||
var _a;
|
var _a;
|
||||||
this.model = model9;
|
this.model = model10;
|
||||||
this.anchors = anchors.map((anchor) => [anchor.x, anchor.y]);
|
this.anchors = anchors.map((anchor) => [anchor.x, anchor.y]);
|
||||||
this.anchorsTensor = tf11.tensor2d(this.anchors);
|
this.anchorsTensor = tf11.tensor2d(this.anchors);
|
||||||
this.inputSize = (_a = this.model) == null ? void 0 : _a.inputs[0].shape[2];
|
this.inputSize = (_a = this.model) == null ? void 0 : _a.inputs[0].shape[2];
|
||||||
|
@ -7930,9 +7904,9 @@ var HandDetector = class {
|
||||||
async estimateHandBounds(input, config3) {
|
async estimateHandBounds(input, config3) {
|
||||||
const inputHeight = input.shape[1];
|
const inputHeight = input.shape[1];
|
||||||
const inputWidth = input.shape[2];
|
const inputWidth = input.shape[2];
|
||||||
const image15 = tf11.tidy(() => input.resizeBilinear([this.inputSize, this.inputSize]).div(127.5).sub(1));
|
const image16 = tf11.tidy(() => input.resizeBilinear([this.inputSize, this.inputSize]).div(127.5).sub(1));
|
||||||
const predictions = await this.getBoxes(image15, config3);
|
const predictions = await this.getBoxes(image16, config3);
|
||||||
image15.dispose();
|
image16.dispose();
|
||||||
const hands = [];
|
const hands = [];
|
||||||
if (!predictions || predictions.length === 0)
|
if (!predictions || predictions.length === 0)
|
||||||
return hands;
|
return hands;
|
||||||
|
@ -8077,11 +8051,11 @@ var HandPipeline = class {
|
||||||
Math.trunc(coord[2])
|
Math.trunc(coord[2])
|
||||||
]);
|
]);
|
||||||
}
|
}
|
||||||
async estimateHands(image15, config3) {
|
async estimateHands(image16, config3) {
|
||||||
let useFreshBox = false;
|
let useFreshBox = false;
|
||||||
let boxes;
|
let boxes;
|
||||||
if (this.skipped === 0 || this.skipped > config3.hand.skipFrames || !config3.hand.landmarks || !config3.skipFrame) {
|
if (this.skipped === 0 || this.skipped > config3.hand.skipFrames || !config3.hand.landmarks || !config3.skipFrame) {
|
||||||
boxes = await this.handDetector.estimateHandBounds(image15, config3);
|
boxes = await this.handDetector.estimateHandBounds(image16, config3);
|
||||||
this.skipped = 0;
|
this.skipped = 0;
|
||||||
}
|
}
|
||||||
if (config3.skipFrame)
|
if (config3.skipFrame)
|
||||||
|
@ -8100,8 +8074,8 @@ var HandPipeline = class {
|
||||||
if (config3.hand.landmarks) {
|
if (config3.hand.landmarks) {
|
||||||
const angle = config3.hand.rotation ? computeRotation2(currentBox.palmLandmarks[palmLandmarksPalmBase], currentBox.palmLandmarks[palmLandmarksMiddleFingerBase]) : 0;
|
const angle = config3.hand.rotation ? computeRotation2(currentBox.palmLandmarks[palmLandmarksPalmBase], currentBox.palmLandmarks[palmLandmarksMiddleFingerBase]) : 0;
|
||||||
const palmCenter = getBoxCenter2(currentBox);
|
const palmCenter = getBoxCenter2(currentBox);
|
||||||
const palmCenterNormalized = [palmCenter[0] / image15.shape[2], palmCenter[1] / image15.shape[1]];
|
const palmCenterNormalized = [palmCenter[0] / image16.shape[2], palmCenter[1] / image16.shape[1]];
|
||||||
const rotatedImage = config3.hand.rotation && tf12.ENV.flags.IS_BROWSER ? tf12.image.rotateWithOffset(image15, angle, 0, palmCenterNormalized) : image15.clone();
|
const rotatedImage = config3.hand.rotation && tf12.ENV.flags.IS_BROWSER ? tf12.image.rotateWithOffset(image16, angle, 0, palmCenterNormalized) : image16.clone();
|
||||||
const rotationMatrix = buildRotationMatrix2(-angle, palmCenter);
|
const rotationMatrix = buildRotationMatrix2(-angle, palmCenter);
|
||||||
const newBox = useFreshBox ? this.getBoxForPalmLandmarks(currentBox.palmLandmarks, rotationMatrix) : currentBox;
|
const newBox = useFreshBox ? this.getBoxForPalmLandmarks(currentBox.palmLandmarks, rotationMatrix) : currentBox;
|
||||||
const croppedInput = cutBoxFromImageAndResize2(newBox, rotatedImage, [this.inputSize, this.inputSize]);
|
const croppedInput = cutBoxFromImageAndResize2(newBox, rotatedImage, [this.inputSize, this.inputSize]);
|
||||||
|
@ -8232,11 +8206,6 @@ async function load6(config3) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// src/blazepose/blazepose.ts
|
// src/blazepose/blazepose.ts
|
||||||
var blazepose_exports = {};
|
|
||||||
__export(blazepose_exports, {
|
|
||||||
load: () => load7,
|
|
||||||
predict: () => predict6
|
|
||||||
});
|
|
||||||
var tf14 = __toModule(require_tfjs_esm());
|
var tf14 = __toModule(require_tfjs_esm());
|
||||||
|
|
||||||
// src/blazepose/annotations.ts
|
// src/blazepose/annotations.ts
|
||||||
|
@ -8330,14 +8299,14 @@ async function load7(config3) {
|
||||||
log("cached model:", model4["modelUrl"]);
|
log("cached model:", model4["modelUrl"]);
|
||||||
return model4;
|
return model4;
|
||||||
}
|
}
|
||||||
async function predict6(image15, config3) {
|
async function predict6(image16, config3) {
|
||||||
var _a;
|
var _a;
|
||||||
if (!model4)
|
if (!model4)
|
||||||
return [];
|
return [];
|
||||||
if (!config3.body.enabled)
|
if (!config3.body.enabled)
|
||||||
return [];
|
return [];
|
||||||
const imgSize = { width: image15.shape[2] || 0, height: image15.shape[1] || 0 };
|
const imgSize = { width: image16.shape[2] || 0, height: image16.shape[1] || 0 };
|
||||||
const resize = tf14.image.resizeBilinear(image15, [model4["width"], model4["height"]], false);
|
const resize = tf14.image.resizeBilinear(image16, [model4["width"], model4["height"]], false);
|
||||||
const normalize = tf14.div(resize, [255]);
|
const normalize = tf14.div(resize, [255]);
|
||||||
resize.dispose();
|
resize.dispose();
|
||||||
const resT = await model4.predict(normalize);
|
const resT = await model4.predict(normalize);
|
||||||
|
@ -8413,7 +8382,7 @@ function max2d(inputs, minScore) {
|
||||||
return [0, 0, newScore];
|
return [0, 0, newScore];
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
async function predict7(image15, config3) {
|
async function predict7(image16, config3) {
|
||||||
if (skipped3 < config3.body.skipFrames && config3.skipFrame && Object.keys(keypoints).length > 0) {
|
if (skipped3 < config3.body.skipFrames && config3.skipFrame && Object.keys(keypoints).length > 0) {
|
||||||
skipped3++;
|
skipped3++;
|
||||||
return [{ id: 0, score, box: box4, boxRaw, keypoints }];
|
return [{ id: 0, score, box: box4, boxRaw, keypoints }];
|
||||||
|
@ -8423,7 +8392,7 @@ async function predict7(image15, config3) {
|
||||||
const tensor2 = tf15.tidy(() => {
|
const tensor2 = tf15.tidy(() => {
|
||||||
if (!model5.inputs[0].shape)
|
if (!model5.inputs[0].shape)
|
||||||
return null;
|
return null;
|
||||||
const resize = tf15.image.resizeBilinear(image15, [model5.inputs[0].shape[2], model5.inputs[0].shape[1]], false);
|
const resize = tf15.image.resizeBilinear(image16, [model5.inputs[0].shape[2], model5.inputs[0].shape[1]], false);
|
||||||
const enhance2 = tf15.mul(resize, 2);
|
const enhance2 = tf15.mul(resize, 2);
|
||||||
const norm = enhance2.sub(1);
|
const norm = enhance2.sub(1);
|
||||||
return norm;
|
return norm;
|
||||||
|
@ -8434,10 +8403,10 @@ async function predict7(image15, config3) {
|
||||||
tensor2.dispose();
|
tensor2.dispose();
|
||||||
if (resT) {
|
if (resT) {
|
||||||
keypoints.length = 0;
|
keypoints.length = 0;
|
||||||
const squeeze3 = resT.squeeze();
|
const squeeze4 = resT.squeeze();
|
||||||
tf15.dispose(resT);
|
tf15.dispose(resT);
|
||||||
const stack2 = squeeze3.unstack(2);
|
const stack2 = squeeze4.unstack(2);
|
||||||
tf15.dispose(squeeze3);
|
tf15.dispose(squeeze4);
|
||||||
for (let id = 0; id < stack2.length; id++) {
|
for (let id = 0; id < stack2.length; id++) {
|
||||||
const [x2, y2, partScore] = max2d(stack2[id], config3.body.minConfidence);
|
const [x2, y2, partScore] = max2d(stack2[id], config3.body.minConfidence);
|
||||||
if (score > config3.body.minConfidence) {
|
if (score > config3.body.minConfidence) {
|
||||||
|
@ -8449,8 +8418,8 @@ async function predict7(image15, config3) {
|
||||||
y2 / model5.inputs[0].shape[1]
|
y2 / model5.inputs[0].shape[1]
|
||||||
],
|
],
|
||||||
position: [
|
position: [
|
||||||
Math.round(image15.shape[2] * x2 / model5.inputs[0].shape[2]),
|
Math.round(image16.shape[2] * x2 / model5.inputs[0].shape[2]),
|
||||||
Math.round(image15.shape[1] * y2 / model5.inputs[0].shape[1])
|
Math.round(image16.shape[1] * y2 / model5.inputs[0].shape[1])
|
||||||
]
|
]
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
@ -8498,7 +8467,7 @@ async function load9(config3) {
|
||||||
log("cached model:", model6["modelUrl"]);
|
log("cached model:", model6["modelUrl"]);
|
||||||
return model6;
|
return model6;
|
||||||
}
|
}
|
||||||
async function predict8(image15, config3) {
|
async function predict8(image16, config3) {
|
||||||
if (skipped4 < config3.body.skipFrames && config3.skipFrame && Object.keys(keypoints2).length > 0) {
|
if (skipped4 < config3.body.skipFrames && config3.skipFrame && Object.keys(keypoints2).length > 0) {
|
||||||
skipped4++;
|
skipped4++;
|
||||||
return [{ id: 0, score: score2, box: box5, boxRaw: boxRaw2, keypoints: keypoints2 }];
|
return [{ id: 0, score: score2, box: box5, boxRaw: boxRaw2, keypoints: keypoints2 }];
|
||||||
|
@ -8508,7 +8477,7 @@ async function predict8(image15, config3) {
|
||||||
const tensor2 = tf16.tidy(() => {
|
const tensor2 = tf16.tidy(() => {
|
||||||
if (!model6.inputs[0].shape)
|
if (!model6.inputs[0].shape)
|
||||||
return null;
|
return null;
|
||||||
const resize = tf16.image.resizeBilinear(image15, [model6.inputs[0].shape[2], model6.inputs[0].shape[1]], false);
|
const resize = tf16.image.resizeBilinear(image16, [model6.inputs[0].shape[2], model6.inputs[0].shape[1]], false);
|
||||||
const cast2 = tf16.cast(resize, "int32");
|
const cast2 = tf16.cast(resize, "int32");
|
||||||
return cast2;
|
return cast2;
|
||||||
});
|
});
|
||||||
|
@ -8532,8 +8501,8 @@ async function predict8(image15, config3) {
|
||||||
kpt3[id][0]
|
kpt3[id][0]
|
||||||
],
|
],
|
||||||
position: [
|
position: [
|
||||||
Math.round((image15.shape[2] || 0) * kpt3[id][1]),
|
Math.round((image16.shape[2] || 0) * kpt3[id][1]),
|
||||||
Math.round((image15.shape[1] || 0) * kpt3[id][0])
|
Math.round((image16.shape[1] || 0) * kpt3[id][0])
|
||||||
]
|
]
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
@ -8561,11 +8530,6 @@ async function predict8(image15, config3) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// src/object/nanodet.ts
|
// src/object/nanodet.ts
|
||||||
var nanodet_exports = {};
|
|
||||||
__export(nanodet_exports, {
|
|
||||||
load: () => load10,
|
|
||||||
predict: () => predict9
|
|
||||||
});
|
|
||||||
var tf17 = __toModule(require_tfjs_esm());
|
var tf17 = __toModule(require_tfjs_esm());
|
||||||
|
|
||||||
// src/object/labels.ts
|
// src/object/labels.ts
|
||||||
|
@ -8733,15 +8697,15 @@ async function process2(res, inputSize, outputShape, config3) {
|
||||||
results = results.filter((a, idx) => nmsIdx.includes(idx)).sort((a, b) => b.score - a.score);
|
results = results.filter((a, idx) => nmsIdx.includes(idx)).sort((a, b) => b.score - a.score);
|
||||||
return results;
|
return results;
|
||||||
}
|
}
|
||||||
async function predict9(image15, config3) {
|
async function predict9(image16, config3) {
|
||||||
if (skipped5 < config3.object.skipFrames && config3.skipFrame && last3.length > 0) {
|
if (skipped5 < config3.object.skipFrames && config3.skipFrame && last3.length > 0) {
|
||||||
skipped5++;
|
skipped5++;
|
||||||
return last3;
|
return last3;
|
||||||
}
|
}
|
||||||
skipped5 = 0;
|
skipped5 = 0;
|
||||||
return new Promise(async (resolve) => {
|
return new Promise(async (resolve) => {
|
||||||
const outputSize = [image15.shape[2], image15.shape[1]];
|
const outputSize = [image16.shape[2], image16.shape[1]];
|
||||||
const resize = tf17.image.resizeBilinear(image15, [model7.inputSize, model7.inputSize], false);
|
const resize = tf17.image.resizeBilinear(image16, [model7.inputSize, model7.inputSize], false);
|
||||||
const norm = resize.div(255);
|
const norm = resize.div(255);
|
||||||
const transpose = norm.transpose([0, 3, 1, 2]);
|
const transpose = norm.transpose([0, 3, 1, 2]);
|
||||||
norm.dispose();
|
norm.dispose();
|
||||||
|
@ -8757,11 +8721,6 @@ async function predict9(image15, config3) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// src/object/centernet.ts
|
// src/object/centernet.ts
|
||||||
var centernet_exports = {};
|
|
||||||
__export(centernet_exports, {
|
|
||||||
load: () => load11,
|
|
||||||
predict: () => predict10
|
|
||||||
});
|
|
||||||
var tf18 = __toModule(require_tfjs_esm());
|
var tf18 = __toModule(require_tfjs_esm());
|
||||||
var model8;
|
var model8;
|
||||||
var last4 = [];
|
var last4 = [];
|
||||||
|
@ -9100,8 +9059,8 @@ function GLImageFilter(params) {
|
||||||
gl.uniform1f(_currentProgram.uniform.flipY, flipY ? -1 : 1);
|
gl.uniform1f(_currentProgram.uniform.flipY, flipY ? -1 : 1);
|
||||||
gl.drawArrays(gl.TRIANGLES, 0, 6);
|
gl.drawArrays(gl.TRIANGLES, 0, 6);
|
||||||
};
|
};
|
||||||
this.apply = function(image15) {
|
this.apply = function(image16) {
|
||||||
_resize(image15.width, image15.height);
|
_resize(image16.width, image16.height);
|
||||||
_drawCount = 0;
|
_drawCount = 0;
|
||||||
if (!_sourceTexture)
|
if (!_sourceTexture)
|
||||||
_sourceTexture = gl.createTexture();
|
_sourceTexture = gl.createTexture();
|
||||||
|
@ -9110,7 +9069,7 @@ function GLImageFilter(params) {
|
||||||
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
|
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
|
||||||
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
|
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
|
||||||
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
|
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
|
||||||
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image15);
|
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image16);
|
||||||
if (_filterChain.length === 0) {
|
if (_filterChain.length === 0) {
|
||||||
_draw();
|
_draw();
|
||||||
return _canvas;
|
return _canvas;
|
||||||
|
@ -9768,14 +9727,14 @@ function process4(input, config3) {
|
||||||
const shape = [outCanvas.height, outCanvas.width, 3];
|
const shape = [outCanvas.height, outCanvas.width, 3];
|
||||||
pixels = tf19.tensor3d(outCanvas.data, shape, "int32");
|
pixels = tf19.tensor3d(outCanvas.data, shape, "int32");
|
||||||
} else if (outCanvas instanceof ImageData) {
|
} else if (outCanvas instanceof ImageData) {
|
||||||
pixels = tf19.browser.fromPixels(outCanvas);
|
pixels = tf19.browser ? tf19.browser.fromPixels(outCanvas) : null;
|
||||||
} else if (config3.backend === "webgl" || config3.backend === "humangl") {
|
} else if (config3.backend === "webgl" || config3.backend === "humangl") {
|
||||||
const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
|
const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
|
||||||
tempCanvas.width = targetWidth;
|
tempCanvas.width = targetWidth;
|
||||||
tempCanvas.height = targetHeight;
|
tempCanvas.height = targetHeight;
|
||||||
const tempCtx = tempCanvas.getContext("2d");
|
const tempCtx = tempCanvas.getContext("2d");
|
||||||
tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
|
tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
|
||||||
pixels = tf19.browser.fromPixels(tempCanvas);
|
pixels = tf19.browser ? tf19.browser.fromPixels(tempCanvas) : null;
|
||||||
} else {
|
} else {
|
||||||
const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
|
const tempCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement("canvas");
|
||||||
tempCanvas.width = targetWidth;
|
tempCanvas.width = targetWidth;
|
||||||
|
@@ -9783,13 +9742,15 @@ function process4(input, config3) {
    const tempCtx = tempCanvas.getContext("2d");
    tempCtx == null ? void 0 : tempCtx.drawImage(outCanvas, 0, 0);
    const data = tempCtx == null ? void 0 : tempCtx.getImageData(0, 0, targetWidth, targetHeight);
-   pixels = tf19.browser.fromPixels(data);
+   pixels = tf19.browser ? tf19.browser.fromPixels(data) : null;
  }
+ if (pixels) {
    const casted = pixels.toFloat();
    tensor2 = casted.expandDims(0);
    pixels.dispose();
    casted.dispose();
  }
+ }
  const canvas2 = config3.filter.return ? outCanvas : null;
  return { tensor: tensor2, canvas: canvas2 };
}
@@ -9945,10 +9906,10 @@ async function face2(inCanvas2, result, drawOptions) {
  if (f.iris)
    labels2.push(`distance: ${f.iris}`);
  if (f.emotion && f.emotion.length > 0) {
-   const emotion2 = f.emotion.map((a) => `${Math.trunc(100 * a.score)}% ${a.emotion}`);
-   if (emotion2.length > 3)
-     emotion2.length = 3;
-   labels2.push(emotion2.join(" "));
+   const emotion3 = f.emotion.map((a) => `${Math.trunc(100 * a.score)}% ${a.emotion}`);
+   if (emotion3.length > 3)
+     emotion3.length = 3;
+   labels2.push(emotion3.join(" "));
  }
  if (f.rotation && f.rotation.angle && f.rotation.gaze) {
    if (f.rotation.angle.roll)
@@ -10376,6 +10337,7 @@ function calc(newResult) {
  var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u;
  const elapsed = Date.now() - newResult.timestamp;
  const bufferedFactor = elapsed < 1e3 ? 8 - Math.log(elapsed) : 1;
+ bufferedResult.canvas = newResult.canvas;
  if (!bufferedResult.body || newResult.body.length !== bufferedResult.body.length) {
    bufferedResult.body = JSON.parse(JSON.stringify(newResult.body));
  } else {
@@ -10454,6 +10416,60 @@ function calc(newResult) {
  return bufferedResult;
}

+// src/segmentation/segmentation.ts
+var tf20 = __toModule(require_tfjs_esm());
+var model9;
+async function load12(config3) {
+  if (!model9) {
+    model9 = await tf20.loadGraphModel(join(config3.modelBasePath, config3.segmentation.modelPath));
+    if (!model9 || !model9["modelUrl"])
+      log("load model failed:", config3.segmentation.modelPath);
+    else if (config3.debug)
+      log("load model:", model9["modelUrl"]);
+  } else if (config3.debug)
+    log("cached model:", model9["modelUrl"]);
+  return model9;
+}
+async function predict11(input, config3) {
+  var _a, _b, _c, _d;
+  if (!config3.segmentation.enabled || !input.tensor || !input.canvas)
+    return false;
+  if (!model9 || !model9.inputs[0].shape)
+    return false;
+  const resizeInput = tf20.image.resizeBilinear(input.tensor, [model9.inputs[0].shape[1], model9.inputs[0].shape[2]], false);
+  const norm = resizeInput.div(255);
+  const res = model9.predict(norm);
+  tf20.dispose(resizeInput);
+  tf20.dispose(norm);
+  const overlay = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(input.canvas.width, input.canvas.height) : document.createElement("canvas");
+  overlay.width = input.canvas.width;
+  overlay.height = input.canvas.height;
+  const squeeze4 = tf20.squeeze(res, 0);
+  let resizeOutput;
+  if (squeeze4.shape[2] === 2) {
+    const softmax = squeeze4.softmax();
+    const [bg, fg] = tf20.unstack(softmax, 2);
+    tf20.dispose(softmax);
+    const expand = fg.expandDims(2);
+    tf20.dispose(bg);
+    tf20.dispose(fg);
+    resizeOutput = tf20.image.resizeBilinear(expand, [(_a = input.tensor) == null ? void 0 : _a.shape[1], (_b = input.tensor) == null ? void 0 : _b.shape[2]]);
+    tf20.dispose(expand);
+  } else {
+    resizeOutput = tf20.image.resizeBilinear(squeeze4, [(_c = input.tensor) == null ? void 0 : _c.shape[1], (_d = input.tensor) == null ? void 0 : _d.shape[2]]);
+  }
+  if (tf20.browser)
+    await tf20.browser.toPixels(resizeOutput, overlay);
+  tf20.dispose(resizeOutput);
+  tf20.dispose(squeeze4);
+  tf20.dispose(res);
+  const ctx = input.canvas.getContext("2d");
+  ctx.globalCompositeOperation = "darken";
+  await (ctx == null ? void 0 : ctx.drawImage(overlay, 0, 0));
+  ctx.globalCompositeOperation = "source-in";
+  return true;
+}
+
// src/sample.ts
var face3 = `
/9j/4AAQSkZJRgABAQEAYABgAAD/4QBoRXhpZgAATU0AKgAAAAgABAEaAAUAAAABAAAAPgEbAAUA
@@ -11205,7 +11221,7 @@ var Human = class {
    return null;
  if (!input)
    return "input is not defined";
- if (this.tf.ENV.flags.IS_NODE && !(input instanceof tf20.Tensor))
+ if (this.tf.ENV.flags.IS_NODE && !(input instanceof tf21.Tensor))
    return "input must be a tensor";
  try {
    this.tf.getBackend();
@@ -11348,8 +11364,8 @@ var Human = class {
  if (!img)
    return null;
  let res;
- if (typeof tf20["node"] !== "undefined") {
-   const data = tf20["node"].decodeJpeg(img);
+ if (typeof tf21["node"] !== "undefined") {
+   const data = tf21["node"].decodeJpeg(img);
    const expanded = data.expandDims(0);
    this.tf.dispose(data);
    res = await this.detect(expanded, this.config);
@@ -11361,7 +11377,7 @@ var Human = class {
    return res;
  });
  this.config = mergeDeep(config, userConfig || {});
- this.tf = tf20;
+ this.tf = tf21;
  this.draw = draw_exports;
  this.version = version;
  this.state = "idle";
@@ -11384,18 +11400,10 @@ var Human = class {
    embedding: null,
    nanodet: null,
    centernet: null,
-   faceres: null
+   faceres: null,
+   segmentation: null
  };
  this.image = (input) => process4(input, this.config);
- this.classes = {
-   facemesh: facemesh_exports,
-   emotion: emotion_exports,
-   faceres: faceres_exports,
-   body: this.config.body.modelPath.includes("posenet") ? posenet_exports : blazepose_exports,
-   hand: handpose_exports,
-   nanodet: nanodet_exports,
-   centernet: centernet_exports
- };
  this.faceTriangulation = triangulation;
  this.faceUVMap = uvmap;
  this.sysinfo = info();
@@ -11443,7 +11451,8 @@ var Human = class {
    this.models.movenet,
    this.models.nanodet,
    this.models.centernet,
-   this.models.faceres
+   this.models.faceres,
+   this.models.segmentation
  ] = await Promise.all([
    this.models.face || (this.config.face.enabled ? load2(this.config) : null),
    this.models.emotion || (this.config.face.enabled && this.config.face.emotion.enabled ? load3(this.config) : null),
@@ -11454,7 +11463,8 @@ var Human = class {
    this.models.movenet || (this.config.body.enabled && this.config.body.modelPath.includes("movenet") ? load9(this.config) : null),
    this.models.nanodet || (this.config.object.enabled && this.config.object.modelPath.includes("nanodet") ? load10(this.config) : null),
    this.models.centernet || (this.config.object.enabled && this.config.object.modelPath.includes("centernet") ? load11(this.config) : null),
-   this.models.faceres || (this.config.face.enabled && this.config.face.description.enabled ? load4(this.config) : null)
+   this.models.faceres || (this.config.face.enabled && this.config.face.description.enabled ? load4(this.config) : null),
+   this.models.segmentation || (this.config.segmentation.enabled ? load12(this.config) : null)
  ]);
} else {
  if (this.config.face.enabled && !this.models.face)
@@ -11477,6 +11487,8 @@ var Human = class {
    this.models.centernet = await load11(this.config);
  if (this.config.face.enabled && this.config.face.description.enabled && !this.models.faceres)
    this.models.faceres = await load4(this.config);
+ if (this.config.segmentation.enabled && !this.models.segmentation)
+   this.models.segmentation = await load12(this.config);
}
if (__privateGet(this, _firstRun)) {
  if (this.config.debug)
@@ -11611,6 +11623,16 @@ var Human = class {
  else if (this.performance.gesture)
    delete this.performance.gesture;
}
+ if (this.config.segmentation.enabled) {
+   this.analyze("Start Segmentation:");
+   this.state = "run:segmentation";
+   timeStamp = now();
+   await predict11(process5, this.config);
+   elapsedTime = Math.trunc(now() - timeStamp);
+   if (elapsedTime > 0)
+     this.performance.segmentation = elapsedTime;
+   this.analyze("End Segmentation:");
+ }
this.performance.total = Math.trunc(now() - timeStart);
this.state = "idle";
this.result = {
@@ -11627,7 +11649,7 @@ var Human = class {
      return join2(faceRes, bodyRes, handRes, gestureRes, (_a = process5 == null ? void 0 : process5.tensor) == null ? void 0 : _a.shape);
    }
  };
- tf20.dispose(process5.tensor);
+ tf21.dispose(process5.tensor);
  resolve(this.result);
});
}
File diff suppressed because one or more lines are too long
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
File diff suppressed because one or more lines are too long
@@ -196,6 +196,15 @@ export interface Config {
    maxDetected: number,
    skipFrames: number,
  },
+
+ /** Controls and configures all body segmentation module
+  * - enabled: true/false
+  * - modelPath: segmentation model, can be absolute path or relative to modelBasePath
+  */
+ segmentation: {
+   enabled: boolean,
+   modelPath: string,
+ },
}

const config: Config = {
@@ -338,5 +347,11 @@ const config: Config = {
    skipFrames: 19, // how many max frames to go without re-running the detector
    // only used when cacheSensitivity is not zero
  },
+
+ segmentation: {
+   enabled: false,
+   modelPath: 'selfie.json', // experimental: segmentation model, can be absolute path or relative to modelBasePath
+   // can be 'selfie' or 'meet'
+ },
};
export { config as defaults };
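For context, a minimal sketch of a user configuration that turns the new module on (a hypothetical usage example, not part of this diff; pick 'meet.json' or 'selfie.json', the two models added by this commit):

  // hypothetical usage sketch, assuming the standard Human constructor
  import Human from '@vladmandic/human';
  const human = new Human({
    segmentation: { enabled: true, modelPath: 'meet.json' }, // or 'selfie.json'
  });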
38 src/human.ts
@@ -24,6 +24,7 @@ import * as image from './image/image';
import * as draw from './draw/draw';
import * as persons from './persons';
import * as interpolate from './interpolate';
+import * as segmentation from './segmentation/segmentation';
import * as sample from './sample';
import * as app from '../package.json';
import { Tensor } from './tfjs/types';
@@ -114,16 +115,7 @@ export class Human {
    nanodet: Model | null,
    centernet: Model | null,
    faceres: Model | null,
-  };
-  /** @internal: Currently loaded classes */
-  classes: {
-    facemesh: typeof facemesh;
-    emotion: typeof emotion;
-    body: typeof posenet | typeof blazepose | typeof movenet;
-    hand: typeof handpose;
-    nanodet: typeof nanodet;
-    centernet: typeof centernet;
-    faceres: typeof faceres;
+    segmentation: Model | null,
  };
  /** Reference face triangualtion array of 468 points, used for triangle references between points */
  faceTriangulation: typeof facemesh.triangulation;
@@ -173,20 +165,12 @@ export class Human {
      nanodet: null,
      centernet: null,
      faceres: null,
+     segmentation: null,
    };
    // export access to image processing
    // @ts-ignore eslint-typescript cannot correctly infer type in anonymous function
    this.image = (input: Input) => image.process(input, this.config);
    // export raw access to underlying models
-   this.classes = {
-     facemesh,
-     emotion,
-     faceres,
-     body: this.config.body.modelPath.includes('posenet') ? posenet : blazepose,
-     hand: handpose,
-     nanodet,
-     centernet,
-   };
    this.faceTriangulation = facemesh.triangulation;
    this.faceUVMap = facemesh.uvmap;
    // include platform info
@@ -274,8 +258,10 @@ export class Human {
    }
    if (this.config.async) { // load models concurrently
      [
+       // @ts-ignore async model loading is not correctly inferred
        this.models.face,
        this.models.emotion,
+       // @ts-ignore async model loading is not correctly inferred
        this.models.handpose,
        this.models.posenet,
        this.models.blazepose,
@@ -284,6 +270,7 @@ export class Human {
        this.models.nanodet,
        this.models.centernet,
        this.models.faceres,
+       this.models.segmentation,
      ] = await Promise.all([
        this.models.face || (this.config.face.enabled ? facemesh.load(this.config) : null),
        this.models.emotion || ((this.config.face.enabled && this.config.face.emotion.enabled) ? emotion.load(this.config) : null),
@@ -295,6 +282,7 @@ export class Human {
        this.models.nanodet || (this.config.object.enabled && this.config.object.modelPath.includes('nanodet') ? nanodet.load(this.config) : null),
        this.models.centernet || (this.config.object.enabled && this.config.object.modelPath.includes('centernet') ? centernet.load(this.config) : null),
        this.models.faceres || ((this.config.face.enabled && this.config.face.description.enabled) ? faceres.load(this.config) : null),
+       this.models.segmentation || (this.config.segmentation.enabled ? segmentation.load(this.config) : null),
      ]);
    } else { // load models sequentially
      if (this.config.face.enabled && !this.models.face) this.models.face = await facemesh.load(this.config);
@@ -307,6 +295,7 @@ export class Human {
      if (this.config.object.enabled && !this.models.nanodet && this.config.object.modelPath.includes('nanodet')) this.models.nanodet = await nanodet.load(this.config);
      if (this.config.object.enabled && !this.models.centernet && this.config.object.modelPath.includes('centernet')) this.models.centernet = await centernet.load(this.config);
      if (this.config.face.enabled && this.config.face.description.enabled && !this.models.faceres) this.models.faceres = await faceres.load(this.config);
+     if (this.config.segmentation.enabled && !this.models.segmentation) this.models.segmentation = await segmentation.load(this.config);
    }

    if (this.#firstRun) { // print memory stats on first run
@@ -568,6 +557,17 @@ export class Human {
        else if (this.performance.gesture) delete this.performance.gesture;
      }
+
+     // run segmentation
+     if (this.config.segmentation.enabled) {
+       this.analyze('Start Segmentation:');
+       this.state = 'run:segmentation';
+       timeStamp = now();
+       await segmentation.predict(process, this.config);
+       elapsedTime = Math.trunc(now() - timeStamp);
+       if (elapsedTime > 0) this.performance.segmentation = elapsedTime;
+       this.analyze('End Segmentation:');
+     }
+
      this.performance.total = Math.trunc(now() - timeStart);
      this.state = 'idle';
      this.result = {
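Putting the load() and detect() changes together, a hypothetical end-to-end sketch (not part of this diff; the 'video' element id is made up, while `result.performance.segmentation` is the timing value added above):

  // hypothetical usage sketch
  const human = new Human({ segmentation: { enabled: true, modelPath: 'selfie.json' } });
  const video = document.getElementById('video') as HTMLVideoElement; // assumed source element
  await human.load();                        // now also loads the segmentation model
  const result = await human.detect(video);  // segmentation runs as the last detect stage
  console.log(result.performance.segmentation); // elapsed ms, set when non-zero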
@@ -138,7 +138,7 @@ export function process(input, config): { tensor: Tensor | null, canvas: Offscre
      const shape = [outCanvas.height, outCanvas.width, 3];
      pixels = tf.tensor3d(outCanvas.data, shape, 'int32');
    } else if (outCanvas instanceof ImageData) { // if input is imagedata, just use it
-     pixels = tf.browser.fromPixels(outCanvas);
+     pixels = tf.browser ? tf.browser.fromPixels(outCanvas) : null;
    } else if (config.backend === 'webgl' || config.backend === 'humangl') { // tf kernel-optimized method to get imagedata
      // we can use canvas as-is as it already has a context, so we do a silly one more canvas
      const tempCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement('canvas');
@@ -146,7 +146,7 @@ export function process(input, config): { tensor: Tensor | null, canvas: Offscre
      tempCanvas.height = targetHeight;
      const tempCtx = tempCanvas.getContext('2d');
      tempCtx?.drawImage(outCanvas, 0, 0);
-     pixels = tf.browser.fromPixels(tempCanvas);
+     pixels = tf.browser ? tf.browser.fromPixels(tempCanvas) : null;
    } else { // cpu and wasm kernel does not implement efficient fromPixels method
      // we can use canvas as-is as it already has a context, so we do a silly one more canvas
      const tempCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement('canvas');
@@ -155,13 +155,15 @@ export function process(input, config): { tensor: Tensor | null, canvas: Offscre
      const tempCtx = tempCanvas.getContext('2d');
      tempCtx?.drawImage(outCanvas, 0, 0);
      const data = tempCtx?.getImageData(0, 0, targetWidth, targetHeight);
-     pixels = tf.browser.fromPixels(data);
+     pixels = tf.browser ? tf.browser.fromPixels(data) : null;
    }
+   if (pixels) {
      const casted = pixels.toFloat();
      tensor = casted.expandDims(0);
      pixels.dispose();
      casted.dispose();
    }
+   }
    const canvas = config.filter.return ? outCanvas : null;
    return { tensor, canvas };
  }
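The repeated change from `tf.browser.fromPixels(...)` to `tf.browser ? tf.browser.fromPixels(...) : null` guards builds where `tf.browser` is unavailable; a minimal sketch of the resulting control flow (assuming such an environment, names taken from the hunk above):

  // sketch: pixels stays null instead of throwing on a missing tf.browser
  const pixels = tf.browser ? tf.browser.fromPixels(tempCanvas) : null;
  if (pixels) {                     // tensor is only built when pixels could be read
    const casted = pixels.toFloat();
    tensor = casted.expandDims(0);  // NHWC batch of one
    pixels.dispose();
    casted.dispose();
  }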
@@ -21,6 +21,8 @@ export function calc(newResult: Result): Result {
  // - at 1sec delay buffer = 1 which means live data is used
  const bufferedFactor = elapsed < 1000 ? 8 - Math.log(elapsed) : 1;
+
+ bufferedResult.canvas = newResult.canvas;

  // interpolate body results
  if (!bufferedResult.body || (newResult.body.length !== bufferedResult.body.length)) {
    bufferedResult.body = JSON.parse(JSON.stringify(newResult.body as Body[])); // deep clone once
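Note the linkage: calc() now copies the most recent processed canvas into the buffered result, which is what lets buffered rendering pick up the segmentation output, and it is why the readonly modifier on Result.canvas is dropped in the next hunk (the segmentation todo list further below also tracks "Get latest canvas in interpolate").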
@@ -176,7 +176,7 @@ export interface Result {
  /** global performance object with timing values for each operation */
  performance: Record<string, unknown>,
  /** optional processed canvas that can be used to draw input on screen */
- readonly canvas?: OffscreenCanvas | HTMLCanvasElement,
+ canvas?: OffscreenCanvas | HTMLCanvasElement,
  /** timestamp of detection representing the milliseconds elapsed since the UNIX epoch */
  readonly timestamp: number,
  /** getter property that returns unified persons object */
@@ -0,0 +1,29 @@
+import * as tf from '../../dist/tfjs.esm.js';
+import { Tensor } from '../tfjs/types';
+
+function get1dGaussianKernel(sigma: number, size: number): Tensor {
+  // Generate a 1d gaussian distribution size numbers long
+  const range = tf.range(Math.floor(-size / 2) + 1, Math.floor(size / 2) + 1);
+  const distribution = tf.pow(tf.exp(range.div(-2.0 * (sigma * sigma))), 2);
+  const normalized = distribution.div(tf.sum(distribution)) as Tensor;
+  return normalized;
+}
+
+function get2dGaussianKernel(size: number, sigma?: number): Tensor {
+  // This default is to mimic opencv2.
+  sigma = sigma === undefined ? 0.3 * ((size - 1) * 0.5 - 1) + 0.8 : sigma;
+  const kerne1d = get1dGaussianKernel(sigma, size);
+  return tf.outerProduct(kerne1d, kerne1d);
+}
+
+export function getGaussianKernel(size = 5, channels = 1, sigma?: number): Tensor {
+  return tf.tidy(() => {
+    const kerne2d = get2dGaussianKernel(size, sigma);
+    const kerne3d = channels === 3 ? tf.stack([kerne2d, kerne2d, kerne2d]) : kerne2d;
+    return tf.reshape(kerne3d, [size, size, channels, 1]);
+  });
+}
+
+export function blur(image: Tensor, kernel: Tensor, pad: number | 'valid' | 'same' = 'same'): Tensor {
+  return tf.tidy(() => tf.depthwiseConv2d(image, kernel, 1, pad));
+}
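The new gaussian-blur helper is not wired in yet (its import and the blurKernel in segmentation.ts below are commented out); a hypothetical sketch mirroring those commented-out calls:

  // hypothetical usage sketch, mirroring the commented-out calls in segmentation.ts
  const blurKernel = getGaussianKernel(50, 1, 1); // 50x50 kernel, 1 channel, sigma 1
  const smoothed = blur(maskTensor, blurKernel);  // maskTensor assumed shape [height, width, 1]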
@@ -0,0 +1,75 @@
+/**
+ * EfficientPose Module
+ */
+
+import { log, join } from '../helpers';
+import * as tf from '../../dist/tfjs.esm.js';
+import { GraphModel, Tensor } from '../tfjs/types';
+import { Config } from '../config';
+// import * as blur from './blur';
+
+let model: GraphModel;
+// let blurKernel;
+
+export type Segmentation = boolean;
+
+export async function load(config: Config): Promise<GraphModel> {
+  if (!model) {
+    // @ts-ignore type mismatch on GraphModel
+    model = await tf.loadGraphModel(join(config.modelBasePath, config.segmentation.modelPath));
+    if (!model || !model['modelUrl']) log('load model failed:', config.segmentation.modelPath);
+    else if (config.debug) log('load model:', model['modelUrl']);
+  } else if (config.debug) log('cached model:', model['modelUrl']);
+  // if (!blurKernel) blurKernel = blur.getGaussianKernel(50, 1, 1);
+  return model;
+}
+
+export async function predict(input: { tensor: Tensor | null, canvas: OffscreenCanvas | HTMLCanvasElement }, config: Config): Promise<Segmentation> {
+  if (!config.segmentation.enabled || !input.tensor || !input.canvas) return false;
+  if (!model || !model.inputs[0].shape) return false;
+  const resizeInput = tf.image.resizeBilinear(input.tensor, [model.inputs[0].shape[1], model.inputs[0].shape[2]], false);
+  const norm = resizeInput.div(255);
+  const res = model.predict(norm) as Tensor;
+  tf.dispose(resizeInput);
+  tf.dispose(norm);
+
+  const overlay = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(input.canvas.width, input.canvas.height) : document.createElement('canvas');
+  overlay.width = input.canvas.width;
+  overlay.height = input.canvas.height;
+
+  const squeeze = tf.squeeze(res, 0);
+  let resizeOutput;
+  if (squeeze.shape[2] === 2) { // model meet has two channels for fg and bg
+    const softmax = squeeze.softmax();
+    const [bg, fg] = tf.unstack(softmax, 2);
+    tf.dispose(softmax);
+    const expand = fg.expandDims(2);
+    tf.dispose(bg);
+    tf.dispose(fg);
+    resizeOutput = tf.image.resizeBilinear(expand, [input.tensor?.shape[1], input.tensor?.shape[2]]);
+    tf.dispose(expand);
+  } else { // model selfie has a single channel
+    resizeOutput = tf.image.resizeBilinear(squeeze, [input.tensor?.shape[1], input.tensor?.shape[2]]);
+  }
+
+  // const blurred = blur.blur(resizeOutput, blurKernel);
+  if (tf.browser) await tf.browser.toPixels(resizeOutput, overlay);
+  // tf.dispose(blurred);
+  tf.dispose(resizeOutput);
+  tf.dispose(squeeze);
+  tf.dispose(res);
+
+  const ctx = input.canvas.getContext('2d') as CanvasRenderingContext2D;
+  // https://developer.mozilla.org/en-US/docs/Web/API/CanvasRenderingContext2D/globalCompositeOperation
+  // best options are: darken, color-burn, multiply
+  ctx.globalCompositeOperation = 'darken';
+  await ctx?.drawImage(overlay, 0, 0);
+  ctx.globalCompositeOperation = 'source-in';
+  return true;
+}
+
+/* Segmentation todo:
+  - Smoothen
+  - Get latest canvas in interpolate
+  - Buffered fetches latest from video instead from interpolate
+*/
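As a worked sketch of the new module in isolation (a hypothetical standalone use, not part of this diff; `human.image()` is the exported wrapper around image.process()):

  // hypothetical usage sketch
  const processed = human.image(inputCanvas);   // { tensor, canvas } from image.process()
  await segmentation.load(human.config);        // loads meet.json or selfie.json
  const masked = await segmentation.predict(processed, human.config);
  // on success the foreground mask has been composited onto processed.canvas via 'darken'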
2 wiki
@@ -1 +1 @@
-Subproject commit 0087af5684c5722b2cf7ffd3db57b8117b7ac8c5
+Subproject commit 8e898a636f5254a3fe451b097c633c9965a8a680