mirror of https://github.com/vladmandic/human
expand type safety
parent fc5f90b639
commit 47c7bdfae2
@@ -22,13 +22,29 @@
    "eslint:recommended",
    "plugin:@typescript-eslint/eslint-recommended",
    "plugin:@typescript-eslint/recommended",
    "plugin:@typescript-eslint/recommended-requiring-type-checking",
    "plugin:@typescript-eslint/strict",
    "plugin:import/recommended",
    "plugin:promise/recommended"
  ],
  "rules": {
    "@typescript-eslint/ban-ts-comment":"off",
    "@typescript-eslint/dot-notation":"off",
    "@typescript-eslint/no-empty-interface":"off",
    "@typescript-eslint/no-inferrable-types":"off",
    "@typescript-eslint/no-misused-promises":"off",
    "@typescript-eslint/no-unnecessary-condition":"off",
    "@typescript-eslint/no-unsafe-argument":"off",
    "@typescript-eslint/no-unsafe-assignment":"off",
    "@typescript-eslint/no-unsafe-call":"off",
    "@typescript-eslint/no-unsafe-member-access":"off",
    "@typescript-eslint/no-unsafe-return":"off",
    "@typescript-eslint/non-nullable-type-assertion-style":"off",
    "@typescript-eslint/prefer-for-of":"off",
    "@typescript-eslint/prefer-nullish-coalescing":"off",
    "@typescript-eslint/prefer-ts-expect-error":"off",
    "@typescript-eslint/restrict-plus-operands":"off",
    "@typescript-eslint/restrict-template-expressions":"off",
    "dot-notation":"off",
    "guard-for-in":"off",
    "import/extensions": ["off", "always"],
@@ -47,6 +63,7 @@
    "no-regex-spaces":"off",
    "no-restricted-syntax":"off",
    "no-return-assign":"off",
    "no-void":"off",
    "object-curly-newline":"off",
    "prefer-destructuring":"off",
    "prefer-template":"off",
@@ -130,8 +147,8 @@
    "html", "@html-eslint"
  ],
  "rules": {
-   "@html-eslint/indent": ["error", 2],
-   "@html-eslint/element-newline":"off"
+   "@html-eslint/element-newline":"off",
+   "@html-eslint/indent": ["error", 2]
  }
}
],
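Note: the presets added above — in particular `plugin:@typescript-eslint/strict` and `recommended-requiring-type-checking` — are what drive the source changes in the rest of this commit. A minimal, illustrative sketch of the three recurring patterns (the names here are hypothetical, not from the repo):

```ts
// 1. no-floating-promises: explicitly discard a promise with `void`
async function start(video: HTMLVideoElement) {
  void video.play(); // play() returns a promise we intentionally do not await
}

// 2. consistent `as` casts instead of angle-bracket casts
const color = 'rgba(173, 216, 230, 0.6)' as string;

// 3. optional chaining instead of chained truthiness guards
interface Face { tensor?: { dispose: () => void } }
function cleanup(face?: Face) {
  face?.tensor?.dispose(); // was: if (face && face.tensor) face.tensor.dispose()
}
```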
@@ -9,7 +9,7 @@

## Changelog

- ### **HEAD -> main** 2022/08/20 mandic00@live.com
+ ### **HEAD -> main** 2022/08/21 mandic00@live.com

### **2.9.4** 2022/08/20 mandic00@live.com
2 TODO.md
@@ -66,6 +66,8 @@ Model is supported using `WebGL` backend in browser
- Allow hosting models in **Google Cloud Bucket**
  Hosted models can be directly used without downloading to local storage
  Example: `modelPath: 'https://storage.googleapis.com/human-models/facemesh.json'`
- Stricter linting rules for both **TypeScript** and **JavaScript**
  See `./eslintrc.json` for details
- Fix **MobileFaceNet** model as alternative for face embedding/descriptor detection
  Configurable using `config.face.mobilefacenet` config section
- Fix **EfficientPose** module as alternative body detection
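Note: hosting models in a cloud bucket requires no code beyond pointing the config at the bucket. A minimal sketch, assuming the public `human-models` bucket layout referenced above:

```ts
import { Human } from '@vladmandic/human';

// all model manifests and weights resolve relative to the bucket URL
const human = new Human({ modelBasePath: 'https://storage.googleapis.com/human-models/' });

async function init() {
  await human.load(); // models are fetched straight from the bucket, no local copy needed
}
```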
File diff suppressed because one or more lines are too long
@@ -103,20 +103,20 @@ async function webCam() { // initialize webcam
  const stream: MediaStream = await navigator.mediaDevices.getUserMedia(cameraOptions);
  const ready = new Promise((resolve) => { dom.video.onloadeddata = () => resolve(true); });
  dom.video.srcObject = stream;
- dom.video.play();
+ void dom.video.play();
  await ready;
  dom.canvas.width = dom.video.videoWidth;
  dom.canvas.height = dom.video.videoHeight;
  if (human.env.initial) log('video:', dom.video.videoWidth, dom.video.videoHeight, '|', stream.getVideoTracks()[0].label);
  dom.canvas.onclick = () => { // pause when clicked on screen and resume on next click
-   if (dom.video.paused) dom.video.play();
+   if (dom.video.paused) void dom.video.play();
    else dom.video.pause();
  };
}

async function detectionLoop() { // main detection loop
  if (!dom.video.paused) {
-   if (current.face && current.face.tensor) human.tf.dispose(current.face.tensor); // dispose previous tensor
+   if (current.face?.tensor) human.tf.dispose(current.face.tensor); // dispose previous tensor
    await human.detect(dom.video); // actual detection; we're not capturing output in a local variable as it can also be reached via human.result
    const now = human.now();
    fps.detect = 1000 / (now - timestamp.detect);
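Note: `HTMLMediaElement.play()` returns a promise, so the bare `dom.video.play()` calls trip the newly enabled promise checks; prefixing them with `void` marks the promise as intentionally unawaited (the base `no-void` rule is disabled in the config above to allow exactly this). A standalone sketch:

```ts
const video = document.createElement('video');

video.onclick = () => {
  if (video.paused) void video.play(); // discard the promise on purpose
  else video.pause();
};
```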
@@ -126,8 +126,8 @@ async function detectionLoop() { // main detection loop
}

async function validationLoop(): Promise<H.FaceResult> { // main screen refresh loop
- const interpolated = await human.next(human.result); // smoothen result using last-known results
- await human.draw.canvas(dom.video, dom.canvas); // draw canvas to screen
+ const interpolated = human.next(human.result); // smoothen result using last-known results
+ human.draw.canvas(dom.video, dom.canvas); // draw canvas to screen
  await human.draw.all(dom.canvas, interpolated); // draw labels, boxes, lines, etc.
  const now = human.now();
  fps.draw = 1000 / (now - timestamp.draw);
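Note: a recurring change in this commit is converting `async` functions that never await anything (`human.next`, `human.draw.canvas`, `createAnchors`, `prepareImage`, and several more below) into plain synchronous functions, so callers stop awaiting values that were never promises. A hedged before/after sketch with made-up names:

```ts
type Result = { box: [number, number, number, number] };
const interpolate = (r: Result): Result => r; // stand-in for the real smoothing logic

// before: an async wrapper around purely synchronous work
// async function next(r: Result): Promise<Result> { return interpolate(r); }

// after: the signature now matches what actually happens
function next(r: Result): Result {
  return interpolate(r);
}
```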
@@ -135,7 +135,7 @@ async function validationLoop(): Promise<H.FaceResult> { // main screen refresh loop
  printFPS(`fps: ${fps.detect.toFixed(1).padStart(5, ' ')} detect | ${fps.draw.toFixed(1).padStart(5, ' ')} draw`); // write status
  ok.faceCount = human.result.face.length === 1; // must be exactly one detected face
  if (ok.faceCount) { // skip the rest if no face
-   const gestures: string[] = Object.values(human.result.gesture).map((gesture) => (gesture as H.GestureResult).gesture); // flatten all gestures
+   const gestures: string[] = Object.values(human.result.gesture).map((gesture: H.GestureResult) => gesture.gesture); // flatten all gestures
    if (gestures.includes('blink left eye') || gestures.includes('blink right eye')) blink.start = human.now(); // blink starts when eyes get closed
    if (blink.start > 0 && !gestures.includes('blink left eye') && !gestures.includes('blink right eye')) blink.end = human.now(); // if blink started, how long until eyes are back open
    ok.blinkDetected = ok.blinkDetected || (Math.abs(blink.end - blink.start) > options.blinkMin && Math.abs(blink.end - blink.start) < options.blinkMax);
@@ -173,8 +173,8 @@ async function validationLoop(): Promise<H.FaceResult> { // main screen refresh loop
  ok.elapsedMs = Math.trunc(human.now() - startTime);
  return new Promise((resolve) => {
    setTimeout(async () => {
-     const res = await validationLoop(); // run validation loop until conditions are met
-     if (res) resolve(human.result.face[0]); // recursive promise resolve
+     await validationLoop(); // run validation loop until conditions are met
+     resolve(human.result.face[0]); // recursive promise resolve
    }, 30); // used to slow down refresh from max refresh rate to target of 30 fps
  });
}
@@ -210,7 +210,7 @@ async function detectFace() {
  }
  const db = await indexDb.load();
  const descriptors = db.map((rec) => rec.descriptor).filter((desc) => desc.length > 0);
- const res = await human.match(current.face.embedding, descriptors, matchOptions);
+ const res = human.match(current.face.embedding, descriptors, matchOptions);
  current.record = db[res.index] || null;
  if (current.record) {
    log(`best match: ${current.record.name} | id: ${current.record.id} | similarity: ${Math.round(1000 * res.similarity) / 10}%`);
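Note: `human.match` is likewise synchronous; it compares one face descriptor against an array of stored descriptors and returns the closest entry. A hedged usage sketch, assuming a `Human` instance `human` as in the demo above (descriptor values are placeholders; real ones come from `face.embedding`, and the options argument is omitted here):

```ts
const candidate: number[] = new Array(1024).fill(0.1); // placeholder embedding
const database: number[][] = [new Array(1024).fill(0.1), new Array(1024).fill(0.9)];

const best = human.match(candidate, database); // -> { index, similarity, ... }
if (best.similarity > 0.5) console.log('best match is record', best.index);
```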
@@ -4,6 +4,6 @@
  author: <https://github.com/vladmandic>'
*/

import*as c from"../../dist/human.esm.js";var w={async:!1,modelBasePath:"../../models",filter:{enabled:!0,equalization:!1,flip:!1},face:{enabled:!0,detector:{rotation:!1},mesh:{enabled:!0},attention:{enabled:!1},iris:{enabled:!0},description:{enabled:!0},emotion:{enabled:!0}},body:{enabled:!0},hand:{enabled:!0},object:{enabled:!1},gesture:{enabled:!0}},e=new c.Human(w);e.env.perfadd=!1;e.draw.options.font='small-caps 18px "Lato"';e.draw.options.lineHeight=20;var t={video:document.getElementById("video"),canvas:document.getElementById("canvas"),log:document.getElementById("log"),fps:document.getElementById("status"),perf:document.getElementById("performance")},n={detect:0,draw:0,tensors:0,start:0},s={detectFPS:0,drawFPS:0,frames:0,averageMs:0},i=(...a)=>{t.log.innerText+=a.join(" ")+`
`,console.log(...a)},r=a=>t.fps.innerText=a,b=a=>t.perf.innerText="tensors:"+e.tf.memory().numTensors+" | performance: "+JSON.stringify(a).replace(/"|{|}/g,"").replace(/,/g," | ");async function h(){r("starting webcam...");let a={audio:!1,video:{facingMode:"user",resizeMode:"none",width:{ideal:document.body.clientWidth},height:{ideal:document.body.clientHeight}}},d=await navigator.mediaDevices.getUserMedia(a),f=new Promise(p=>{t.video.onloadeddata=()=>p(!0)});t.video.srcObject=d,t.video.play(),await f,t.canvas.width=t.video.videoWidth,t.canvas.height=t.video.videoHeight;let o=d.getVideoTracks()[0],v=o.getCapabilities?o.getCapabilities():"",g=o.getSettings?o.getSettings():"",u=o.getConstraints?o.getConstraints():"";i("video:",t.video.videoWidth,t.video.videoHeight,o.label,{stream:d,track:o,settings:g,constraints:u,capabilities:v}),t.canvas.onclick=()=>{t.video.paused?t.video.play():t.video.pause()}}async function l(){if(!t.video.paused){n.start===0&&(n.start=e.now()),await e.detect(t.video);let a=e.tf.memory().numTensors;a-n.tensors!==0&&i("allocated tensors:",a-n.tensors),n.tensors=a,s.detectFPS=Math.round(1e3*1e3/(e.now()-n.detect))/1e3,s.frames++,s.averageMs=Math.round(1e3*(e.now()-n.start)/s.frames)/1e3,s.frames%100===0&&!t.video.paused&&i("performance",{...s,tensors:n.tensors})}n.detect=e.now(),requestAnimationFrame(l)}async function m(){if(!t.video.paused){let d=await e.next(e.result);e.config.filter.flip?await e.draw.canvas(d.canvas,t.canvas):await e.draw.canvas(t.video,t.canvas),await e.draw.all(t.canvas,d),b(d.performance)}let a=e.now();s.drawFPS=Math.round(1e3*1e3/(a-n.draw))/1e3,n.draw=a,r(t.video.paused?"paused":`fps: ${s.detectFPS.toFixed(1).padStart(5," ")} detect | ${s.drawFPS.toFixed(1).padStart(5," ")} draw`),setTimeout(m,30)}async function M(){i("human version:",e.version,"| tfjs version:",e.tf.version["tfjs-core"]),i("platform:",e.env.platform,"| agent:",e.env.agent),r("loading..."),await e.load(),i("backend:",e.tf.getBackend(),"| available:",e.env.backends),i("models stats:",e.getModelStats()),i("models loaded:",Object.values(e.models).filter(a=>a!==null).length),r("initializing..."),await e.warmup(),await h(),await l(),await m()}window.onload=M;
import*as c from"../../dist/human.esm.js";var w={async:!1,modelBasePath:"../../models",filter:{enabled:!0,equalization:!1,flip:!1},face:{enabled:!0,detector:{rotation:!1},mesh:{enabled:!0},attention:{enabled:!1},iris:{enabled:!0},description:{enabled:!0},emotion:{enabled:!0}},body:{enabled:!0},hand:{enabled:!0},object:{enabled:!1},gesture:{enabled:!0}},e=new c.Human(w);e.env.perfadd=!1;e.draw.options.font='small-caps 18px "Lato"';e.draw.options.lineHeight=20;var t={video:document.getElementById("video"),canvas:document.getElementById("canvas"),log:document.getElementById("log"),fps:document.getElementById("status"),perf:document.getElementById("performance")},n={detect:0,draw:0,tensors:0,start:0},o={detectFPS:0,drawFPS:0,frames:0,averageMs:0},i=(...a)=>{t.log.innerText+=a.join(" ")+`
`,console.log(...a)},r=a=>t.fps.innerText=a,b=a=>t.perf.innerText="tensors:"+e.tf.memory().numTensors.toString()+" | performance: "+JSON.stringify(a).replace(/"|{|}/g,"").replace(/,/g," | ");async function h(){r("starting webcam...");let a={audio:!1,video:{facingMode:"user",resizeMode:"none",width:{ideal:document.body.clientWidth},height:{ideal:document.body.clientHeight}}},d=await navigator.mediaDevices.getUserMedia(a),f=new Promise(p=>{t.video.onloadeddata=()=>p(!0)});t.video.srcObject=d,t.video.play(),await f,t.canvas.width=t.video.videoWidth,t.canvas.height=t.video.videoHeight;let s=d.getVideoTracks()[0],v=s.getCapabilities?s.getCapabilities():"",g=s.getSettings?s.getSettings():"",u=s.getConstraints?s.getConstraints():"";i("video:",t.video.videoWidth,t.video.videoHeight,s.label,{stream:d,track:s,settings:g,constraints:u,capabilities:v}),t.canvas.onclick=()=>{t.video.paused?t.video.play():t.video.pause()}}async function l(){if(!t.video.paused){n.start===0&&(n.start=e.now()),await e.detect(t.video);let a=e.tf.memory().numTensors;a-n.tensors!==0&&i("allocated tensors:",a-n.tensors),n.tensors=a,o.detectFPS=Math.round(1e3*1e3/(e.now()-n.detect))/1e3,o.frames++,o.averageMs=Math.round(1e3*(e.now()-n.start)/o.frames)/1e3,o.frames%100===0&&!t.video.paused&&i("performance",{...o,tensors:n.tensors})}n.detect=e.now(),requestAnimationFrame(l)}async function m(){if(!t.video.paused){let d=e.next(e.result);e.config.filter.flip?e.draw.canvas(d.canvas,t.canvas):e.draw.canvas(t.video,t.canvas),await e.draw.all(t.canvas,d),b(d.performance)}let a=e.now();o.drawFPS=Math.round(1e3*1e3/(a-n.draw))/1e3,n.draw=a,r(t.video.paused?"paused":`fps: ${o.detectFPS.toFixed(1).padStart(5," ")} detect | ${o.drawFPS.toFixed(1).padStart(5," ")} draw`),setTimeout(m,30)}async function M(){i("human version:",e.version,"| tfjs version:",e.tf.version["tfjs-core"]),i("platform:",e.env.platform,"| agent:",e.env.agent),r("loading..."),await e.load(),i("backend:",e.tf.getBackend(),"| available:",e.env.backends),i("models stats:",e.getModelStats()),i("models loaded:",Object.values(e.models).filter(a=>a!==null).length),r("initializing..."),await e.warmup(),await h(),await l(),await m()}window.onload=M;
//# sourceMappingURL=index.js.map
File diff suppressed because one or more lines are too long
@@ -45,7 +45,7 @@ const log = (...msg) => { // helper method to output messages
  console.log(...msg); // eslint-disable-line no-console
};
const status = (msg) => dom.fps.innerText = msg; // print status element
- const perf = (msg) => dom.perf.innerText = 'tensors:' + human.tf.memory().numTensors + ' | performance: ' + JSON.stringify(msg).replace(/"|{|}/g, '').replace(/,/g, ' | '); // print performance element
+ const perf = (msg) => dom.perf.innerText = 'tensors:' + (human.tf.memory().numTensors as number).toString() + ' | performance: ' + JSON.stringify(msg).replace(/"|{|}/g, '').replace(/,/g, ' | '); // print performance element

async function webCam() { // initialize webcam
  status('starting webcam...');
@@ -54,7 +54,7 @@ async function webCam() { // initialize webcam
  const stream: MediaStream = await navigator.mediaDevices.getUserMedia(options);
  const ready = new Promise((resolve) => { dom.video.onloadeddata = () => resolve(true); });
  dom.video.srcObject = stream;
- dom.video.play();
+ void dom.video.play();
  await ready;
  dom.canvas.width = dom.video.videoWidth;
  dom.canvas.height = dom.video.videoHeight;
@@ -64,7 +64,7 @@ async function webCam() { // initialize webcam
  const constraints: MediaTrackConstraints | string = track.getConstraints ? track.getConstraints() : '';
  log('video:', dom.video.videoWidth, dom.video.videoHeight, track.label, { stream, track, settings, constraints, capabilities });
  dom.canvas.onclick = () => { // pause when clicked on screen and resume on next click
-   if (dom.video.paused) dom.video.play();
+   if (dom.video.paused) void dom.video.play();
    else dom.video.pause();
  };
}
@@ -88,9 +88,9 @@ async function detectionLoop() { // main detection loop

async function drawLoop() { // main screen refresh loop
  if (!dom.video.paused) {
-   const interpolated = await human.next(human.result); // smoothen result using last-known results
-   if (human.config.filter.flip) await human.draw.canvas(interpolated.canvas as HTMLCanvasElement, dom.canvas); // draw processed image to screen canvas
-   else await human.draw.canvas(dom.video, dom.canvas); // draw original video to screen canvas // better than using processed image as this loop happens faster than processing loop
+   const interpolated = human.next(human.result); // smoothen result using last-known results
+   if (human.config.filter.flip) human.draw.canvas(interpolated.canvas as HTMLCanvasElement, dom.canvas); // draw processed image to screen canvas
+   else human.draw.canvas(dom.video, dom.canvas); // draw original video to screen canvas // better than using processed image as this loop happens faster than processing loop
    await human.draw.all(dom.canvas, interpolated); // draw labels, boxes, lines, etc.
    perf(interpolated.performance); // write performance data
  }
@@ -38,7 +38,7 @@ export async function loadDetect(config: Config): Promise<GraphModel> {
    inputSize.detector[0] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
    inputSize.detector[1] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 0;
  } else if (config.debug && models.detector) log('cached model:', models.detector['modelUrl']);
- await detect.createAnchors();
+ detect.createAnchors();
  return models.detector as GraphModel;
}
@@ -59,7 +59,7 @@ export async function load(config: Config): Promise<[GraphModel | null, GraphModel | null]> {
  return [models.detector, models.landmarks];
}

- async function prepareImage(input: Tensor, size: number): Promise<Tensor> {
+ function prepareImage(input: Tensor, size: number): Tensor {
  const t: Record<string, Tensor> = {};
  if (!input.shape || !input.shape[1] || !input.shape[2]) return input;
  let final: Tensor;
@@ -120,7 +120,7 @@ function rescaleKeypoints(keypoints: BodyKeypoint[], outputSize: [number, number]) {
  return keypoints;
}

- async function fixKeypoints(keypoints: BodyKeypoint[]) {
+ function fixKeypoints(keypoints: BodyKeypoint[]) {
  // palm z-coord is incorrect around near-zero so we approximate it
  const leftPalm = keypoints.find((k) => k.part === 'leftPalm') as BodyKeypoint;
  const leftWrist = keypoints.find((k) => k.part === 'leftWrist') as BodyKeypoint;
@@ -220,7 +220,7 @@ export async function predict(input: Tensor, config: Config): Promise<BodyResult[]> {
    const boxes = await detectBoxes(t.detector, config, outputSize);
  }
  */
- t.landmarks = await prepareImage(input, 256); // padded and resized
+ t.landmarks = prepareImage(input, 256); // padded and resized
  cache = await detectLandmarks(t.landmarks, config, outputSize);
  /*
  cropBox = [0, 0, 1, 1]; // reset crop coordinates
@@ -10,7 +10,7 @@ let anchorTensor: { x, y };
const numLayers = 5;
const strides = [8, 16, 32, 32, 32];

- export async function createAnchors() {
+ export function createAnchors() {
  const anchors: { x: number, y: number }[] = [];
  let layerId = 0;
  while (layerId < numLayers) {
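Note: `createAnchors` walks the `numLayers`/`strides` tables above and emits one normalized anchor point per grid cell of each detector layer. An illustrative sketch of that style of anchor generation (the input resolution here is an assumption, not taken from the model):

```ts
const numLayers = 5;
const strides = [8, 16, 32, 32, 32];
const inputSize = 224; // hypothetical detector resolution, for illustration only

function createAnchorsSketch(): { x: number, y: number }[] {
  const anchors: { x: number, y: number }[] = [];
  for (let layerId = 0; layerId < numLayers; layerId++) {
    const gridSize = Math.ceil(inputSize / strides[layerId]);
    for (let y = 0; y < gridSize; y++) {
      for (let x = 0; x < gridSize; x++) {
        anchors.push({ x: (x + 0.5) / gridSize, y: (y + 0.5) / gridSize }); // cell center, normalized 0..1
      }
    }
  }
  return anchors;
}
```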
@@ -62,7 +62,7 @@ export async function decode(boxesTensor: Tensor, logitsTensor: Tensor, config: Config) {
  const i = (await t.argmax.data())[0];
  const scores = await t.scores.data();
  const detected: { box: Box, boxRaw: Box, score: number }[] = [];
- const minScore = (config.body['detector'] && config.body['detector'].minConfidence) ? config.body['detector'].minConfidence : 0;
+ const minScore = config.body?.['detector']?.minConfidence || 0;
  if (scores[i] >= minScore) {
    const boxes = await t.boxes.array();
    const boxRaw: Box = boxes[i];
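Note: the `minScore` rewrite above is the commit's standard null-safety idiom: optional chaining short-circuits to `undefined`, and `||` supplies the default. A self-contained sketch:

```ts
interface DetectorConfig { minConfidence?: number }
const config: { body?: { detector?: DetectorConfig } } = { body: {} };

// before: (config.body['detector'] && config.body['detector'].minConfidence) ? ... : 0
const minScore = config.body?.detector?.minConfidence || 0; // -> 0
```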
@@ -42,7 +42,7 @@ export async function load(config: Config): Promise<GraphModel> {
  return model;
}

- async function parseSinglePose(res, config, image) {
+ function parseSinglePose(res, config, image) {
  const kpt = res[0][0];
  const keypoints: BodyKeypoint[] = [];
  let score = 0;
@@ -80,7 +80,7 @@ async function parseSinglePose(res, config, image) {
  return bodies;
}

- async function parseMultiPose(res, config, image) {
+ function parseMultiPose(res, config, image) {
  const bodies: BodyResult[] = [];
  for (let id = 0; id < res[0].length; id++) {
    const kpt = res[0][id];
@@ -174,8 +174,8 @@ export async function predict(input: Tensor, config: Config): Promise<BodyResult[]> {
  cache.last = now();
  const res = await t.res.array();
  cache.bodies = (t.res.shape[2] === 17)
-   ? await parseSinglePose(res, config, input)
-   : await parseMultiPose(res, config, input);
+   ? parseSinglePose(res, config, input)
+   : parseMultiPose(res, config, input);
  for (const body of cache.bodies) {
    fix.rescaleBody(body, [input.shape[2] || 1, input.shape[1] || 1]);
    fix.jitter(body.keypoints);
@@ -90,7 +90,7 @@ export function padInput(input: Tensor, inputSize: number): Tensor {
}

export function rescaleBody(body: BodyResult, outputSize: [number, number]): BodyResult {
- body.keypoints = body.keypoints.filter((kpt) => kpt && kpt.position); // filter invalid keypoints
+ body.keypoints = body.keypoints.filter((kpt) => kpt?.position); // filter invalid keypoints
  for (const kpt of body.keypoints) {
    kpt.position = [
      kpt.position[0] * (outputSize[0] + cache.padding[2][0] + cache.padding[2][1]) / outputSize[0] - cache.padding[2][0],
@@ -19,7 +19,7 @@ const localMaximumRadius = 1;
const outputStride = 16;
const squaredNmsRadius = 50 ** 2;

- function traverse(edgeId, sourceKeypoint, targetId, scores, offsets, displacements, offsetRefineStep = 2) {
+ function traverse(edgeId: number, sourceKeypoint, targetId, scores, offsets, displacements, offsetRefineStep = 2) {
  const getDisplacement = (point) => ({
    y: displacements.get(point.y, point.x, edgeId),
    x: displacements.get(point.y, point.x, (displacements.shape[2] / 2) + edgeId),
@@ -81,8 +81,8 @@ export function decodePose(root, scores, offsets, displacementsFwd, displacementsBwd) {
  return keypoints;
}

- function scoreIsMaximumInLocalWindow(keypointId, score, heatmapY, heatmapX, scores) {
-   const [height, width] = scores.shape;
+ function scoreIsMaximumInLocalWindow(keypointId, score: number, heatmapY: number, heatmapX: number, scores) {
+   const [height, width]: [number, number] = scores.shape;
  let localMaximum = true;
  const yStart = Math.max(heatmapY - localMaximumRadius, 0);
  const yEnd = Math.min(heatmapY + localMaximumRadius + 1, height);
@@ -172,7 +172,7 @@ export async function predict(input: Tensor, config: Config): Promise<BodyResult[]> {
  const buffers = await Promise.all(res.map((tensor: Tensor) => tensor.buffer()));
  for (const t of res) tf.dispose(t);

- const decoded = await decode(buffers[0], buffers[1], buffers[2], buffers[3], config.body.maxDetected, config.body.minConfidence);
+ const decoded = decode(buffers[0], buffers[1], buffers[2], buffers[3], config.body.maxDetected, config.body.minConfidence);
  if (!model.inputs[0].shape) return [];
  const scaled = utils.scalePoses(decoded, [input.shape[1], input.shape[2]], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
  return scaled;
@@ -154,14 +154,14 @@ export class MaxHeap {
  }
}

- export function getOffsetPoint(y, x, keypoint, offsets) {
+ export function getOffsetPoint(y, x, keypoint: number, offsets) {
  return {
    y: offsets.get(y, x, keypoint),
    x: offsets.get(y, x, keypoint + count),
  };
}

- export function getImageCoords(part, outputStride, offsets) {
+ export function getImageCoords(part, outputStride: number, offsets) {
  const { heatmapY, heatmapX, id: keypoint } = part;
  const { y, x } = getOffsetPoint(heatmapY, heatmapX, keypoint, offsets);
  return {
@@ -190,7 +190,7 @@ export function squaredDistance(y1, x1, y2, x2) {
  return dy * dy + dx * dx;
}

- export function addVectors(a, b) {
+ export function addVectors(a: { x: number, y: number }, b: { x: number, y: number }) {
  return { x: a.x + b.x, y: a.y + b.y };
}
@@ -5,8 +5,8 @@ import type { BodyResult } from '../result';
import type { AnyCanvas, DrawOptions } from '../exports';

/** draw detected bodies */
- export async function body(inCanvas: AnyCanvas, result: BodyResult[], drawOptions?: Partial<DrawOptions>) {
-   const localOptions = mergeDeep(options, drawOptions);
+ export function body(inCanvas: AnyCanvas, result: BodyResult[], drawOptions?: Partial<DrawOptions>) {
+   const localOptions: DrawOptions = mergeDeep(options, drawOptions);
  if (!result || !inCanvas) return;
  const ctx = getCanvasContext(inCanvas);
  if (!ctx) return;
@@ -24,8 +24,8 @@ export { object } from './object';
export { gesture } from './gesture';

/** draw combined person results instead of individual detection result objects */
- export async function person(inCanvas: AnyCanvas, result: PersonResult[], drawOptions?: Partial<DrawOptions>) {
-   const localOptions = mergeDeep(options, drawOptions);
+ export function person(inCanvas: AnyCanvas, result: PersonResult[], drawOptions?: Partial<DrawOptions>) {
+   const localOptions: DrawOptions = mergeDeep(options, drawOptions);
  if (!result || !inCanvas) return;
  const ctx = getCanvasContext(inCanvas);
  if (!ctx) return;
@@ -52,7 +52,7 @@ export async function person(inCanvas: AnyCanvas, result: PersonResult[], drawOptions?: Partial<DrawOptions>) {
}

/** draw processed canvas */
- export async function canvas(input: AnyCanvas | HTMLImageElement | HTMLVideoElement, output: AnyCanvas) {
+ export function canvas(input: AnyCanvas | HTMLImageElement | HTMLVideoElement, output: AnyCanvas) {
  if (!input || !output) return;
  const ctx = getCanvasContext(output);
  if (!ctx) return;
@@ -23,7 +23,7 @@ function drawLabels(f: FaceResult, ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D) {
  if (emotion.length > 3) emotion.length = 3;
  labels.push(emotion.join(' '));
}
- if (f.rotation && f.rotation.angle && f.rotation.gaze) {
+ if (f.rotation?.angle && f.rotation?.gaze) {
  if (f.rotation.angle.roll) labels.push(`roll: ${rad2deg(f.rotation.angle.roll)}° yaw:${rad2deg(f.rotation.angle.yaw)}° pitch:${rad2deg(f.rotation.angle.pitch)}°`);
  if (f.rotation.gaze.bearing) labels.push(`gaze: ${rad2deg(f.rotation.gaze.bearing)}°`);
}
@@ -44,7 +44,7 @@ function drawLabels(f: FaceResult, ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D) {

function drawIrisElipse(f: FaceResult, ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D) {
  // iris: array[center, left, top, right, bottom]
- if (f.annotations && f.annotations.leftEyeIris && f.annotations.leftEyeIris[0]) {
+ if (f.annotations?.leftEyeIris && f.annotations?.leftEyeIris[0]) {
    ctx.strokeStyle = opt.useDepth ? 'rgba(255, 200, 255, 0.3)' : opt.color;
    ctx.beginPath();
    const sizeX = Math.abs(f.annotations.leftEyeIris[3][0] - f.annotations.leftEyeIris[1][0]) / 2;
@@ -56,7 +56,7 @@ function drawIrisElipse(f: FaceResult, ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D) {
    ctx.fill();
  }
}
- if (f.annotations && f.annotations.rightEyeIris && f.annotations.rightEyeIris[0]) {
+ if (f.annotations?.rightEyeIris && f.annotations?.rightEyeIris[0]) {
    ctx.strokeStyle = opt.useDepth ? 'rgba(255, 200, 255, 0.3)' : opt.color;
    ctx.beginPath();
    const sizeX = Math.abs(f.annotations.rightEyeIris[3][0] - f.annotations.rightEyeIris[1][0]) / 2;
@@ -149,7 +149,7 @@ function drawFaceBoxes(f: FaceResult, ctx) {
}

/** draw detected faces */
- export async function face(inCanvas: AnyCanvas, result: FaceResult[], drawOptions?: Partial<DrawOptions>) {
+ export function face(inCanvas: AnyCanvas, result: FaceResult[], drawOptions?: Partial<DrawOptions>) {
  opt = mergeDeep(options, drawOptions);
  if (!result || !inCanvas) return;
  const ctx = getCanvasContext(inCanvas);
@@ -5,8 +5,8 @@ import type { GestureResult } from '../result';
import type { AnyCanvas, DrawOptions } from '../exports';

/** draw detected gestures */
- export async function gesture(inCanvas: AnyCanvas, result: GestureResult[], drawOptions?: Partial<DrawOptions>) {
-   const localOptions = mergeDeep(options, drawOptions);
+ export function gesture(inCanvas: AnyCanvas, result: GestureResult[], drawOptions?: Partial<DrawOptions>) {
+   const localOptions: DrawOptions = mergeDeep(options, drawOptions);
  if (!result || !inCanvas) return;
  if (localOptions.drawGestures) {
    const ctx = getCanvasContext(inCanvas);
@@ -5,8 +5,8 @@ import type { HandResult } from '../result';
import type { AnyCanvas, DrawOptions, Point } from '../exports';

/** draw detected hands */
- export async function hand(inCanvas: AnyCanvas, result: HandResult[], drawOptions?: Partial<DrawOptions>) {
-   const localOptions = mergeDeep(options, drawOptions);
+ export function hand(inCanvas: AnyCanvas, result: HandResult[], drawOptions?: Partial<DrawOptions>) {
+   const localOptions: DrawOptions = mergeDeep(options, drawOptions);
  if (!result || !inCanvas) return;
  const ctx = getCanvasContext(inCanvas);
  if (!ctx) return;
@@ -5,8 +5,8 @@ import type { ObjectResult } from '../result';
import type { AnyCanvas, DrawOptions } from '../exports';

/** draw detected objects */
- export async function object(inCanvas: AnyCanvas, result: ObjectResult[], drawOptions?: Partial<DrawOptions>) {
-   const localOptions = mergeDeep(options, drawOptions);
+ export function object(inCanvas: AnyCanvas, result: ObjectResult[], drawOptions?: Partial<DrawOptions>) {
+   const localOptions: DrawOptions = mergeDeep(options, drawOptions);
  if (!result || !inCanvas) return;
  const ctx = getCanvasContext(inCanvas);
  if (!ctx) return;
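Note: with `draw.body`, `draw.person`, `draw.canvas`, `draw.face`, `draw.gesture`, `draw.hand`, and `draw.object` all converted from `async` to plain functions above, only the aggregate `draw.all` is still awaited. A hedged usage sketch, assuming a `Human` instance `human` with a populated `result` and a `video` element:

```ts
const output = document.getElementById('canvas') as HTMLCanvasElement;

human.draw.canvas(video, output);           // now synchronous
human.draw.face(output, human.result.face); // individual overlays are synchronous too
await human.draw.all(output, human.result); // the combined helper remains async
```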
@@ -44,23 +44,23 @@ export interface DrawOptions {

/** currently set draw options {@link DrawOptions} */
export const options: DrawOptions = {
- color: <string>'rgba(173, 216, 230, 0.6)', // 'lightblue' with light alpha channel
- labelColor: <string>'rgba(173, 216, 230, 1)', // 'lightblue' with dark alpha channel
- shadowColor: <string>'black',
- alpha: 0.5,
- font: <string>'small-caps 16px "Segoe UI"',
- lineHeight: <number>18,
- lineWidth: <number>4,
- pointSize: <number>2,
- roundRect: <number>8,
- drawPoints: <boolean>false,
- drawLabels: <boolean>true,
- drawBoxes: <boolean>true,
- drawAttention: <boolean>true,
- drawGestures: <boolean>true,
- drawPolygons: <boolean>true,
- drawGaze: <boolean>true,
- fillPolygons: <boolean>false,
- useDepth: <boolean>true,
- useCurves: <boolean>false,
+ color: 'rgba(173, 216, 230, 0.6)' as string, // 'lightblue' with light alpha channel
+ labelColor: 'rgba(173, 216, 230, 1)' as string, // 'lightblue' with dark alpha channel
+ shadowColor: 'black' as string,
+ alpha: 0.5 as number,
+ font: 'small-caps 16px "Segoe UI"' as string,
+ lineHeight: 18 as number,
+ lineWidth: 4 as number,
+ pointSize: 2 as number,
+ roundRect: 8 as number,
+ drawPoints: false as boolean,
+ drawLabels: true as boolean,
+ drawBoxes: true as boolean,
+ drawAttention: true as boolean,
+ drawGestures: true as boolean,
+ drawPolygons: true as boolean,
+ drawGaze: true as boolean,
+ fillPolygons: false as boolean,
+ useDepth: true as boolean,
+ useCurves: false as boolean,
};
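Note: the rewrite of `options` above swaps every angle-bracket assertion for an `as` assertion. The two forms are identical at runtime, but angle brackets are ambiguous with JSX and illegal in `.tsx` files, so the `as` form is what the typescript-eslint presets prefer; and because `@typescript-eslint/no-inferrable-types` is turned off in the config at the top of this commit, redundant-looking assertions such as `0.5 as number` remain allowed.

```ts
const a = <string>'value';   // angle-bracket assertion: rejected inside .tsx files
const b = 'value' as string; // as-assertion: works everywhere, lint-preferred
```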
@@ -200,7 +200,7 @@ export const detectFace = async (instance: Human /* instance of human */, input: Tensor): Promise<FaceResult[]> => {
  // if (faces[i]?.annotations?.leftEyeIris) delete faces[i].annotations.leftEyeIris;
  // if (faces[i]?.annotations?.rightEyeIris) delete faces[i].annotations.rightEyeIris;
}
- const irisSize = (faces[i].annotations && faces[i].annotations.leftEyeIris && faces[i].annotations.leftEyeIris[0] && faces[i].annotations.rightEyeIris && faces[i].annotations.rightEyeIris[0]
+ const irisSize = (faces[i]?.annotations?.leftEyeIris?.[0] && faces[i]?.annotations?.rightEyeIris?.[0]
  && (faces[i].annotations.leftEyeIris.length > 0) && (faces[i].annotations.rightEyeIris.length > 0)
  && (faces[i].annotations.leftEyeIris[0] !== null) && (faces[i].annotations.rightEyeIris[0] !== null))
  ? Math.max(Math.abs(faces[i].annotations.leftEyeIris[3][0] - faces[i].annotations.leftEyeIris[1][0]), Math.abs(faces[i].annotations.rightEyeIris[4][1] - faces[i].annotations.rightEyeIris[2][1])) / input.shape[2]
@@ -56,11 +56,11 @@ export class FaceBoxes {
  }
}

- export async function load(config) {
+ export async function load(config: Config) {
  const model = await loadModel(config.face.detector?.modelPath);
- if (config.debug) log(`load model: ${config.face.detector.modelPath.match(/\/(.*)\./)[1]}`);
+ if (config.face.detector?.modelPath && config.debug) log(`load model: ${config.face.detector.modelPath?.match(/\/(.*)\./)?.[1] || ''}`);
  const faceboxes = new FaceBoxes(model, config);
- if (config.face.mesh.enabled && config.debug) log(`load model: ${config.face.mesh.modelPath.match(/\/(.*)\./)[1]}`);
- if (config.face.iris.enabled && config.debug) log(`load model: ${config.face.iris.modelPath.match(/\/(.*)\./)[1]}`);
+ if (config.face.mesh?.enabled && config.face.mesh?.modelPath && config.debug) log(`load model: ${config.face.mesh.modelPath.match(/\/(.*)\./)?.[1] || ''}`);
+ if (config.face.iris?.enabled && config.face.iris?.modelPath && config.debug) log(`load model: ${config.face.iris.modelPath?.match(/\/(.*)\./)?.[1] || ''}`);
  return faceboxes;
}
@@ -69,7 +69,7 @@ export const calculateLandmarksBoundingBox = (landmarks) => {

export const fixedRotationMatrix = [[1, 0, 0], [0, 1, 0], [0, 0, 1]];

- export const normalizeRadians = (angle) => angle - 2 * Math.PI * Math.floor((angle + Math.PI) / (2 * Math.PI));
+ export const normalizeRadians = (angle: number) => angle - 2 * Math.PI * Math.floor((angle + Math.PI) / (2 * Math.PI));

export const computeRotation = (point1, point2) => normalizeRadians(Math.PI / 2 - Math.atan2(-(point2[1] - point1[1]), point2[0] - point1[0]));
@@ -120,7 +120,7 @@ export const rotatePoint = (homogeneousCoordinate, rotationMatrix) => [dot(homogeneousCoordinate, rotationMatrix[0]), dot(homogeneousCoordinate, rotationMatrix[1])];

export const xyDistanceBetweenPoints = (a, b) => Math.sqrt(((a[0] - b[0]) ** 2) + ((a[1] - b[1]) ** 2));

- export function generateAnchors(inputSize) {
+ export function generateAnchors(inputSize: number) {
  const spec = inputSize === 192
    ? { strides: [4], anchors: [1] } // facemesh-detector
    : { strides: [inputSize / 16, inputSize / 8], anchors: [2, 6] }; // blazeface
@@ -40,8 +40,8 @@ export async function load(config: Config): Promise<GraphModel> {
export function enhance(input): Tensor {
  const tensor = (input.image || input.tensor || input) as Tensor; // input received from detector is already normalized to 0..1, input is also assumed to be straightened
  if (!model?.inputs[0].shape) return tensor; // model has no shape so no point continuing
- const crop = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
- const norm = tf.mul(crop, constants.tf255);
+ const crop: Tensor = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
+ const norm: Tensor = tf.mul(crop, constants.tf255);
  tf.dispose(crop);
  return norm;
  /*
@@ -74,10 +74,10 @@ export async function predict(image: Tensor, config: Config, idx: number, count: number) {
  skipped = 0;
  return new Promise(async (resolve) => {
    const obj = {
-     age: <number>0,
-     gender: <Gender>'unknown',
-     genderScore: <number>0,
-     descriptor: <number[]>[],
+     age: 0 as number,
+     gender: 'unknown' as Gender,
+     genderScore: 0 as number,
+     descriptor: [] as number[],
    };

    if (config.face.description?.enabled) {
@@ -85,7 +85,7 @@ export async function predict(image: Tensor, config: Config, idx: number, count: number) {
      const resT = model?.execute(enhanced) as Tensor[];
      lastTime = now();
      tf.dispose(enhanced);
-     const genderT = await resT.find((t) => t.shape[1] === 1) as Tensor;
+     const genderT = resT.find((t) => t.shape[1] === 1) as Tensor;
      const gender = await genderT.data();
      const confidence = Math.trunc(200 * Math.abs((gender[0] - 0.5))) / 100;
      if (confidence > (config.face.description.minConfidence || 0)) {
@@ -93,7 +93,7 @@ export async function predict(image: Tensor, config: Config, idx: number, count: number) {
      obj.genderScore = Math.min(0.99, confidence);
    }
    const argmax = tf.argMax(resT.find((t) => t.shape[1] === 100), 1);
-   const age = (await argmax.data())[0];
+   const age: number = (await argmax.data())[0];
    tf.dispose(argmax);
    const ageT = resT.find((t) => t.shape[1] === 100) as Tensor;
    const all = await ageT.data();
@@ -102,7 +102,7 @@ export async function predict(image: Tensor, config: Config, idx: number, count: number) {
      const desc = resT.find((t) => t.shape[1] === 1024);
      // const reshape = desc.reshape([128, 8]); // reshape large 1024-element descriptor to 128 x 8
      // const reduce = reshape.logSumExp(1); // reduce 2nd dimension by calculating logSumExp on it which leaves us with 128-element descriptor
-     const descriptor = desc ? await desc.data() : <number[]>[];
+     const descriptor = desc ? await desc.data() : [] as number[];
      obj.descriptor = Array.from(descriptor);
      resT.forEach((t) => tf.dispose(t));
    }
@@ -96,7 +96,7 @@ export async function load(config: Config): Promise<[GraphModel | null, GraphModel | null]> {
    if (config.debug) log('cached model:', handDetectorModel['modelUrl']);
    if (config.debug) log('cached model:', handPoseModel['modelUrl']);
  }
- const handDetector = new handdetector.HandDetector(handDetectorModel);
- handPipeline = new handpipeline.HandPipeline(handDetector, handPoseModel);
+ const handDetector = handDetectorModel ? new handdetector.HandDetector(handDetectorModel) : undefined;
+ if (handDetector && handPoseModel) handPipeline = new handpipeline.HandPipeline(handDetector, handPoseModel);
  return [handDetectorModel, handPoseModel];
}
@@ -9,6 +9,7 @@ import * as anchors from './handposeanchors';
import { constants } from '../tfjs/constants';
import type { Tensor, GraphModel } from '../tfjs/types';
import type { Point } from '../result';
+ import type { Config } from '../config';

export class HandDetector {
  model: GraphModel;
@@ -18,11 +19,11 @@ export class HandDetector {
  inputSizeTensor: Tensor;
  doubleInputSizeTensor: Tensor;

- constructor(model) {
+ constructor(model: GraphModel) {
    this.model = model;
    this.anchors = anchors.anchors.map((anchor) => [anchor.x, anchor.y]);
    this.anchorsTensor = tf.tensor2d(this.anchors);
-   this.inputSize = (this.model && this.model.inputs && this.model.inputs[0].shape) ? this.model.inputs[0].shape[2] : 0;
+   this.inputSize = this?.model?.inputs?.[0]?.shape?.[2] || 0;
    this.inputSizeTensor = tf.tensor1d([this.inputSize, this.inputSize]);
    this.doubleInputSizeTensor = tf.tensor1d([this.inputSize * 2, this.inputSize * 2]);
  }
@@ -40,20 +41,20 @@ export class HandDetector {
    t.endPoints = tf.mul(t.add, this.inputSizeTensor);
    const res = tf.concat2d([t.startPoints, t.endPoints], 1);
    Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
-   return res;
+   return res as Tensor;
  }

- normalizeLandmarks(rawPalmLandmarks, index) {
+ normalizeLandmarks(rawPalmLandmarks, index: number) {
    const t: Record<string, Tensor> = {};
    t.reshape = tf.reshape(rawPalmLandmarks, [-1, 7, 2]);
    t.div = tf.div(t.reshape, this.inputSizeTensor);
-   t.landmarks = tf.add(t.div, this.anchors[index]);
+   t.landmarks = tf.add(t.div, this.anchors[index] ? this.anchors[index] : 0);
    const res = tf.mul(t.landmarks, this.inputSizeTensor);
    Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
-   return res;
+   return res as Tensor;
  }

- async predict(input, config): Promise<{ startPoint: Point; endPoint: Point, palmLandmarks: Point[]; confidence: number }[]> {
+ async predict(input: Tensor, config: Config): Promise<{ startPoint: Point; endPoint: Point, palmLandmarks: Point[]; confidence: number }[]> {
    const t: Record<string, Tensor> = {};
    t.resize = tf.image.resizeBilinear(input, [this.inputSize, this.inputSize]);
    t.div = tf.div(t.resize, constants.tf127);
@@ -67,7 +68,7 @@ export class HandDetector {
    t.boxes = tf.slice(t.predictions, [0, 1], [-1, 4]);
    t.norm = this.normalizeBoxes(t.boxes);
    // box detection is flaky so we look for 3x more boxes than we need
-   t.nms = await tf.image.nonMaxSuppressionAsync(t.norm, t.scores, 3 * config.hand.maxDetected, config.hand.iouThreshold, config.hand.minConfidence);
+   t.nms = await tf.image.nonMaxSuppressionAsync(t.norm, t.scores, 3 * (config.hand?.maxDetected || 1), config.hand.iouThreshold, config.hand.minConfidence);
    const nms = await t.nms.array() as number[];
    const hands: { startPoint: Point; endPoint: Point; palmLandmarks: Point[]; confidence: number }[] = [];
    for (const index of nms) {
@@ -81,7 +82,7 @@ export class HandDetector {
      const endPoint = box.slice(2, 4) as unknown as Point;
      const palmLandmarks = await p.palmLandmarks.array();
      const hand = { startPoint, endPoint, palmLandmarks, confidence: scores[index] };
-     const scaled = util.scaleBoxCoordinates(hand, [input.shape[2] / this.inputSize, input.shape[1] / this.inputSize]);
+     const scaled = util.scaleBoxCoordinates(hand, [(input.shape[2] || 1) / this.inputSize, (input.shape[1] || 0) / this.inputSize]);
      hands.push(scaled);
      Object.keys(p).forEach((tensor) => tf.dispose(p[tensor]));
    }
@@ -30,7 +30,7 @@ export class HandPipeline {
  constructor(handDetector, handPoseModel) {
    this.handDetector = handDetector;
    this.handPoseModel = handPoseModel;
-   this.inputSize = this.handPoseModel && this.handPoseModel.inputs[0].shape ? this.handPoseModel.inputs[0].shape[2] : 0;
+   this.inputSize = this.handPoseModel?.inputs?.[0].shape?.[2] || 0;
    this.storedBoxes = [];
    this.skipped = Number.MAX_SAFE_INTEGER;
    this.detectedHands = 0;
@@ -295,7 +295,7 @@ export class Human {

  if (this.env.initial) { // print version info on first run and check for correct backend setup
    if (this.config.debug) log(`version: ${this.version}`);
-   if (this.config.debug) log(`tfjs version: ${this.tf.version['tfjs-core']}`);
+   if (this.config.debug) log(`tfjs version: ${this.tf.version['tfjs-core'] as string}`);
    if (!await backend.check(this)) log('error: backend check failed');
    await tf.ready();
    if (this.env.browser) {
@@ -321,7 +321,7 @@ export class Human {

  /** emit event */
  emit = (event: string) => {
-   if (this.events && this.events.dispatchEvent) this.events.dispatchEvent(new Event(event));
+   if (this.events?.dispatchEvent) this.events.dispatchEvent(new Event(event));
  };

  /** Runs interpolation using last known result and returns smoothened result
@@ -20,5 +20,5 @@ export async function histogramEqualization(inputImage: Tensor): Promise<Tensor> {
  const rgb = tf.stack([enh[0], enh[1], enh[2]], 2);
  const reshape = tf.reshape(rgb, [1, squeeze.shape[0], squeeze.shape[1], 3]);
  tf.dispose([...channels, ...min, ...max, ...sub, ...range, ...fact, ...enh, rgb, squeeze]);
- return reshape; // output shape is [1, height, width, 3]
+ return reshape as Tensor; // output shape is [1, height, width, 3]
}
@@ -25,7 +25,7 @@ const last: { inputSum: number, cacheDiff: number, sumMethod: number, inputTensor: Tensor | undefined } = {
};

export function canvas(width: number, height: number): AnyCanvas {
- let c;
+ let c: AnyCanvas;
  if (env.browser) { // browser defines canvas object
    if (env.worker) { // if running in web worker use OffscreenCanvas
      if (typeof OffscreenCanvas === 'undefined') throw new Error('canvas error: attempted to run in web worker but OffscreenCanvas is not supported');
@@ -42,6 +42,7 @@ export function canvas(width: number, height: number): AnyCanvas {
  else if (typeof globalThis.Canvas !== 'undefined') c = new globalThis.Canvas(width, height);
  // else throw new Error('canvas error: attempted to use canvas in nodejs without canvas support installed');
  }
+ // @ts-ignore it's either defined or we already threw an error
  return c;
}
@@ -98,7 +99,7 @@ export async function process(input: Input, config: Config, getTensor: boolean = true) {
  }
}
// at the end shape must be [1, height, width, 3]
- if (tensor == null || (tensor as Tensor).shape.length !== 4 || (tensor as Tensor).shape[0] !== 1 || (tensor as Tensor).shape[3] !== 3) throw new Error(`input error: attempted to use tensor with unrecognized shape: ${(input as Tensor).shape}`);
+ if (tensor == null || tensor.shape.length !== 4 || tensor.shape[0] !== 1 || tensor.shape[3] !== 3) throw new Error(`input error: attempted to use tensor with unrecognized shape: ${((input as Tensor).shape).toString()}`);
  if ((tensor).dtype === 'int32') {
    const cast = tf.cast(tensor, 'float32');
    tf.dispose(tensor);
@@ -111,14 +112,14 @@ export async function process(input: Input, config: Config, getTensor: boolean = true) {
    if (config.debug) log('input stream is not ready');
    return { tensor: null, canvas: inCanvas }; // video may become temporarily unavailable due to onresize
  }
- const originalWidth = input['naturalWidth'] || input['videoWidth'] || input['width'] || (input['shape'] && (input['shape'][1] > 0));
- const originalHeight = input['naturalHeight'] || input['videoHeight'] || input['height'] || (input['shape'] && (input['shape'][2] > 0));
+ const originalWidth: number = input['naturalWidth'] || input['videoWidth'] || input['width'] || (input['shape'] && (input['shape'][1] > 0));
+ const originalHeight: number = input['naturalHeight'] || input['videoHeight'] || input['height'] || (input['shape'] && (input['shape'][2] > 0));
  if (!originalWidth || !originalHeight) {
    if (config.debug) log('cannot determine input dimensions');
    return { tensor: null, canvas: inCanvas }; // video may become temporarily unavailable due to onresize
  }
- let targetWidth = originalWidth;
- let targetHeight = originalHeight;
+ let targetWidth: number = originalWidth;
+ let targetHeight: number = originalHeight;
  if (targetWidth > maxSize) {
    targetWidth = maxSize;
    targetHeight = Math.trunc(targetWidth * originalHeight / originalWidth);
@@ -129,9 +130,9 @@ export async function process(input: Input, config: Config, getTensor: boolean = true) {
  }

  // create our canvas and resize it if needed
- if ((config.filter.width || 0) > 0) targetWidth = config.filter.width;
- else if ((config.filter.height || 0) > 0) targetWidth = originalWidth * ((config.filter.height || 0) / originalHeight);
- if ((config.filter.height || 0) > 0) targetHeight = config.filter.height;
+ if ((config.filter?.width || 0) > 0) targetWidth = config.filter.width as number;
+ else if ((config.filter?.height || 0) > 0) targetWidth = originalWidth * ((config.filter.height || 0) / originalHeight);
+ if ((config.filter.height || 0) > 0) targetHeight = config.filter.height as number;
  else if ((config.filter.width || 0) > 0) targetHeight = originalHeight * ((config.filter.width || 0) / originalWidth);
  if (!targetWidth || !targetHeight) throw new Error('input error: cannot determine dimension');
  if (!inCanvas || (inCanvas.width !== targetWidth) || (inCanvas.height !== targetHeight)) inCanvas = canvas(targetWidth, targetHeight);
@@ -227,8 +228,8 @@ export async function process(input: Input, config: Config, getTensor: boolean = true) {
    pixels = rgb;
  }
  if (!pixels) throw new Error('input error: cannot create tensor');
- const casted = tf.cast(pixels, 'float32');
- const tensor = config.filter.equalization ? await enhance.histogramEqualization(casted) : tf.expandDims(casted, 0);
+ const casted: Tensor = tf.cast(pixels, 'float32');
+ const tensor: Tensor = config.filter.equalization ? await enhance.histogramEqualization(casted) : tf.expandDims(casted, 0);
  tf.dispose([pixels, casted]);
  return { tensor, canvas: (config.filter.return ? outCanvas : null) };
}
@@ -9,7 +9,7 @@ import * as shaders from './imagefxshaders';
import { canvas } from './image';
import { log } from '../util/util';

- const collect = (source, prefix, collection) => {
+ const collect = (source, prefix: string, collection) => {
  const r = new RegExp('\\b' + prefix + ' \\w+ (\\w+)', 'ig');
  source.replace(r, (match, name) => {
    collection[name] = 0;
@@ -37,7 +37,7 @@ class GLProgram {
    this.gl.attachShader(this.id, fragmentShader);
    this.gl.linkProgram(this.id);
    if (!this.gl.getProgramParameter(this.id, this.gl.LINK_STATUS)) {
-     log(`filter: gl link failed: ${this.gl.getProgramInfoLog(this.id)}`);
+     log(`filter: gl link failed: ${this.gl.getProgramInfoLog(this.id) || 'unknown'}`);
      return;
    }
    this.gl.useProgram(this.id);
@@ -57,7 +57,7 @@ class GLProgram {
    this.gl.shaderSource(shader, source);
    this.gl.compileShader(shader);
    if (!this.gl.getShaderParameter(shader, this.gl.COMPILE_STATUS)) {
-     log(`filter: gl compile failed: ${this.gl.getShaderInfoLog(shader)}`);
+     log(`filter: gl compile failed: ${this.gl.getShaderInfoLog(shader) || 'unknown'}`);
      return null;
    }
    return shader;
@@ -174,7 +174,7 @@ export function GLImageFilter() {
  }

  const filter = {
-   colorMatrix: (matrix) => { // general color matrix filter
+   colorMatrix: (matrix: number[]) => { // general color matrix filter
      const m = new Float32Array(matrix);
      m[4] /= 255;
      m[9] /= 255;
@@ -189,7 +189,7 @@ export function GLImageFilter() {
      draw();
    },

-   brightness: (brightness) => {
+   brightness: (brightness: number) => {
      const b = (brightness || 0) + 1;
      filter.colorMatrix([
        b, 0, 0, 0, 0,
@@ -199,7 +199,7 @@ export function GLImageFilter() {
      ]);
    },

-   saturation: (amount) => {
+   saturation: (amount: number) => {
      const x = (amount || 0) * 2 / 3 + 1;
      const y = ((x - 1) * -0.5);
      filter.colorMatrix([
@@ -214,7 +214,7 @@ export function GLImageFilter() {
      filter.saturation(-1);
    },

-   contrast: (amount) => {
+   contrast: (amount: number) => {
      const v = (amount || 0) + 1;
      const o = -128 * (v - 1);
      filter.colorMatrix([
@@ -229,7 +229,7 @@ export function GLImageFilter() {
      filter.contrast(-2);
    },

-   hue: (rotation) => {
+   hue: (rotation: number) => {
      rotation = (rotation || 0) / 180 * Math.PI;
      const cos = Math.cos(rotation);
      const sin = Math.sin(rotation);
@@ -316,7 +316,7 @@ export function GLImageFilter() {
      ]);
    },

-   convolution: (matrix) => { // general convolution Filter
+   convolution: (matrix: number[]) => { // general convolution Filter
      const m = new Float32Array(matrix);
      const pixelSizeX = 1 / fxcanvas.width;
      const pixelSizeY = 1 / fxcanvas.height;
@@ -364,7 +364,7 @@ export function GLImageFilter() {
      ]);
    },

-   emboss: (size) => {
+   emboss: (size: number) => {
      const s = size || 1;
      // @ts-ignore this
      filter.convolution.call(this, [
@@ -374,7 +374,7 @@ export function GLImageFilter() {
      ]);
    },

-   blur: (size) => {
+   blur: (size: number) => {
      const blurSizeX = (size / 7) / fxcanvas.width;
      const blurSizeY = (size / 7) / fxcanvas.height;
      const program = compileShader(shaders.blur);
@@ -387,7 +387,7 @@ export function GLImageFilter() {
      draw();
    },

-   pixelate: (size) => {
+   pixelate: (size: number) => {
      const blurSizeX = (size) / fxcanvas.width;
      const blurSizeY = (size) / fxcanvas.height;
      const program = compileShader(shaders.pixelate);
@@ -158,7 +158,7 @@ export function validateModel(newInstance: Human | null, model: GraphModel | null) {
  interface Op { name: string, category: string, op: string }
  const url = model['modelUrl'] as string;
  const executor = model['executor'];
- if (executor && executor.graph.nodes) {
+ if (executor?.graph?.nodes) {
    for (const kernel of Object.values(executor.graph.nodes)) {
      const op = (kernel as Op).op.toLowerCase();
      if (!ops.includes(op)) ops.push(op);
@@ -21,7 +21,7 @@ function registerCustomOps() {
  const kernelMod = {
    kernelName: 'FloorMod',
    backendName: tf.getBackend(),
-   kernelFunc: (op) => tf.tidy(() => tf.floorDiv(op.inputs.a / op.inputs.b) * op.inputs.b + tf.mod(op.inputs.a, op.inputs.b)),
+   kernelFunc: (op) => tf.tidy(() => tf.add(tf.mul(tf.floorDiv(op.inputs.a / op.inputs.b), op.inputs.b), tf.mod(op.inputs.a, op.inputs.b))),
  };
  tf.registerKernel(kernelMod);
  env.kernels.push('floormod');
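Note: this kernel polyfills `FloorMod` by composing existing ops; the fix replaces JS `*` and `+` (which are not defined for tensor objects) with explicit `tf.mul`/`tf.add` calls. For reference, floor-mod is defined as `floormod(a, b) = a - floor(a / b) * b`; a hedged standalone sketch of that identity on plain tfjs tensors, independent of the repo's kernel registration:

```ts
import * as tf from '@tensorflow/tfjs-core';

// floormod(a, b) = a - floor(a / b) * b; result takes the sign of the divisor
function floorMod(a: tf.Tensor, b: tf.Tensor): tf.Tensor {
  return tf.tidy(() => tf.sub(a, tf.mul(tf.floorDiv(a, b), b)));
}

floorMod(tf.tensor1d([5, -5]), tf.tensor1d([3, 3])).print(); // [2, 1]
```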
@@ -71,8 +71,8 @@ export async function check(instance: Human, force = false) {
  }

  // check available backends
- if (instance.config.backend === 'humangl') await humangl.register(instance);
- const available = Object.keys(tf.engine().registryFactory);
+ if (instance.config.backend === 'humangl') humangl.register(instance);
+ const available = Object.keys(tf.engine().registryFactory as Record<string, unknown>);
  if (instance.config.debug) log('available backends:', available);

  if (!available.includes(instance.config.backend)) {
@@ -87,7 +87,7 @@ export async function check(instance: Human, force = false) {
  if (instance.config.backend === 'wasm') {
    if (tf.env().flagRegistry.CANVAS2D_WILL_READ_FREQUENTLY) tf.env().set('CANVAS2D_WILL_READ_FREQUENTLY', true);
    if (instance.config.debug) log('wasm path:', instance.config.wasmPath);
-   if (typeof tf.setWasmPaths !== 'undefined') await tf.setWasmPaths(instance.config.wasmPath, instance.config.wasmPlatformFetch);
+   if (typeof tf.setWasmPaths !== 'undefined') tf.setWasmPaths(instance.config.wasmPath, instance.config.wasmPlatformFetch);
    else throw new Error('backend error: attempting to use wasm backend but wasm path is not set');
    let mt = false;
    let simd = false;
@@ -127,7 +127,7 @@ export async function check(instance: Human, force = false) {
     }
     if (tf.backend().getGPGPUContext) {
       const gl = await tf.backend().getGPGPUContext().gl;
-      if (instance.config.debug) log(`gl version:${gl.getParameter(gl.VERSION)} renderer:${gl.getParameter(gl.RENDERER)}`);
+      if (instance.config.debug) log(`gl version:${gl.getParameter(gl.VERSION) as string} renderer:${gl.getParameter(gl.RENDERER) as string}`);
     }
   }

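The `as string` casts exist because `getParameter` is typed as `any` in the DOM lib, so the cast documents the expected type where an `any` would otherwise flow into the template literal. Minimal illustration:

```ts
declare const gl: WebGL2RenderingContext; // assumed to exist for the sketch

// without the cast, `${gl.getParameter(gl.VERSION)}` interpolates an untyped `any`
const version = `gl version: ${gl.getParameter(gl.VERSION) as string}`;
```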
@@ -165,5 +165,5 @@ export function fakeOps(kernelNames: string[], config) {
   };
   tf.registerKernel(kernelConfig);
 }
- env.kernels = tf.getKernelsForBackend(tf.getBackend()).map((kernel) => kernel.kernelName.toLowerCase()); // re-scan registered ops
+ env.kernels = tf.getKernelsForBackend(tf.getBackend()).map((kernel) => (kernel.kernelName as string).toLowerCase()); // re-scan registered ops
 }

@@ -11,9 +11,9 @@ import type { AnyCanvas } from '../exports';
 export const config = {
   name: 'humangl',
   priority: 999,
-  canvas: <null | AnyCanvas>null,
-  gl: <null | WebGL2RenderingContext>null,
-  extensions: <string[] | null> [],
+  canvas: null as null | AnyCanvas,
+  gl: null as null | WebGL2RenderingContext,
+  extensions: [] as string[] | null,
   webGLattr: { // https://www.khronos.org/registry/webgl/specs/latest/1.0/#5.2
     alpha: false,
     antialias: false,
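Replacing `<T>value` with `value as T` follows the consistent-type-assertions style; the `as` form also stays parseable in `.tsx` files, where angle-bracket assertions collide with JSX syntax. Side by side:

```ts
// old angle-bracket assertion:
// const canvas = <null | HTMLCanvasElement>null;

// new `as` assertion, same runtime value and same static type:
const canvas = null as null | HTMLCanvasElement;
```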
@@ -42,7 +42,7 @@ function extensions(): void {
  *
  * @returns void
  */
-export async function register(instance: Human): Promise<void> {
+export function register(instance: Human): void {
   // force backend reload if gl context is not valid
   if (instance.config.backend !== 'humangl') return;
   if ((config.name in tf.engine().registry) && (!config.gl || !config.gl.getParameter(config.gl.VERSION))) {

@@ -56,7 +56,7 @@ export async function register(instance: Human): Promise<void> {
   }
   if (!tf.findBackend(config.name)) {
     try {
-      config.canvas = await image.canvas(100, 100);
+      config.canvas = image.canvas(100, 100);
     } catch (err) {
       log('error: cannot create canvas:', err);
       return;

@@ -74,7 +74,7 @@ export async function register(instance: Human): Promise<void> {
     return;
   }
   if (config.canvas) {
-    config.canvas.addEventListener('webglcontextlost', async (e) => {
+    config.canvas.addEventListener('webglcontextlost', (e) => {
       log('error: humangl:', e.type);
       log('possible browser memory leak using webgl or conflict with multiple backend registrations');
       instance.emit('error');
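Dropping `async` from the `webglcontextlost` handler removes a misused promise: an async listener returns a Promise that `addEventListener` silently discards. Sketch with a hypothetical canvas:

```ts
const canvas = document.createElement('canvas');
// a plain (non-async) handler returns void, so no promise is left dangling
canvas.addEventListener('webglcontextlost', (e: Event) => {
  console.log('context lost:', e.type);
});
```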
@@ -121,7 +121,7 @@ export async function register(instance: Human): Promise<void> {
   }
   const current = tf.backend().getGPGPUContext ? tf.backend().getGPGPUContext().gl : null;
   if (current) {
-    log(`humangl webgl version:${current.getParameter(current.VERSION)} renderer:${current.getParameter(current.RENDERER)}`);
+    log(`humangl webgl version:${current.getParameter(current.VERSION) as string} renderer:${current.getParameter(current.RENDERER) as string}`);
   } else {
     log('error: no current gl context:', current, config.gl);
     return;

@@ -23,7 +23,7 @@ export interface ModelInfo {

 export const modelStats: Record<string, ModelInfo> = {};

-async function httpHandler(url, init?): Promise<Response | null> {
+async function httpHandler(url: string, init?: RequestInit): Promise<Response | null> {
   if (options.debug) log('load model fetch:', url, init);
   return fetch(url, init);
 }

@@ -55,7 +55,7 @@ export async function loadModel(modelPath: string | undefined): Promise<GraphModel> {
     options.cacheSupported = false;
   }
   modelStats[shortModelName].inCache = (options.cacheSupported && options.cacheModels) && Object.keys(cachedModels).includes(cachedModelName); // is model found in cache
-  const tfLoadOptions = typeof fetch === 'undefined' ? {} : { fetchFunc: (url, init?) => httpHandler(url, init) };
+  const tfLoadOptions = typeof fetch === 'undefined' ? {} : { fetchFunc: (url: string, init?: RequestInit) => httpHandler(url, init) };
   const model: GraphModel = new tf.GraphModel(modelStats[shortModelName].inCache ? cachedModelName : modelUrl, tfLoadOptions) as unknown as GraphModel; // create model prototype and decide if load from cache or from original modelurl
   let loaded = false;
   try {
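Typing the fetch wrapper removes two implicit-`any` parameters while keeping the `(url, init)` shape that tfjs accepts for a custom `fetchFunc` in its load options. A self-contained sketch mirroring the hunk above:

```ts
async function httpHandler(url: string, init?: RequestInit): Promise<Response | null> {
  return fetch(url, init); // RequestInit comes from the standard DOM lib
}

// hypothetical usage of the typed handler as a tfjs fetchFunc
const tfLoadOptions = { fetchFunc: (url: string, init?: RequestInit) => httpHandler(url, init) };
```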
@@ -81,6 +81,6 @@ export async function loadModel(modelPath: string | undefined): Promise<GraphModel> {
       log('error saving model:', modelUrl, err);
     }
   }
-  validateModel(null, model, `${modelPath}`);
+  validateModel(null, model, `${modelPath || ''}`);
   return model;
 }
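`${modelPath || ''}` guards the template against `modelPath` being `undefined`, which would otherwise interpolate as the literal string 'undefined':

```ts
const modelPath: string | undefined = undefined; // hypothetical value
const bad = `${modelPath}`;        // 'undefined'
const good = `${modelPath || ''}`; // ''
```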
@@ -97,9 +97,9 @@ export class Env {
     this.worker = this.browser && this.offscreen ? (typeof WorkerGlobalScope !== 'undefined') : undefined;
     if (typeof navigator !== 'undefined') {
       const raw = navigator.userAgent.match(/\(([^()]+)\)/g);
-      if (raw && raw[0]) {
+      if (raw?.[0]) {
         const platformMatch = raw[0].match(/\(([^()]+)\)/g);
-        this.platform = (platformMatch && platformMatch[0]) ? platformMatch[0].replace(/\(|\)/g, '') : '';
+        this.platform = (platformMatch?.[0]) ? platformMatch[0].replace(/\(|\)/g, '') : '';
         this.agent = navigator.userAgent.replace(raw[0], '');
         if (this.platform[1]) this.agent = this.agent.replace(raw[1], '');
         this.agent = this.agent.replace(/ /g, ' ');
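`raw?.[0]` works because `String.match` returns `RegExpMatchArray | null`; optional chaining folds the null check and the index check into one expression:

```ts
const ua = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36'; // hypothetical agent string
const raw = ua.match(/\(([^()]+)\)/g);                   // string[] | null
const platform = raw?.[0]?.replace(/\(|\)/g, '') ?? ''; // 'X11; Linux x86_64'
```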

@@ -156,7 +156,7 @@ export class Env {
       this.webgpu.supported = false;
     }
     try {
-      this.kernels = tf.getKernelsForBackend(tf.getBackend()).map((kernel) => kernel.kernelName.toLowerCase());
+      this.kernels = tf.getKernelsForBackend(tf.getBackend()).map((kernel) => (kernel.kernelName as string).toLowerCase());
     } catch { /**/ }
   }

@@ -2,7 +2,7 @@
  * Results interpolation for smoothening of video detection results inbetween detected frames
  */

-import type { Result, FaceResult, BodyResult, HandResult, ObjectResult, GestureResult, PersonResult, Box, Point, BodyLandmark, BodyAnnotation } from '../result';
+import type { Result, FaceResult, BodyResult, HandResult, ObjectResult, PersonResult, Box, Point, BodyLandmark, BodyAnnotation } from '../result';
 import type { Config } from '../config';

 import * as moveNetCoords from '../body/movenetcoords';

@@ -103,7 +103,7 @@ export function calc(newResult: Result, config: Config): Result {
         annotations = bufferedResult.hand[i].annotations;
       } else if (newResult.hand[i].annotations) {
         for (const key of Object.keys(newResult.hand[i].annotations)) { // update annotations
-          annotations[key] = newResult.hand[i].annotations[key] && newResult.hand[i].annotations[key][0]
+          annotations[key] = newResult.hand[i]?.annotations?.[key]?.[0]
             ? newResult.hand[i].annotations[key]
               .map((val, j: number) => val
                 .map((coord: number, k: number) => ((bufferedFactor - 1) * bufferedResult.hand[i].annotations[key][j][k] + coord) / bufferedFactor))

@@ -173,7 +173,7 @@ export function calc(newResult: Result, config: Config): Result {
   }

   // just copy latest gestures without interpolation
-  if (newResult.gesture) bufferedResult.gesture = newResult.gesture as GestureResult[];
+  if (newResult.gesture) bufferedResult.gesture = newResult.gesture;

   // append interpolation performance data
   const t1 = now();

@@ -59,7 +59,7 @@ export function join(faces: FaceResult[], bodies: BodyResult[], hands: HandResult[]
     person.box = [minX, minY, Math.max(...x) - minX, Math.max(...y) - minY]; // create new overarching box

     // shape is known so we calculate boxRaw as well
-    if (shape && shape[1] && shape[2]) person.boxRaw = [person.box[0] / shape[2], person.box[1] / shape[1], person.box[2] / shape[2], person.box[3] / shape[1]];
+    if (shape?.[1] && shape?.[2]) person.boxRaw = [person.box[0] / shape[2], person.box[1] / shape[1], person.box[2] / shape[2], person.box[3] / shape[1]];

     persons.push(person);
   }

@@ -7,15 +7,15 @@ import { log } from './util';

 export const data = {};

-export type ProfileData = {
+export interface ProfileData {
   newBytes: number,
   peakBytes: number,
   newTensors: number,
-  kernels: Array<{
+  kernels: {
     id: number,
     kernelTimeMs: number,
     totalBytesSnapshot: number,
-  }>,
+  }[],
 }

 export function run(modelName: string, profileData: ProfileData): void { // profileData is tfjs internal type
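The profile typing moves from a type alias to an `interface` and from `Array<{...}>` to the `{...}[]` shorthand, matching the declaration style this commit adopts elsewhere. The same shape written standalone (names `KernelEntry`/`ProfileSummary` are hypothetical):

```ts
interface KernelEntry { id: number, kernelTimeMs: number, totalBytesSnapshot: number }

interface ProfileSummary {
  newBytes: number,
  peakBytes: number,
  newTensors: number,
  kernels: KernelEntry[], // equivalent to Array<KernelEntry>
}
```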
@@ -10,6 +10,7 @@ import { env } from './util/env';
 import type { Config } from './config';
 import type { Result } from './result';
 import type { Human, Models } from './human';
+import type { Tensor } from './exports';

 async function warmupBitmap(instance: Human): Promise<Result | undefined> {
   const b64toBlob = (base64: string, type = 'application/octet-stream') => fetch(`data:${type};base64,${base64}`).then((res) => res.blob());

@@ -76,10 +77,10 @@ async function warmupNode(instance: Human): Promise<Result | undefined> {
   let img;
   if (instance.config.warmup === 'face') img = atob(sample.face);
   else img = atob(sample.body);
-  let res;
+  let res: Result;
   if (('node' in tf) && (tf.getBackend() === 'tensorflow')) {
-    const data = tf['node'].decodeJpeg(img); // eslint-disable-line import/namespace
-    const expanded = data.expandDims(0);
+    const data: Tensor = tf['node'].decodeJpeg(img); // eslint-disable-line import/namespace
+    const expanded: Tensor = tf.expandDims(data, 0);
     instance.tf.dispose(data);
     // log('Input:', expanded);
     res = await instance.detect(expanded, instance.config);
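Annotating the intermediates as `Tensor` and switching from the chained `data.expandDims(0)` to the functional `tf.expandDims(data, 0)` keeps explicit types flowing through the node warmup path. A runnable sketch with a stand-in tensor:

```ts
import * as tf from '@tensorflow/tfjs';

const data: tf.Tensor = tf.zeros([64, 64, 3]);      // stand-in for decodeJpeg output
const expanded: tf.Tensor = tf.expandDims(data, 0); // shape [1, 64, 64, 3]
data.dispose();
```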
@@ -94,6 +95,7 @@ async function warmupNode(instance: Human): Promise<Result | undefined> {
     res = await instance.detect(input, instance.config);
     */
   }
+  // @ts-ignore
   return res;
 }

@@ -118,8 +120,8 @@ export async function runCompile(allModels: Models) {
   const numTensorsStart = tf.engine().state.numTensors;
   const compiledModels: string[] = [];
   for (const [modelName, model] of Object.entries(allModels).filter(([key, val]) => (key !== null && val !== null))) {
-    const shape = (model.inputs && model.inputs[0] && model.inputs[0].shape) ? [...model.inputs[0].shape] : [1, 64, 64, 3];
-    const dtype = (model.inputs && model.inputs[0] && model.inputs[0].dtype) ? model.inputs[0].dtype : 'float32';
+    const shape = (model.inputs?.[0]?.shape) ? [...model.inputs[0].shape] : [1, 64, 64, 3];
+    const dtype: string = (model.inputs?.[0]?.dtype) ? model.inputs[0].dtype : 'float32';
     for (let dim = 0; dim < shape.length; dim++) {
       if (shape[dim] === -1) shape[dim] = dim === 0 ? 1 : 64; // override batch number and any dynamic dimensions
     }
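`model.inputs?.[0]?.shape` with a fallback replaces the three-clause truthiness guard; dynamic `-1` dimensions are then pinned before compilation. The same logic isolated (the `ModelInput` type is hypothetical):

```ts
interface ModelInput { shape?: number[] }

const inputs: ModelInput[] = [{ shape: [-1, 224, 224, 3] }];
const firstShape = inputs?.[0]?.shape;
const shape = firstShape ? [...firstShape] : [1, 64, 64, 3]; // default when unknown
for (let dim = 0; dim < shape.length; dim++) {
  if (shape[dim] === -1) shape[dim] = dim === 0 ? 1 : 64; // -> [1, 224, 224, 3]
}
```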
@@ -1,39 +1,39 @@
-2022-08-21 13:32:23 DATA: Build {"name":"@vladmandic/human","version":"2.9.4"}
-2022-08-21 13:32:23 INFO: Application: {"name":"@vladmandic/human","version":"2.9.4"}
-2022-08-21 13:32:23 INFO: Environment: {"profile":"production","config":".build.json","package":"package.json","tsconfig":true,"eslintrc":true,"git":true}
-2022-08-21 13:32:23 INFO: Toolchain: {"build":"0.7.10","esbuild":"0.15.5","typescript":"4.7.4","typedoc":"0.23.10","eslint":"8.22.0"}
-2022-08-21 13:32:23 INFO: Build: {"profile":"production","steps":["clean","compile","typings","typedoc","lint","changelog"]}
-2022-08-21 13:32:23 STATE: Clean: {"locations":["dist/*","types/lib/*","typedoc/*"]}
-2022-08-21 13:32:23 STATE: Compile: {"name":"tfjs/nodejs/cpu","format":"cjs","platform":"node","input":"tfjs/tf-node.ts","output":"dist/tfjs.esm.js","files":1,"inputBytes":159,"outputBytes":608}
-2022-08-21 13:32:23 STATE: Compile: {"name":"human/nodejs/cpu","format":"cjs","platform":"node","input":"src/human.ts","output":"dist/human.node.js","files":75,"inputBytes":652939,"outputBytes":306172}
-2022-08-21 13:32:23 STATE: Compile: {"name":"tfjs/nodejs/gpu","format":"cjs","platform":"node","input":"tfjs/tf-node-gpu.ts","output":"dist/tfjs.esm.js","files":1,"inputBytes":167,"outputBytes":612}
-2022-08-21 13:32:23 STATE: Compile: {"name":"human/nodejs/gpu","format":"cjs","platform":"node","input":"src/human.ts","output":"dist/human.node-gpu.js","files":75,"inputBytes":652943,"outputBytes":306176}
-2022-08-21 13:32:23 STATE: Compile: {"name":"tfjs/nodejs/wasm","format":"cjs","platform":"node","input":"tfjs/tf-node-wasm.ts","output":"dist/tfjs.esm.js","files":1,"inputBytes":206,"outputBytes":664}
-2022-08-21 13:32:23 STATE: Compile: {"name":"human/nodejs/wasm","format":"cjs","platform":"node","input":"src/human.ts","output":"dist/human.node-wasm.js","files":75,"inputBytes":652995,"outputBytes":306226}
-2022-08-21 13:32:23 STATE: Compile: {"name":"tfjs/browser/version","format":"esm","platform":"browser","input":"tfjs/tf-version.ts","output":"dist/tfjs.version.js","files":1,"inputBytes":1125,"outputBytes":358}
-2022-08-21 13:32:23 STATE: Compile: {"name":"tfjs/browser/esm/nobundle","format":"esm","platform":"browser","input":"tfjs/tf-browser.ts","output":"dist/tfjs.esm.js","files":2,"inputBytes":1088,"outputBytes":583}
-2022-08-21 13:32:23 STATE: Compile: {"name":"human/browser/esm/nobundle","format":"esm","platform":"browser","input":"src/human.ts","output":"dist/human.esm-nobundle.js","files":75,"inputBytes":652914,"outputBytes":305046}
-2022-08-21 13:32:23 STATE: Compile: {"name":"tfjs/browser/esm/custom","format":"esm","platform":"browser","input":"tfjs/tf-custom.ts","output":"dist/tfjs.esm.js","files":11,"inputBytes":1344,"outputBytes":2787569}
-2022-08-21 13:32:23 STATE: Compile: {"name":"human/browser/iife/bundle","format":"iife","platform":"browser","input":"src/human.ts","output":"dist/human.js","files":75,"inputBytes":3439900,"outputBytes":1667925}
-2022-08-21 13:32:23 STATE: Compile: {"name":"human/browser/esm/bundle","format":"esm","platform":"browser","input":"src/human.ts","output":"dist/human.esm.js","files":75,"inputBytes":3439900,"outputBytes":3070714}
-2022-08-21 13:32:28 STATE: Typings: {"input":"src/human.ts","output":"types/lib","files":30}
-2022-08-21 13:32:29 STATE: TypeDoc: {"input":"src/human.ts","output":"typedoc","objects":77,"generated":true}
-2022-08-21 13:32:29 STATE: Compile: {"name":"demo/typescript","format":"esm","platform":"browser","input":"demo/typescript/index.ts","output":"demo/typescript/index.js","files":1,"inputBytes":6699,"outputBytes":3141}
-2022-08-21 13:32:29 STATE: Compile: {"name":"demo/faceid","format":"esm","platform":"browser","input":"demo/faceid/index.ts","output":"demo/faceid/index.js","files":2,"inputBytes":15549,"outputBytes":7741}
-2022-08-21 13:32:39 STATE: Lint: {"locations":["*.json","src/**/*.ts","test/**/*.js","demo/**/*.js"],"files":111,"errors":0,"warnings":0}
-2022-08-21 13:32:39 STATE: ChangeLog: {"repository":"https://github.com/vladmandic/human","branch":"main","output":"CHANGELOG.md"}
-2022-08-21 13:32:39 STATE: Copy: {"input":"tfjs/tfjs.esm.d.ts"}
-2022-08-21 13:32:39 INFO: Done...
-2022-08-21 13:32:40 STATE: API-Extractor: {"succeeeded":true,"errors":0,"warnings":198}
-2022-08-21 13:32:40 STATE: Copy: {"input":"types/human.d.ts"}
-2022-08-21 13:32:40 INFO: Analyze models: {"folders":8,"result":"models/models.json"}
-2022-08-21 13:32:40 STATE: Models {"folder":"./models","models":13}
-2022-08-21 13:32:40 STATE: Models {"folder":"../human-models/models","models":42}
-2022-08-21 13:32:40 STATE: Models {"folder":"../blazepose/model/","models":4}
-2022-08-21 13:32:40 STATE: Models {"folder":"../anti-spoofing/model","models":1}
-2022-08-21 13:32:40 STATE: Models {"folder":"../efficientpose/models","models":3}
-2022-08-21 13:32:40 STATE: Models {"folder":"../insightface/models","models":5}
-2022-08-21 13:32:40 STATE: Models {"folder":"../movenet/models","models":3}
-2022-08-21 13:32:40 STATE: Models {"folder":"../nanodet/models","models":4}
-2022-08-21 13:32:40 STATE: Models: {"count":57,"totalSize":383017442}
-2022-08-21 13:32:40 INFO: Human Build complete... {"logFile":"test/build.log"}
+2022-08-21 15:21:09 DATA: Build {"name":"@vladmandic/human","version":"2.9.4"}
+2022-08-21 15:21:09 INFO: Application: {"name":"@vladmandic/human","version":"2.9.4"}
+2022-08-21 15:21:09 INFO: Environment: {"profile":"production","config":".build.json","package":"package.json","tsconfig":true,"eslintrc":true,"git":true}
+2022-08-21 15:21:09 INFO: Toolchain: {"build":"0.7.10","esbuild":"0.15.5","typescript":"4.7.4","typedoc":"0.23.10","eslint":"8.22.0"}
+2022-08-21 15:21:09 INFO: Build: {"profile":"production","steps":["clean","compile","typings","typedoc","lint","changelog"]}
+2022-08-21 15:21:09 STATE: Clean: {"locations":["dist/*","types/lib/*","typedoc/*"]}
+2022-08-21 15:21:09 STATE: Compile: {"name":"tfjs/nodejs/cpu","format":"cjs","platform":"node","input":"tfjs/tf-node.ts","output":"dist/tfjs.esm.js","files":1,"inputBytes":159,"outputBytes":608}
+2022-08-21 15:21:09 STATE: Compile: {"name":"human/nodejs/cpu","format":"cjs","platform":"node","input":"src/human.ts","output":"dist/human.node.js","files":75,"inputBytes":653284,"outputBytes":306632}
+2022-08-21 15:21:09 STATE: Compile: {"name":"tfjs/nodejs/gpu","format":"cjs","platform":"node","input":"tfjs/tf-node-gpu.ts","output":"dist/tfjs.esm.js","files":1,"inputBytes":167,"outputBytes":612}
+2022-08-21 15:21:09 STATE: Compile: {"name":"human/nodejs/gpu","format":"cjs","platform":"node","input":"src/human.ts","output":"dist/human.node-gpu.js","files":75,"inputBytes":653288,"outputBytes":306636}
+2022-08-21 15:21:09 STATE: Compile: {"name":"tfjs/nodejs/wasm","format":"cjs","platform":"node","input":"tfjs/tf-node-wasm.ts","output":"dist/tfjs.esm.js","files":1,"inputBytes":206,"outputBytes":664}
+2022-08-21 15:21:09 STATE: Compile: {"name":"human/nodejs/wasm","format":"cjs","platform":"node","input":"src/human.ts","output":"dist/human.node-wasm.js","files":75,"inputBytes":653340,"outputBytes":306686}
+2022-08-21 15:21:09 STATE: Compile: {"name":"tfjs/browser/version","format":"esm","platform":"browser","input":"tfjs/tf-version.ts","output":"dist/tfjs.version.js","files":1,"inputBytes":1125,"outputBytes":358}
+2022-08-21 15:21:09 STATE: Compile: {"name":"tfjs/browser/esm/nobundle","format":"esm","platform":"browser","input":"tfjs/tf-browser.ts","output":"dist/tfjs.esm.js","files":2,"inputBytes":1088,"outputBytes":583}
+2022-08-21 15:21:09 STATE: Compile: {"name":"human/browser/esm/nobundle","format":"esm","platform":"browser","input":"src/human.ts","output":"dist/human.esm-nobundle.js","files":75,"inputBytes":653259,"outputBytes":305505}
+2022-08-21 15:21:09 STATE: Compile: {"name":"tfjs/browser/esm/custom","format":"esm","platform":"browser","input":"tfjs/tf-custom.ts","output":"dist/tfjs.esm.js","files":11,"inputBytes":1344,"outputBytes":2787569}
+2022-08-21 15:21:09 STATE: Compile: {"name":"human/browser/iife/bundle","format":"iife","platform":"browser","input":"src/human.ts","output":"dist/human.js","files":75,"inputBytes":3440245,"outputBytes":1668404}
+2022-08-21 15:21:09 STATE: Compile: {"name":"human/browser/esm/bundle","format":"esm","platform":"browser","input":"src/human.ts","output":"dist/human.esm.js","files":75,"inputBytes":3440245,"outputBytes":3071598}
+2022-08-21 15:21:14 STATE: Typings: {"input":"src/human.ts","output":"types/lib","files":30}
+2022-08-21 15:21:16 STATE: TypeDoc: {"input":"src/human.ts","output":"typedoc","objects":77,"generated":true}
+2022-08-21 15:21:16 STATE: Compile: {"name":"demo/typescript","format":"esm","platform":"browser","input":"demo/typescript/index.ts","output":"demo/typescript/index.js","files":1,"inputBytes":6714,"outputBytes":3134}
+2022-08-21 15:21:16 STATE: Compile: {"name":"demo/faceid","format":"esm","platform":"browser","input":"demo/faceid/index.ts","output":"demo/faceid/index.js","files":2,"inputBytes":15501,"outputBytes":7733}
+2022-08-21 15:21:27 STATE: Lint: {"locations":["*.json","src/**/*.ts","test/**/*.js","demo/**/*.js"],"files":111,"errors":0,"warnings":0}
+2022-08-21 15:21:27 STATE: ChangeLog: {"repository":"https://github.com/vladmandic/human","branch":"main","output":"CHANGELOG.md"}
+2022-08-21 15:21:27 STATE: Copy: {"input":"tfjs/tfjs.esm.d.ts"}
+2022-08-21 15:21:27 INFO: Done...
+2022-08-21 15:21:27 STATE: API-Extractor: {"succeeeded":true,"errors":0,"warnings":198}
+2022-08-21 15:21:27 STATE: Copy: {"input":"types/human.d.ts"}
+2022-08-21 15:21:27 INFO: Analyze models: {"folders":8,"result":"models/models.json"}
+2022-08-21 15:21:27 STATE: Models {"folder":"./models","models":13}
+2022-08-21 15:21:27 STATE: Models {"folder":"../human-models/models","models":42}
+2022-08-21 15:21:27 STATE: Models {"folder":"../blazepose/model/","models":4}
+2022-08-21 15:21:27 STATE: Models {"folder":"../anti-spoofing/model","models":1}
+2022-08-21 15:21:27 STATE: Models {"folder":"../efficientpose/models","models":3}
+2022-08-21 15:21:27 STATE: Models {"folder":"../insightface/models","models":5}
+2022-08-21 15:21:27 STATE: Models {"folder":"../movenet/models","models":3}
+2022-08-21 15:21:27 STATE: Models {"folder":"../nanodet/models","models":4}
+2022-08-21 15:21:28 STATE: Models: {"count":57,"totalSize":383017442}
+2022-08-21 15:21:28 INFO: Human Build complete... {"logFile":"test/build.log"}
 1940  test/test.log
File diff suppressed because it is too large