refactoring

pull/356/head
Vladimir Mandic 2021-09-27 13:58:13 -04:00
parent a21e3c95ed
commit 6be1b062fb
27 changed files with 268 additions and 610 deletions

View File

@@ -11,6 +11,7 @@
### **HEAD -> main** 2021/09/27 mandic00@live.com
- implement box caching for movenet
- autodetect number of bodies and hands
- upload new samples
- new samples gallery and major code folder restructure

161
src/body/blazepose.ts Normal file
View File

@@ -0,0 +1,161 @@
/**
* BlazePose model implementation
*
* Based on : [**BlazePose**](https://github.com/google/mediapipe/blob/master/mediapipe/modules/pose_detection)
*/
import { log, join } from '../util/util';
import * as tf from '../../dist/tfjs.esm.js';
import type { BodyResult, Box, Point } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../util/env';
import * as annotations from './annotations';
// const boxScaleFact = 1.5; // hand finger model prefers slightly larger box
const models: [GraphModel | null, GraphModel | null] = [null, null];
const outputNodes = ['ld_3d', 'activation_segmentation', 'activation_heatmap', 'world_3d', 'output_poseflag'];
const inputSize = [[0, 0], [0, 0]];
// let skipped = 0;
let outputSize: [number, number] = [0, 0];
type Keypoints = { score: number, part: string, position: Point, positionRaw: Point };
/*
type BodyDetectResult = {
id: number,
score: number,
box: Box,
boxRaw: Box,
label: string,
yxBox: Box,
}
const cache: {
bodyBoxes: Array<BodyDetectResult>,
partBoxes: Array<BodyDetectResult>
tmpBoxes: Array<BodyDetectResult>
} = {
bodyBoxes: [],
partBoxes: [],
tmpBoxes: [],
};
*/
export async function loadDetect(config: Config): Promise<GraphModel> {
if (env.initial) models[0] = null;
if (!models[0]) {
models[0] = await tf.loadGraphModel(join(config.modelBasePath, config.body.detector?.modelPath || '')) as unknown as GraphModel;
const inputs = Object.values(models[0].modelSignature['inputs']);
inputSize[0][0] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
inputSize[0][1] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 0;
if (!models[0] || !models[0]['modelUrl']) log('load model failed:', config.body.detector?.modelPath);
else if (config.debug) log('load model:', models[0]['modelUrl']);
} else if (config.debug) log('cached model:', models[0]['modelUrl']);
return models[0];
}
export async function loadPose(config: Config): Promise<GraphModel> {
if (env.initial) models[1] = null;
if (!models[1]) {
models[1] = await tf.loadGraphModel(join(config.modelBasePath, config.body.modelPath || '')) as unknown as GraphModel;
const inputs = Object.values(models[1].modelSignature['inputs']);
inputSize[1][0] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
inputSize[1][1] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 0;
if (!models[1] || !models[1]['modelUrl']) log('load model failed:', config.body.modelPath);
else if (config.debug) log('load model:', models[1]['modelUrl']);
} else if (config.debug) log('cached model:', models[1]['modelUrl']);
return models[1];
}
export async function load(config: Config): Promise<[GraphModel | null, GraphModel | null]> {
if (!models[0]) await loadDetect(config);
if (!models[1]) await loadPose(config);
return models;
}
/*
async function detectBody(input: Tensor, config: Config): Promise<BodyDetectResult[]> {
if ((config.body.detector?.modelPath.length || 0) > 0 && models[0]) {
const t: Record<string, Tensor> = {};
t.resize = tf.image.resizeBilinear(input, [inputSize[0][0], inputSize[0][1]]);
t.res = await models[0]?.predict(t.resize) as Tensor; // [1,2254,13]
t.logits = tf.slice(t.res, [0, 0, 0], [1, -1, 1]);
t.sigmoid = tf.sigmoid(t.logits);
t.rawBoxes = tf.slice(t.res, [0, 0, 1], [1, -1, -1]);
t.packedBoxes = tf.squeeze(t.rawBoxes); // [2254,12]
t.scores = tf.squeeze(t.sigmoid); // [2254,1]
// boxes need to be decoded based on anchors
Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
}
return [];
}
*/
async function detectParts(input: Tensor, config: Config): Promise<BodyResult> {
const t: Record<string, Tensor> = {};
t.resize = tf.image.resizeBilinear(input, [inputSize[1][0], inputSize[1][1]]);
[t.ld/* 1,195 */, t.segmentation/* 1,256,256,1 */, t.heatmap/* 1,64,64,39 */, t.world/* 1,117 */, t.poseflag/* 1,1 */] = await models[1]?.execute(t.resize, outputNodes) as Tensor[];
const points = await t.ld.data();
const keypoints: Array<Keypoints> = [];
const labels = points?.length === 195 ? annotations.full : annotations.upper; // full model has 39 keypoints, upper has 31 keypoints
const depth = 5; // each point has x,y,z,visibility,presence
for (let i = 0; i < points.length / depth; i++) {
const score = (100 - Math.trunc(100 / (1 + Math.exp(points[depth * i + 3])))) / 100; // sigmoid of visibility logit, truncated to two decimals
// const presence = (100 - Math.trunc(100 / (1 + Math.exp(points[depth * i + 4])))) / 100; // reverse sigmoid value
if (score > (config.body.minConfidence || 0)) {
keypoints.push({
part: labels[i],
position: [
Math.trunc(outputSize[0] * points[depth * i + 0] / 255), // x value scaled from 0..255 to output size
Math.trunc(outputSize[1] * points[depth * i + 1] / 255), // y value scaled from 0..255 to output size
Math.trunc(points[depth * i + 2]) + 0, // fix negative zero
],
positionRaw: [
points[depth * i + 0] / 255, // return x value normalized to 0..1
points[depth * i + 1] / 255, // return y value normalized to 0..1
points[depth * i + 2] + 0, // fix negative zero
],
score,
});
}
}
const x = keypoints.map((a) => a.position[0]);
const y = keypoints.map((a) => a.position[1]);
const box: Box = [
Math.min(...x),
Math.min(...y),
Math.max(...x) - Math.min(...x),
Math.max(...y) - Math.min(...y),
];
const boxRaw: Box = [0, 0, 0, 0]; // not yet implemented
const score = keypoints.reduce((prev, curr) => (curr.score > prev ? curr.score : prev), 0);
Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
return { id: 0, score, box, boxRaw, keypoints };
}
export async function predict(input: Tensor, config: Config): Promise<BodyResult[]> {
outputSize = [input.shape[2] || 0, input.shape[1] || 0];
const bodies: Array<BodyResult> = [];
const body = await detectParts(input, config);
bodies.push(body);
/*
cache.tmpBoxes = []; // clear temp cache
if ((skipped < (config.body.skipFrames || 0)) && config.skipFrame) { // just run part detection while reusing cached boxes
skipped++;
bodies = await Promise.all(cache.partBoxes.map((body) => detectParts(input, body, config))); // run from parts box cache
} else { // calculate new boxes and run part detection
skipped = 0;
bodies = await Promise.all(cache.partBoxes.map((body) => detectParts(input, body, config))); // run from part box cache
if (bodies.length !== config.body.maxDetected) { // run body detection only if we dont have enough bodies in cache
cache.bodyBoxes = await detectBody(input, config);
const newBodies = await Promise.all(cache.bodyBoxes.map((body) => detectParts(input, body, config)));
bodies = bodies.concat(newBodies);
}
}
cache.partBoxes = [...cache.tmpBoxes]; // repopulate cache with validated bodies
*/
return bodies as BodyResult[];
}
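
A minimal usage sketch for this new module, assuming the TFJS backend is already initialized and the input has been converted to a [1, height, width, 3] tensor; import paths and the helper name are illustrative, not part of the commit:

import * as blazepose from './body/blazepose';
import type { Config } from './config';
import type { Tensor } from './tfjs/types';

// hypothetical helper showing the intended call sequence
async function runBlazePose(input: Tensor, config: Config) {
  await blazepose.load(config); // lazily loads detector (if configured) and pose models
  const bodies = await blazepose.predict(input, config); // currently returns a single BodyResult
  for (const body of bodies) console.log('body score:', body.score, 'keypoints:', body.keypoints.length);
  return bodies;
}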

View File

@@ -4,12 +4,12 @@
* Based on: [**EfficientPose**](https://github.com/daniegr/EfficientPose)
*/
import { log, join } from '../util';
import { log, join } from '../util/util';
import * as tf from '../../dist/tfjs.esm.js';
import type { BodyResult, Box } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';
import { env } from '../util/env';
let model: GraphModel | null;

View File

@@ -4,19 +4,20 @@
* Based on: [**MoveNet**](https://blog.tensorflow.org/2021/05/next-generation-pose-detection-with-movenet-and-tensorflowjs.html)
*/
import { log, join, scaleBox } from '../util';
import { log, join } from '../util/util';
import { scale } from '../util/box';
import * as tf from '../../dist/tfjs.esm.js';
import type { BodyResult, Box } from '../result';
import type { BodyResult, Box, Point } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import { fakeOps } from '../tfjs/backend';
import { env } from '../env';
import { env } from '../util/env';
let model: GraphModel | null;
let inputSize = 0;
const cachedBoxes: Array<Box> = [];
type Keypoints = { score: number, part: string, position: [number, number], positionRaw: [number, number] };
type Keypoints = { score: number, part: string, position: Point, positionRaw: Point };
type Body = { id: number, score: number, box: Box, boxRaw: Box, keypoints: Array<Keypoints> }
let skipped = Number.MAX_SAFE_INTEGER;
@@ -157,7 +158,7 @@ export async function predict(input: Tensor, config: Config): Promise<BodyResult[]>
for (let i = 0; i < bodies.length; i++) {
if (bodies[i].keypoints.length > 10) { // only update cache if we detected sufficient number of keypoints
const kpts = bodies[i].keypoints.map((kpt) => kpt.position);
const newBox = scaleBox(kpts, 1.5, [input.shape[2], input.shape[1]]);
const newBox = scale(kpts, 1.5, [input.shape[2], input.shape[1]]);
cachedBoxes.push([...newBox.yxBox]);
}
}

View File

@@ -70,6 +70,7 @@ export interface FaceConfig {
* - modelPath: body pose model, can be absolute path or relative to modelBasePath
* - minConfidence: threshold for discarding a prediction
* - maxDetected: maximum number of people detected in the input, should be set to the minimum number for performance
* - detector: optional body detector
*
* `maxDetected` is valid for `posenet` and `movenet-multipose` as other models are single-pose only
* `maxDetected` can be set to -1 to auto-detect based on number of detected faces
@@ -83,6 +84,9 @@ export interface BodyConfig {
maxDetected: number,
minConfidence: number,
skipFrames: number,
detector?: {
modelPath: string
},
}
/** Controls and configures all hand detection specific options
@@ -399,6 +403,9 @@ const config: Config = {
enabled: true,
modelPath: 'movenet-lightning.json', // body model, can be absolute path or relative to modelBasePath
// can be 'posenet', 'blazepose', 'efficientpose', 'movenet-lightning', 'movenet-thunder'
detector: {
modelPath: '', // optional body detector
},
maxDetected: -1, // maximum number of people detected in the input
// should be set to the minimum number for performance
// only valid for posenet and movenet-multipose as other models detect a single pose
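
As a usage sketch, enabling the new optional detector from application code could look like this; the model filenames are placeholders, not files shipped with this change:

import Human from '@vladmandic/human';

const human = new Human({
  body: {
    enabled: true,
    modelPath: 'blazepose.json', // placeholder pose model filename
    detector: { modelPath: 'blazepose-detect.json' }, // placeholder: optional body detector
    maxDetected: 1,
    minConfidence: 0.3,
  },
});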

View File

@@ -1,515 +0,0 @@
/**
* Module that implements helper draw functions, exposed as human.draw
*/
import { TRI468 as triangulation } from './blazeface/coords';
import { mergeDeep, now } from './util';
import type { Result, FaceResult, BodyResult, HandResult, ObjectResult, GestureResult, PersonResult } from './result';
/**
* Draw Options
* Accessed via `human.draw.options` or provided per each draw method as the drawOptions optional parameter
* -color: draw color
* -labelColor: color for labels
* -shadowColor: optional shadow color for labels
* -font: font for labels
* -lineHeight: line height for labels, used for multi-line labels,
* -lineWidth: width of any lines,
* -pointSize: size of any point,
* -roundRect: for boxes, round corners by this many pixels,
* -drawPoints: should points be drawn,
* -drawLabels: should labels be drawn,
* -drawBoxes: should boxes be drawn,
* -drawPolygons: should polygons be drawn,
* -fillPolygons: should drawn polygons be filled,
* -useDepth: use z-axis coordinate as color shade,
* -useCurves: draw polygons as curves or as lines,
* -bufferedOutput: experimental: allows draw methods to be called multiple times for each detection, interpolating between results for smoother animations
*/
export interface DrawOptions {
color: string,
labelColor: string,
shadowColor: string,
font: string,
lineHeight: number,
lineWidth: number,
pointSize: number,
roundRect: number,
drawPoints: boolean,
drawLabels: boolean,
drawBoxes: boolean,
drawPolygons: boolean,
drawGaze: boolean,
fillPolygons: boolean,
useDepth: boolean,
useCurves: boolean,
bufferedOutput: boolean,
}
export const options: DrawOptions = {
color: <string>'rgba(173, 216, 230, 0.6)', // 'lightblue' with light alpha channel
labelColor: <string>'rgba(173, 216, 230, 1)', // 'lightblue' with dark alpha channel
shadowColor: <string>'black',
font: <string>'small-caps 14px "Segoe UI"',
lineHeight: <number>18,
lineWidth: <number>4,
pointSize: <number>2,
roundRect: <number>8,
drawPoints: <boolean>false,
drawLabels: <boolean>true,
drawBoxes: <boolean>true,
drawPolygons: <boolean>true,
drawGaze: <boolean>true,
fillPolygons: <boolean>false,
useDepth: <boolean>true,
useCurves: <boolean>false,
bufferedOutput: <boolean>true,
};
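These defaults can be overridden either globally via human.draw.options or per call through the drawOptions parameter; a short sketch, assuming an existing human instance, a target canvas, and a populated detection result:

human.draw.options.drawPoints = true; // global override, applies to all subsequent draw calls
await human.draw.all(canvas, result, { useDepth: false, lineWidth: 2 }); // per-call override, merged over the defaults
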
const getCanvasContext = (input) => {
if (input && input.getContext) return input.getContext('2d');
throw new Error('invalid canvas');
};
const rad2deg = (theta) => Math.round((theta * 180) / Math.PI);
function point(ctx, x, y, z = 0, localOptions) {
ctx.fillStyle = localOptions.useDepth && z ? `rgba(${127.5 + (2 * z)}, ${127.5 - (2 * z)}, 255, 0.3)` : localOptions.color;
ctx.beginPath();
ctx.arc(x, y, localOptions.pointSize, 0, 2 * Math.PI);
ctx.fill();
}
function rect(ctx, x, y, width, height, localOptions) {
ctx.beginPath();
if (localOptions.useCurves) {
const cx = (x + x + width) / 2;
const cy = (y + y + height) / 2;
ctx.ellipse(cx, cy, width / 2, height / 2, 0, 0, 2 * Math.PI);
} else {
ctx.lineWidth = localOptions.lineWidth;
ctx.moveTo(x + localOptions.roundRect, y);
ctx.lineTo(x + width - localOptions.roundRect, y);
ctx.quadraticCurveTo(x + width, y, x + width, y + localOptions.roundRect);
ctx.lineTo(x + width, y + height - localOptions.roundRect);
ctx.quadraticCurveTo(x + width, y + height, x + width - localOptions.roundRect, y + height);
ctx.lineTo(x + localOptions.roundRect, y + height);
ctx.quadraticCurveTo(x, y + height, x, y + height - localOptions.roundRect);
ctx.lineTo(x, y + localOptions.roundRect);
ctx.quadraticCurveTo(x, y, x + localOptions.roundRect, y);
ctx.closePath();
}
ctx.stroke();
}
function lines(ctx, points: [number, number, number?][] = [], localOptions) {
if (points === undefined || points.length === 0) return;
ctx.beginPath();
ctx.moveTo(points[0][0], points[0][1]);
for (const pt of points) {
const z = pt[2] || 0;
ctx.strokeStyle = localOptions.useDepth && z ? `rgba(${127.5 + (2 * z)}, ${127.5 - (2 * z)}, 255, 0.3)` : localOptions.color;
ctx.fillStyle = localOptions.useDepth && z ? `rgba(${127.5 + (2 * z)}, ${127.5 - (2 * z)}, 255, 0.3)` : localOptions.color;
ctx.lineTo(pt[0], Math.round(pt[1]));
}
ctx.stroke();
if (localOptions.fillPolygons) {
ctx.closePath();
ctx.fill();
}
}
function curves(ctx, points: [number, number, number?][] = [], localOptions) {
if (points === undefined || points.length === 0) return;
if (!localOptions.useCurves || points.length <= 2) {
lines(ctx, points, localOptions);
return;
}
ctx.moveTo(points[0][0], points[0][1]);
for (let i = 0; i < points.length - 2; i++) {
const xc = (points[i][0] + points[i + 1][0]) / 2;
const yc = (points[i][1] + points[i + 1][1]) / 2;
ctx.quadraticCurveTo(points[i][0], points[i][1], xc, yc);
}
ctx.quadraticCurveTo(points[points.length - 2][0], points[points.length - 2][1], points[points.length - 1][0], points[points.length - 1][1]);
ctx.stroke();
if (localOptions.fillPolygons) {
ctx.closePath();
ctx.fill();
}
}
export async function gesture(inCanvas: HTMLCanvasElement | OffscreenCanvas, result: Array<GestureResult>, drawOptions?: Partial<DrawOptions>) {
const localOptions = mergeDeep(options, drawOptions);
if (!result || !inCanvas) return;
const ctx = getCanvasContext(inCanvas);
ctx.font = localOptions.font;
ctx.fillStyle = localOptions.color;
let i = 1;
for (let j = 0; j < result.length; j++) {
let where: unknown[] = []; // what&where is a record
let what: unknown[] = []; // what&where is a record
[where, what] = Object.entries(result[j]);
if ((what.length > 1) && ((what[1] as string).length > 0)) {
const who = where[1] as number > 0 ? `#${where[1]}` : '';
const label = `${where[0]} ${who}: ${what[1]}`;
if (localOptions.shadowColor && localOptions.shadowColor !== '') {
ctx.fillStyle = localOptions.shadowColor;
ctx.fillText(label, 8, 2 + (i * localOptions.lineHeight));
}
ctx.fillStyle = localOptions.labelColor;
ctx.fillText(label, 6, 0 + (i * localOptions.lineHeight));
i += 1;
}
}
}
export async function face(inCanvas: HTMLCanvasElement | OffscreenCanvas, result: Array<FaceResult>, drawOptions?: Partial<DrawOptions>) {
const localOptions = mergeDeep(options, drawOptions);
if (!result || !inCanvas) return;
const ctx = getCanvasContext(inCanvas);
for (const f of result) {
ctx.font = localOptions.font;
ctx.strokeStyle = localOptions.color;
ctx.fillStyle = localOptions.color;
if (localOptions.drawBoxes) rect(ctx, f.box[0], f.box[1], f.box[2], f.box[3], localOptions);
// silly hack since fillText does not support new lines
const labels:string[] = [];
labels.push(`face: ${Math.trunc(100 * f.score)}%`);
if (f.genderScore) labels.push(`${f.gender || ''} ${Math.trunc(100 * f.genderScore)}%`);
if (f.age) labels.push(`age: ${f.age || ''}`);
if (f.iris) labels.push(`distance: ${f.iris}`);
if (f.emotion && f.emotion.length > 0) {
const emotion = f.emotion.map((a) => `${Math.trunc(100 * a.score)}% ${a.emotion}`);
if (emotion.length > 3) emotion.length = 3;
labels.push(emotion.join(' '));
}
if (f.rotation && f.rotation.angle && f.rotation.gaze) {
if (f.rotation.angle.roll) labels.push(`roll: ${rad2deg(f.rotation.angle.roll)}° yaw:${rad2deg(f.rotation.angle.yaw)}° pitch:${rad2deg(f.rotation.angle.pitch)}°`);
if (f.rotation.gaze.bearing) labels.push(`gaze: ${rad2deg(f.rotation.gaze.bearing)}°`);
}
if (labels.length === 0) labels.push('face');
ctx.fillStyle = localOptions.color;
for (let i = labels.length - 1; i >= 0; i--) {
const x = Math.max(f.box[0], 0);
const y = i * localOptions.lineHeight + f.box[1];
if (localOptions.shadowColor && localOptions.shadowColor !== '') {
ctx.fillStyle = localOptions.shadowColor;
ctx.fillText(labels[i], x + 5, y + 16);
}
ctx.fillStyle = localOptions.labelColor;
ctx.fillText(labels[i], x + 4, y + 15);
}
ctx.lineWidth = 1;
if (f.mesh && f.mesh.length > 0) {
if (localOptions.drawPoints) {
for (const pt of f.mesh) point(ctx, pt[0], pt[1], pt[2], localOptions);
// for (const pt of f.meshRaw) point(ctx, pt[0] * inCanvas.offsetWidth, pt[1] * inCanvas.offsetHeight, pt[2]);
}
if (localOptions.drawPolygons) {
ctx.lineWidth = 1;
for (let i = 0; i < triangulation.length / 3; i++) {
const points = [
triangulation[i * 3 + 0],
triangulation[i * 3 + 1],
triangulation[i * 3 + 2],
].map((index) => f.mesh[index]);
lines(ctx, points, localOptions);
}
// iris: array[center, left, top, right, bottom]
if (f.annotations && f.annotations['leftEyeIris']) {
ctx.strokeStyle = localOptions.useDepth ? 'rgba(255, 200, 255, 0.3)' : localOptions.color;
ctx.beginPath();
const sizeX = Math.abs(f.annotations['leftEyeIris'][3][0] - f.annotations['leftEyeIris'][1][0]) / 2;
const sizeY = Math.abs(f.annotations['leftEyeIris'][4][1] - f.annotations['leftEyeIris'][2][1]) / 2;
ctx.ellipse(f.annotations['leftEyeIris'][0][0], f.annotations['leftEyeIris'][0][1], sizeX, sizeY, 0, 0, 2 * Math.PI);
ctx.stroke();
if (localOptions.fillPolygons) {
ctx.fillStyle = localOptions.useDepth ? 'rgba(255, 255, 200, 0.3)' : localOptions.color;
ctx.fill();
}
}
if (f.annotations && f.annotations['rightEyeIris']) {
ctx.strokeStyle = localOptions.useDepth ? 'rgba(255, 200, 255, 0.3)' : localOptions.color;
ctx.beginPath();
const sizeX = Math.abs(f.annotations['rightEyeIris'][3][0] - f.annotations['rightEyeIris'][1][0]) / 2;
const sizeY = Math.abs(f.annotations['rightEyeIris'][4][1] - f.annotations['rightEyeIris'][2][1]) / 2;
ctx.ellipse(f.annotations['rightEyeIris'][0][0], f.annotations['rightEyeIris'][0][1], sizeX, sizeY, 0, 0, 2 * Math.PI);
ctx.stroke();
if (localOptions.fillPolygons) {
ctx.fillStyle = localOptions.useDepth ? 'rgba(255, 255, 200, 0.3)' : localOptions.color;
ctx.fill();
}
}
if (localOptions.drawGaze && f.rotation?.gaze?.strength && f.rotation?.gaze?.bearing && f.annotations['leftEyeIris'] && f.annotations['rightEyeIris'] && f.annotations['leftEyeIris'][0] && f.annotations['rightEyeIris'][0]) {
ctx.strokeStyle = 'pink';
ctx.beginPath();
const leftGaze = [
f.annotations['leftEyeIris'][0][0] + (Math.sin(f.rotation.gaze.bearing) * f.rotation.gaze.strength * f.box[3]),
f.annotations['leftEyeIris'][0][1] + (Math.cos(f.rotation.gaze.bearing) * f.rotation.gaze.strength * f.box[2]),
];
ctx.moveTo(f.annotations['leftEyeIris'][0][0], f.annotations['leftEyeIris'][0][1]);
ctx.lineTo(leftGaze[0], leftGaze[1]);
const rightGaze = [
f.annotations['rightEyeIris'][0][0] + (Math.sin(f.rotation.gaze.bearing) * f.rotation.gaze.strength * f.box[3]),
f.annotations['rightEyeIris'][0][1] + (Math.cos(f.rotation.gaze.bearing) * f.rotation.gaze.strength * f.box[2]),
];
ctx.moveTo(f.annotations['rightEyeIris'][0][0], f.annotations['rightEyeIris'][0][1]);
ctx.lineTo(rightGaze[0], rightGaze[1]);
ctx.stroke();
}
}
}
}
}
export async function body(inCanvas: HTMLCanvasElement | OffscreenCanvas, result: Array<BodyResult>, drawOptions?: Partial<DrawOptions>) {
const localOptions = mergeDeep(options, drawOptions);
if (!result || !inCanvas) return;
const ctx = getCanvasContext(inCanvas);
ctx.lineJoin = 'round';
for (let i = 0; i < result.length; i++) {
ctx.strokeStyle = localOptions.color;
ctx.fillStyle = localOptions.color;
ctx.lineWidth = localOptions.lineWidth;
ctx.font = localOptions.font;
if (localOptions.drawBoxes && result[i].box && result[i].box?.length === 4) {
rect(ctx, result[i].box[0], result[i].box[1], result[i].box[2], result[i].box[3], localOptions);
if (localOptions.drawLabels) {
if (localOptions.shadowColor && localOptions.shadowColor !== '') {
ctx.fillStyle = localOptions.shadowColor;
ctx.fillText(`body ${Math.trunc(100 * result[i].score)}%`, result[i].box[0] + 3, 1 + result[i].box[1] + localOptions.lineHeight, result[i].box[2]);
}
ctx.fillStyle = localOptions.labelColor;
ctx.fillText(`body ${Math.trunc(100 * result[i].score)}%`, result[i].box[0] + 2, 0 + result[i].box[1] + localOptions.lineHeight, result[i].box[2]);
}
}
if (localOptions.drawPoints) {
for (let pt = 0; pt < result[i].keypoints.length; pt++) {
ctx.fillStyle = localOptions.useDepth && result[i].keypoints[pt].position[2] ? `rgba(${127.5 + (2 * (result[i].keypoints[pt].position[2] || 0))}, ${127.5 - (2 * (result[i].keypoints[pt].position[2] || 0))}, 255, 0.5)` : localOptions.color;
point(ctx, result[i].keypoints[pt].position[0], result[i].keypoints[pt].position[1], 0, localOptions);
}
}
if (localOptions.drawLabels) {
ctx.font = localOptions.font;
if (result[i].keypoints) {
for (const pt of result[i].keypoints) {
ctx.fillStyle = localOptions.useDepth && pt.position[2] ? `rgba(${127.5 + (2 * pt.position[2])}, ${127.5 - (2 * pt.position[2])}, 255, 0.5)` : localOptions.color;
ctx.fillText(`${pt.part} ${Math.trunc(100 * pt.score)}%`, pt.position[0] + 4, pt.position[1] + 4);
}
}
}
if (localOptions.drawPolygons && result[i].keypoints) {
let part;
const points: [number, number, number?][] = [];
// shoulder line
points.length = 0;
part = result[i].keypoints.find((a) => a.part === 'leftShoulder');
if (part) points.push([part.position[0], part.position[1]]);
part = result[i].keypoints.find((a) => a.part === 'rightShoulder');
if (part) points.push([part.position[0], part.position[1]]);
curves(ctx, points, localOptions);
// torso main
points.length = 0;
part = result[i].keypoints.find((a) => a.part === 'rightShoulder');
if (part) points.push([part.position[0], part.position[1]]);
part = result[i].keypoints.find((a) => a.part === 'rightHip');
if (part) points.push([part.position[0], part.position[1]]);
part = result[i].keypoints.find((a) => a.part === 'leftHip');
if (part) points.push([part.position[0], part.position[1]]);
part = result[i].keypoints.find((a) => a.part === 'leftShoulder');
if (part) points.push([part.position[0], part.position[1]]);
if (points.length === 4) lines(ctx, points, localOptions); // only draw if we have complete torso
// leg left
points.length = 0;
part = result[i].keypoints.find((a) => a.part === 'leftHip');
if (part) points.push([part.position[0], part.position[1]]);
part = result[i].keypoints.find((a) => a.part === 'leftKnee');
if (part) points.push([part.position[0], part.position[1]]);
part = result[i].keypoints.find((a) => a.part === 'leftAnkle');
if (part) points.push([part.position[0], part.position[1]]);
part = result[i].keypoints.find((a) => a.part === 'leftHeel');
if (part) points.push([part.position[0], part.position[1]]);
part = result[i].keypoints.find((a) => a.part === 'leftFoot');
if (part) points.push([part.position[0], part.position[1]]);
curves(ctx, points, localOptions);
// leg right
points.length = 0;
part = result[i].keypoints.find((a) => a.part === 'rightHip');
if (part) points.push([part.position[0], part.position[1]]);
part = result[i].keypoints.find((a) => a.part === 'rightKnee');
if (part) points.push([part.position[0], part.position[1]]);
part = result[i].keypoints.find((a) => a.part === 'rightAnkle');
if (part) points.push([part.position[0], part.position[1]]);
part = result[i].keypoints.find((a) => a.part === 'rightHeel');
if (part) points.push([part.position[0], part.position[1]]);
part = result[i].keypoints.find((a) => a.part === 'rightFoot');
if (part) points.push([part.position[0], part.position[1]]);
curves(ctx, points, localOptions);
// arm left
points.length = 0;
part = result[i].keypoints.find((a) => a.part === 'leftShoulder');
if (part) points.push([part.position[0], part.position[1]]);
part = result[i].keypoints.find((a) => a.part === 'leftElbow');
if (part) points.push([part.position[0], part.position[1]]);
part = result[i].keypoints.find((a) => a.part === 'leftWrist');
if (part) points.push([part.position[0], part.position[1]]);
part = result[i].keypoints.find((a) => a.part === 'leftPalm');
if (part) points.push([part.position[0], part.position[1]]);
curves(ctx, points, localOptions);
// arm right
points.length = 0;
part = result[i].keypoints.find((a) => a.part === 'rightShoulder');
if (part) points.push([part.position[0], part.position[1]]);
part = result[i].keypoints.find((a) => a.part === 'rightElbow');
if (part) points.push([part.position[0], part.position[1]]);
part = result[i].keypoints.find((a) => a.part === 'rightWrist');
if (part) points.push([part.position[0], part.position[1]]);
part = result[i].keypoints.find((a) => a.part === 'rightPalm');
if (part) points.push([part.position[0], part.position[1]]);
curves(ctx, points, localOptions);
// draw all
}
}
}
export async function hand(inCanvas: HTMLCanvasElement | OffscreenCanvas, result: Array<HandResult>, drawOptions?: Partial<DrawOptions>) {
const localOptions = mergeDeep(options, drawOptions);
if (!result || !inCanvas) return;
const ctx = getCanvasContext(inCanvas);
ctx.lineJoin = 'round';
ctx.font = localOptions.font;
for (const h of result) {
if (localOptions.drawBoxes) {
ctx.strokeStyle = localOptions.color;
ctx.fillStyle = localOptions.color;
rect(ctx, h.box[0], h.box[1], h.box[2], h.box[3], localOptions);
if (localOptions.drawLabels) {
if (localOptions.shadowColor && localOptions.shadowColor !== '') {
ctx.fillStyle = localOptions.shadowColor;
ctx.fillText(`${h.label}:${Math.trunc(100 * h.score)}%`, h.box[0] + 3, 1 + h.box[1] + localOptions.lineHeight, h.box[2]);
}
ctx.fillStyle = localOptions.labelColor;
ctx.fillText(`${h.label}:${Math.trunc(100 * h.score)}%`, h.box[0] + 2, 0 + h.box[1] + localOptions.lineHeight, h.box[2]);
}
ctx.stroke();
}
if (localOptions.drawPoints) {
if (h.keypoints && h.keypoints.length > 0) {
for (const pt of h.keypoints) {
ctx.fillStyle = localOptions.useDepth ? `rgba(${127.5 + (2 * (pt[2] || 0))}, ${127.5 - (2 * (pt[2] || 0))}, 255, 0.5)` : localOptions.color;
point(ctx, pt[0], pt[1], 0, localOptions);
}
}
}
if (localOptions.drawLabels && h.annotations) {
const addHandLabel = (part, title) => {
if (!part || part.length === 0 || !part[0]) return;
ctx.fillStyle = localOptions.useDepth ? `rgba(${127.5 + (2 * part[part.length - 1][2])}, ${127.5 - (2 * part[part.length - 1][2])}, 255, 0.5)` : localOptions.color;
ctx.fillText(title, part[part.length - 1][0] + 4, part[part.length - 1][1] + 4);
};
ctx.font = localOptions.font;
addHandLabel(h.annotations['index'], 'index');
addHandLabel(h.annotations['middle'], 'middle');
addHandLabel(h.annotations['ring'], 'ring');
addHandLabel(h.annotations['pinky'], 'pinky');
addHandLabel(h.annotations['thumb'], 'thumb');
addHandLabel(h.annotations['palm'], 'palm');
}
if (localOptions.drawPolygons && h.annotations) {
const addHandLine = (part) => {
if (!part || part.length === 0 || !part[0]) return;
for (let i = 0; i < part.length; i++) {
ctx.beginPath();
ctx.strokeStyle = localOptions.useDepth ? `rgba(${127.5 + (2 * part[i][2])}, ${127.5 - (2 * part[i][2])}, 255, 0.5)` : localOptions.color;
ctx.moveTo(part[i > 0 ? i - 1 : 0][0], part[i > 0 ? i - 1 : 0][1]);
ctx.lineTo(part[i][0], part[i][1]);
ctx.stroke();
}
};
ctx.lineWidth = localOptions.lineWidth;
addHandLine(h.annotations['index']);
addHandLine(h.annotations['middle']);
addHandLine(h.annotations['ring']);
addHandLine(h.annotations['pinky']);
addHandLine(h.annotations['thumb']);
// addPart(h.annotations.palm);
}
}
}
export async function object(inCanvas: HTMLCanvasElement | OffscreenCanvas, result: Array<ObjectResult>, drawOptions?: Partial<DrawOptions>) {
const localOptions = mergeDeep(options, drawOptions);
if (!result || !inCanvas) return;
const ctx = getCanvasContext(inCanvas);
ctx.lineJoin = 'round';
ctx.font = localOptions.font;
for (const h of result) {
if (localOptions.drawBoxes) {
ctx.strokeStyle = localOptions.color;
ctx.fillStyle = localOptions.color;
rect(ctx, h.box[0], h.box[1], h.box[2], h.box[3], localOptions);
if (localOptions.drawLabels) {
const label = `${h.label} ${Math.round(100 * h.score)}%`;
if (localOptions.shadowColor && localOptions.shadowColor !== '') {
ctx.fillStyle = localOptions.shadowColor;
ctx.fillText(label, h.box[0] + 3, 1 + h.box[1] + localOptions.lineHeight, h.box[2]);
}
ctx.fillStyle = localOptions.labelColor;
ctx.fillText(label, h.box[0] + 2, 0 + h.box[1] + localOptions.lineHeight, h.box[2]);
}
ctx.stroke();
}
}
}
export async function person(inCanvas: HTMLCanvasElement | OffscreenCanvas, result: Array<PersonResult>, drawOptions?: Partial<DrawOptions>) {
const localOptions = mergeDeep(options, drawOptions);
if (!result || !inCanvas) return;
const ctx = getCanvasContext(inCanvas);
ctx.lineJoin = 'round';
ctx.font = localOptions.font;
for (let i = 0; i < result.length; i++) {
if (localOptions.drawBoxes) {
ctx.strokeStyle = localOptions.color;
ctx.fillStyle = localOptions.color;
rect(ctx, result[i].box[0], result[i].box[1], result[i].box[2], result[i].box[3], localOptions);
if (localOptions.drawLabels) {
const label = `person #${i}`;
if (localOptions.shadowColor && localOptions.shadowColor !== '') {
ctx.fillStyle = localOptions.shadowColor;
ctx.fillText(label, result[i].box[0] + 3, 1 + result[i].box[1] + localOptions.lineHeight, result[i].box[2]);
}
ctx.fillStyle = localOptions.labelColor;
ctx.fillText(label, result[i].box[0] + 2, 0 + result[i].box[1] + localOptions.lineHeight, result[i].box[2]);
}
ctx.stroke();
}
}
}
export async function canvas(input: HTMLCanvasElement | OffscreenCanvas | HTMLImageElement | HTMLMediaElement | HTMLVideoElement, output: HTMLCanvasElement) {
if (!input || !output) return;
const ctx = getCanvasContext(output);
ctx.drawImage(input, 0, 0);
}
export async function all(inCanvas: HTMLCanvasElement | OffscreenCanvas, result: Result, drawOptions?: Partial<DrawOptions>) {
if (!result || !result.performance || !inCanvas) return null;
const timestamp = now();
const localOptions = mergeDeep(options, drawOptions);
const promise = Promise.all([
face(inCanvas, result.face, localOptions),
body(inCanvas, result.body, localOptions),
hand(inCanvas, result.hand, localOptions),
object(inCanvas, result.object, localOptions),
gesture(inCanvas, result.gesture, localOptions), // gestures do not have buffering
// person(inCanvas, result.persons, localOptions); // already included above
]);
result.performance.draw = Math.trunc(now() - timestamp);
return promise;
}

View File

@@ -3,13 +3,13 @@
* Uses FaceMesh, Emotion and FaceRes models to create a unified pipeline
*/
import { log, now } from './util';
import * as tf from '../dist/tfjs.esm.js';
import * as facemesh from './blazeface/facemesh';
import * as emotion from './emotion/emotion';
import * as faceres from './faceres/faceres';
import type { FaceResult } from './result';
import type { Tensor } from './tfjs/types';
import { log, now } from '../util/util';
import * as tf from '../../dist/tfjs.esm.js';
import * as facemesh from '../blazeface/facemesh';
import * as emotion from '../gear/emotion';
import * as faceres from './faceres';
import type { FaceResult } from '../result';
import type { Tensor } from '../tfjs/types';
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
const rad2deg = (theta) => Math.round((theta * 180) / Math.PI);

View File

@@ -7,11 +7,11 @@
* Based on: [**HSE-FaceRes**](https://github.com/HSE-asavchenko/HSE_FaceRec_tf)
*/
import { log, join } from '../util';
import { log, join } from '../util/util';
import * as tf from '../../dist/tfjs.esm.js';
import type { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';
import { env } from '../util/env';
let model: GraphModel | null;
const last: Array<{

View File

@@ -4,11 +4,11 @@
* [**Oarriaga**](https://github.com/oarriaga/face_classification)
*/
import { log, join } from '../util';
import { log, join } from '../util/util';
import type { Config } from '../config';
import type { GraphModel, Tensor } from '../tfjs/types';
import * as tf from '../../dist/tfjs.esm.js';
import { env } from '../env';
import { env } from '../util/env';
const annotations = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral'];
let model: GraphModel | null;

View File

@@ -6,11 +6,11 @@
* Obsolete and replaced by `faceres` that performs age/gender/descriptor analysis
*/
import { log, join } from '../util';
import { log, join } from '../util/util';
import * as tf from '../../dist/tfjs.esm.js';
import type { Config } from '../config';
import type { GraphModel, Tensor } from '../tfjs/types';
import { env } from '../env';
import { env } from '../util/env';
let model: GraphModel | null;

View File

@@ -6,11 +6,11 @@
* Obsolete and replaced by `faceres` that performs age/gender/descriptor analysis
*/
import { log, join } from '../util';
import { log, join } from '../util/util';
import * as tf from '../../dist/tfjs.esm.js';
import type { Config } from '../config';
import type { GraphModel, Tensor } from '../tfjs/types';
import { env } from '../env';
import { env } from '../util/env';
let model: GraphModel | null;
let last = { gender: '' };

View File

@@ -6,12 +6,13 @@
* - Hand Tracking: [**HandTracking**](https://github.com/victordibia/handtracking)
*/
import { log, join, scaleBox } from '../util';
import { log, join } from '../util/util';
import { scale } from '../util/box';
import * as tf from '../../dist/tfjs.esm.js';
import type { HandResult, Box } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';
import { env } from '../util/env';
import * as fingerPose from '../fingerpose/fingerpose';
import { fakeOps } from '../tfjs/backend';
@@ -168,7 +169,7 @@ async function detectFingers(input: Tensor, h: HandDetectResult, config: Config)
(h.box[3] * coord[1] / inputSize[1][1]) + h.box[1],
(h.box[2] + h.box[3]) / 2 / inputSize[1][0] * coord[2],
]);
const updatedBox = scaleBox(hand.keypoints, boxScaleFact, outputSize); // replace detected box with box calculated around keypoints
const updatedBox = scale(hand.keypoints, boxScaleFact, outputSize); // replace detected box with box calculated around keypoints
h.box = updatedBox.box;
h.boxRaw = updatedBox.boxRaw;
h.yxBox = updatedBox.yxBox;

View File

@@ -8,7 +8,7 @@ import * as box from './box';
import * as util from './util';
import type * as detector from './handdetector';
import type { Tensor, GraphModel } from '../tfjs/types';
import { env } from '../env';
import { env } from '../util/env';
const palmBoxEnlargeFactor = 5; // default 3
const handBoxEnlargeFactor = 1.65; // default 1.65

View File

@@ -4,7 +4,7 @@
* Based on: [**MediaPipe HandPose**](https://drive.google.com/file/d/1sv4sSb9BSNVZhLzxXJ0jBv9DqD-4jnAz/view)
*/
import { log, join } from '../util';
import { log, join } from '../util/util';
import * as tf from '../../dist/tfjs.esm.js';
import * as handdetector from './handdetector';
import * as handpipeline from './handpipeline';
@@ -12,7 +12,7 @@ import * as fingerPose from '../fingerpose/fingerpose';
import type { HandResult, Box, Point } from '../result';
import type { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';
import { env } from '../util/env';
const meshAnnotations = {
thumb: [1, 2, 3, 4],

View File

@@ -2,41 +2,42 @@
* Human main module
*/
import { log, now, mergeDeep, validate } from './util';
import { log, now, mergeDeep, validate } from './util/util';
import { Config, defaults } from './config';
import type { Result, FaceResult, HandResult, BodyResult, ObjectResult, GestureResult, PersonResult } from './result';
import * as tf from '../dist/tfjs.esm.js';
import * as models from './models';
import * as face from './face';
import * as face from './face/face';
import * as facemesh from './blazeface/facemesh';
import * as faceres from './faceres/faceres';
import * as faceres from './face/faceres';
import * as posenet from './posenet/posenet';
import * as handtrack from './handtrack/handtrack';
import * as handtrack from './hand/handtrack';
import * as handpose from './handpose/handpose';
import * as blazepose from './blazepose/blazepose';
import * as efficientpose from './efficientpose/efficientpose';
import * as movenet from './movenet/movenet';
// import * as blazepose from './body/blazepose-v1';
import * as blazepose from './body/blazepose';
import * as efficientpose from './body/efficientpose';
import * as movenet from './body/movenet';
import * as nanodet from './object/nanodet';
import * as centernet from './object/centernet';
import * as segmentation from './segmentation/segmentation';
import * as gesture from './gesture/gesture';
import * as image from './image/image';
import * as draw from './draw';
import * as draw from './util/draw';
import * as persons from './persons';
import * as interpolate from './interpolate';
import * as env from './env';
import * as interpolate from './util/interpolate';
import * as env from './util/env';
import * as backend from './tfjs/backend';
import * as humangl from './tfjs/humangl';
import * as app from '../package.json';
import * as warmups from './warmup';
import type { Tensor } from './tfjs/types';
import type { DrawOptions } from './draw';
import type { DrawOptions } from './util/draw';
// export types
export * from './config';
export * from './result';
export type { DrawOptions } from './draw';
export { env, Env } from './env';
export type { DrawOptions } from './util/draw';
export { env, Env } from './util/env';
export { Box, Point } from './result';
export { Models } from './models';

View File

@@ -6,8 +6,8 @@ import * as tf from '../../dist/tfjs.esm.js';
import * as fxImage from './imagefx';
import type { Tensor } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';
import { log } from '../util';
import { env } from '../util/env';
import { log } from '../util/util';
type Input = Tensor | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas | typeof Image | typeof env.Canvas;

View File

@@ -2,23 +2,23 @@
* Loader and Validator for all models used by Human
*/
import { log } from './util';
import { log } from './util/util';
import type { GraphModel } from './tfjs/types';
import * as facemesh from './blazeface/facemesh';
import * as faceres from './faceres/faceres';
import * as emotion from './emotion/emotion';
import * as faceres from './face/faceres';
import * as emotion from './gear/emotion';
import * as posenet from './posenet/posenet';
import * as handpose from './handpose/handpose';
import * as handtrack from './handtrack/handtrack';
import * as blazepose from './blazepose/blazepose';
import * as efficientpose from './efficientpose/efficientpose';
import * as movenet from './movenet/movenet';
import * as handtrack from './hand/handtrack';
import * as blazepose from './body/blazepose';
import * as efficientpose from './body/efficientpose';
import * as movenet from './body/movenet';
import * as nanodet from './object/nanodet';
import * as centernet from './object/centernet';
import * as segmentation from './segmentation/segmentation';
import type { Human } from './human';
import { env } from './env';
import * as agegenderrace from './gear/agegenderrace';
import { env } from './util/env';
import * as agegenderrace from './gear/gear-agegenderrace';
/** Instances of all possible TFJS Graph Models used by Human
* - loaded as needed based on configuration
@@ -29,6 +29,7 @@ import * as agegenderrace from './gear/agegenderrace';
export class Models {
age: null | GraphModel | Promise<GraphModel> = null;
agegenderrace: null | GraphModel | Promise<GraphModel> = null;
blazeposedetect: null | GraphModel | Promise<GraphModel> = null;
blazepose: null | GraphModel | Promise<GraphModel> = null;
centernet: null | GraphModel | Promise<GraphModel> = null;
efficientpose: null | GraphModel | Promise<GraphModel> = null;
@@ -69,8 +70,9 @@ export async function load(instance: Human) {
if (instance.config.hand.enabled && instance.config.hand.landmarks && !instance.models.handskeleton && instance.config.hand.detector?.modelPath?.includes('handtrack')) instance.models.handskeleton = handtrack.loadSkeleton(instance.config);
if (instance.config.body.enabled && !instance.models.posenet && instance.config.body?.modelPath?.includes('posenet')) instance.models.posenet = posenet.load(instance.config);
if (instance.config.body.enabled && !instance.models.efficientpose && instance.config.body?.modelPath?.includes('efficientpose')) instance.models.efficientpose = efficientpose.load(instance.config);
if (instance.config.body.enabled && !instance.models.blazepose && instance.config.body?.modelPath?.includes('blazepose')) instance.models.blazepose = blazepose.load(instance.config);
if (instance.config.body.enabled && !instance.models.efficientpose && instance.config.body?.modelPath?.includes('efficientpose')) instance.models.efficientpose = blazepose.load(instance.config);
if (instance.config.body.enabled && !instance.models.blazepose && instance.config.body?.modelPath?.includes('blazepose')) instance.models.blazepose = blazepose.loadPose(instance.config);
if (instance.config.body.enabled && !instance.models.blazeposedetect && instance.config.body.detector?.modelPath && instance.config.body?.modelPath?.includes('blazepose')) instance.models.blazeposedetect = blazepose.loadDetect(instance.config);
if (instance.config.body.enabled && !instance.models.efficientpose && instance.config.body?.modelPath?.includes('efficientpose')) instance.models.efficientpose = efficientpose.load(instance.config);
if (instance.config.body.enabled && !instance.models.movenet && instance.config.body?.modelPath?.includes('movenet')) instance.models.movenet = movenet.load(instance.config);
if (instance.config.object.enabled && !instance.models.nanodet && instance.config.object?.modelPath?.includes('nanodet')) instance.models.nanodet = nanodet.load(instance.config);
if (instance.config.object.enabled && !instance.models.centernet && instance.config.object?.modelPath?.includes('centernet')) instance.models.centernet = centernet.load(instance.config);
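
The chain above keys model loading off substrings of the configured paths; a sketch of the intended effect for the new blazepose split, with placeholder filenames:

const human = new Human({ body: { enabled: true, modelPath: 'blazepose.json', detector: { modelPath: 'blazepose-detect.json' } } }); // placeholder filenames
await human.load(); // populates human.models.blazepose via loadPose() and human.models.blazeposedetect via loadDetect()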

View File

@@ -4,13 +4,13 @@
* Based on: [**NanoDet**](https://github.com/RangiLyu/nanodet)
*/
import { log, join } from '../util';
import { log, join } from '../util/util';
import * as tf from '../../dist/tfjs.esm.js';
import { labels } from './labels';
import type { ObjectResult, Box } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';
import { env } from '../util/env';
import { fakeOps } from '../tfjs/backend';
let model: GraphModel | null;

View File

@@ -4,13 +4,13 @@
* Based on: [**MB3-CenterNet**](https://github.com/610265158/mobilenetv3_centernet)
*/
import { log, join } from '../util';
import { log, join } from '../util/util';
import * as tf from '../../dist/tfjs.esm.js';
import { labels } from './labels';
import type { ObjectResult, Box } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';
import { env } from '../util/env';
let model;
let last: Array<ObjectResult> = [];

View File

@@ -1,8 +1,8 @@
/** TFJS backend initialization and customization */
import { log, now } from '../util';
import { log, now } from '../util/util';
import * as humangl from './humangl';
import * as env from '../env';
import * as env from '../util/env';
import * as tf from '../../dist/tfjs.esm.js';
export async function check(instance, force = false) {

View File

@@ -1,6 +1,6 @@
/** TFJS custom backend registration */
import { log } from '../util';
import { log } from '../util/util';
import * as tf from '../../dist/tfjs.esm.js';
import * as image from '../image/image';
import * as models from '../models';

28
src/util/box.ts Normal file
View File

@@ -0,0 +1,28 @@
import type { Box } from '../result';
// helper function: find box around keypoints, square it and scale it
export function scale(keypoints, boxScaleFact, outputSize) {
const coords = [keypoints.map((pt) => pt[0]), keypoints.map((pt) => pt[1])]; // all x/y coords
const maxmin = [Math.max(...coords[0]), Math.min(...coords[0]), Math.max(...coords[1]), Math.min(...coords[1])]; // find min/max x/y coordinates
const center = [(maxmin[0] + maxmin[1]) / 2, (maxmin[2] + maxmin[3]) / 2]; // find center x and y coord of all fingers
const diff = Math.max(center[0] - maxmin[1], center[1] - maxmin[3], -center[0] + maxmin[0], -center[1] + maxmin[2]) * boxScaleFact; // largest distance from center in any direction
const box = [
Math.trunc(center[0] - diff),
Math.trunc(center[1] - diff),
Math.trunc(2 * diff),
Math.trunc(2 * diff),
] as Box;
const boxRaw = [ // work backwards
box[0] / outputSize[0],
box[1] / outputSize[1],
box[2] / outputSize[0],
box[3] / outputSize[1],
] as Box;
const yxBox = [ // work backwards
boxRaw[1],
boxRaw[0],
boxRaw[3] + boxRaw[1],
boxRaw[2] + boxRaw[0],
] as Box;
return { box, boxRaw, yxBox };
}
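
A worked sketch of what the new `scale` helper returns, using two hypothetical keypoints:

import { scale } from './util/box';

// two keypoints spanning (100,100)..(200,300) on a 640x480 output
const { box, boxRaw, yxBox } = scale([[100, 100], [200, 300]], 1.5, [640, 480]);
// center is (150,200); the largest distance from center is 100, scaled by 1.5 to 150
// box -> [0, 50, 300, 300] in pixels; boxRaw divides by output size; yxBox reorders to normalized [y1, x1, y2, x2]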

View File

@@ -1,5 +1,5 @@
import * as tf from '../dist/tfjs.esm.js';
import * as image from './image/image';
import * as tf from '../../dist/tfjs.esm.js';
import * as image from '../image/image';
import { mergeDeep } from './util';
export type Env = {

View File

@@ -2,7 +2,7 @@
* Results interpolation for smoothing of video detection results in between detected frames
*/
import type { Result, FaceResult, BodyResult, HandResult, ObjectResult, GestureResult, PersonResult, Box, Point } from './result';
import type { Result, FaceResult, BodyResult, HandResult, ObjectResult, GestureResult, PersonResult, Box, Point } from '../result';
const bufferedResult: Result = { face: [], body: [], hand: [], gesture: [], object: [], persons: [], performance: {}, timestamp: 0 };

View File

@@ -2,8 +2,6 @@
* Simple helper functions used across codebase
*/
import type { Box } from './result';
// helper function: join two paths
export function join(folder: string, file: string): string {
const separator = folder.endsWith('/') ? '' : '/';
@@ -71,30 +69,3 @@ export async function wait(time) {
const waiting = new Promise((resolve) => setTimeout(() => resolve(true), time));
await waiting;
}
// helper function: find box around keypoints, square it and scale it
export function scaleBox(keypoints, boxScaleFact, outputSize) {
const coords = [keypoints.map((pt) => pt[0]), keypoints.map((pt) => pt[1])]; // all x/y coords
const maxmin = [Math.max(...coords[0]), Math.min(...coords[0]), Math.max(...coords[1]), Math.min(...coords[1])]; // find min/max x/y coordinates
const center = [(maxmin[0] + maxmin[1]) / 2, (maxmin[2] + maxmin[3]) / 2]; // find center x and y coord of all fingers
const diff = Math.max(center[0] - maxmin[1], center[1] - maxmin[3], -center[0] + maxmin[0], -center[1] + maxmin[2]) * boxScaleFact; // largest distance from center in any direction
const box = [
Math.trunc(center[0] - diff),
Math.trunc(center[1] - diff),
Math.trunc(2 * diff),
Math.trunc(2 * diff),
] as Box;
const boxRaw = [ // work backwards
box[0] / outputSize[0],
box[1] / outputSize[1],
box[2] / outputSize[0],
box[3] / outputSize[1],
] as Box;
const yxBox = [ // work backwards
boxRaw[1],
boxRaw[0],
boxRaw[3] + boxRaw[1],
boxRaw[2] + boxRaw[0],
] as Box;
return { box, boxRaw, yxBox };
}

View File

@@ -2,13 +2,13 @@
* Warmup algorithm that uses embedded images to exercise loaded models for faster future inference
*/
import { log, now, mergeDeep } from './util';
import { log, now, mergeDeep } from './util/util';
import * as sample from './sample';
import * as tf from '../dist/tfjs.esm.js';
import * as image from './image/image';
import type { Config } from './config';
import type { Result } from './result';
import { env } from './env';
import { env } from './util/env';
async function warmupBitmap(instance) {
const b64toBlob = (base64: string, type = 'application/octet-stream') => fetch(`data:${type};base64,${base64}`).then((res) => res.blob());