fix multiple memory leaks

pull/356/head
Vladimir Mandic 2021-09-13 13:28:35 -04:00
parent fd0f85a8e9
commit a0f5922b9a
26 changed files with 182 additions and 130 deletions

View File

@@ -13,7 +13,7 @@
"locations": ["dist/*", "types/*", "typedoc/*"]
},
"lint": {
"locations": [ "src/**/*.ts", "test/*.js", "demo/**/*.js" ],
"locations": [ "*.json", "src/**/*.ts", "test/**/*.js", "demo/**/*.js" ],
"rules": { }
},
"changelog": {
@@ -133,7 +133,7 @@
]
},
"watch": {
"locations": [ "src/**", "tfjs/*" ]
"locations": [ "src/**/*", "tfjs/**/*" ]
},
"typescript": {
"allowJs": false

View File

@@ -5,8 +5,8 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { Config } from '../config';
import { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import type { GraphModel, Tensor } from '../tfjs/types';
let model: GraphModel;
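
The recurring change across the import sections in this commit is TypeScript's type-only import. Anything brought in with import type is erased at compile time, so a bundler like esbuild, which transforms one file at a time and cannot know on its own whether an imported name is a type or a value, never emits a runtime load for it, and import cycles that exist only at the type level disappear. A minimal sketch of the distinction, reusing module paths from this file:

import * as tf from '../../dist/tfjs.esm.js'; // value import: stays in the bundle and loads at runtime
import type { Config } from '../config'; // type-only import: fully erased by the compiler
import type { GraphModel } from '../tfjs/types';

let model: GraphModel; // ok: GraphModel appears only in a type position
export async function load(config: Config) { // ok: Config is only an annotation
  model = await tf.loadGraphModel(config.modelBasePath); // tf is a value, so it needs a real import
}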

View File

@@ -186,8 +186,8 @@ export interface GestureConfig {
*/
export interface Config {
/** Backend used for TFJS operations */
// backend: '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu' | null,
backend: string;
backend: '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu',
// backend: string;
/** Path to *.wasm files if backend is set to `wasm` */
wasmPath: string,
@@ -202,8 +202,8 @@ export interface Config {
* - warmup pre-initializes all models for faster inference but can take significant time on startup
* - only used for `webgl` and `humangl` backends
*/
// warmup: 'none' | 'face' | 'full' | 'body' | string,
warmup: string;
warmup: 'none' | 'face' | 'full' | 'body',
// warmup: string;
/** Base model path (typically starting with file://, http:// or https://) for all models
* - individual modelPath values are relative to this path
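
Note the commit flips which declaration is live: previously the union existed only as a comment and the active type was a plain string. Narrowing backend and warmup to literal unions moves validation to compile time, so a misspelled value is rejected by tsc instead of surfacing as a runtime backend failure. A short sketch of what the narrowed types buy:

type Backend = '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu';
type Warmup = 'none' | 'face' | 'full' | 'body';

const ok: Backend = 'humangl'; // compiles
// @ts-expect-error caught at compile time: 'webgll' is not assignable to type Backend
const typo: Backend = 'webgll';
const warm: Warmup = 'face';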

View File

@@ -4,9 +4,9 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { BodyResult } from '../result';
import { GraphModel, Tensor } from '../tfjs/types';
import { Config } from '../config';
import type { BodyResult } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
let model: GraphModel;

View File

@@ -3,8 +3,8 @@
*/
import { log, join } from '../helpers';
import { Config } from '../config';
import { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';
import type { Tensor, GraphModel } from '../tfjs/types';
import * as tf from '../../dist/tfjs.esm.js';
const annotations = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral'];

View File

@@ -8,8 +8,8 @@ import * as tf from '../dist/tfjs.esm.js';
import * as facemesh from './blazeface/facemesh';
import * as emotion from './emotion/emotion';
import * as faceres from './faceres/faceres';
import { FaceResult } from './result';
import { Tensor } from './tfjs/types';
import type { FaceResult } from './result';
import type { Tensor } from './tfjs/types';
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
const rad2deg = (theta) => Math.round((theta * 180) / Math.PI);
@@ -250,7 +250,6 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
rotation,
tensor,
});
parent.analyze('End Face');
}
parent.analyze('End FaceMesh:');

View File

@@ -6,8 +6,8 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { Tensor, GraphModel } from '../tfjs/types';
import { Config } from '../config';
import type { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';
let model: GraphModel;
const last: Array<{
@@ -140,7 +140,8 @@ export async function predict(image: Tensor, config: Config, idx, count) {
}
const argmax = tf.argMax(resT.find((t) => t.shape[1] === 100), 1);
const age = (await argmax.data())[0];
const all = await resT.find((t) => t.shape[1] === 100).data(); // inside tf.tidy
tf.dispose(argmax);
const all = await resT.find((t) => t.shape[1] === 100).data();
obj.age = Math.round(all[age - 1] > all[age + 1] ? 10 * age - 100 * all[age - 1] : 10 * age + 100 * all[age + 1]) / 10;
const desc = resT.find((t) => t.shape[1] === 1024);
@@ -151,7 +152,6 @@ export async function predict(image: Tensor, config: Config, idx, count) {
obj.descriptor = [...descriptor];
resT.forEach((t) => tf.dispose(t));
}
last[idx] = obj;
lastCount = count;
resolve(obj);
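
This hunk is the commit's titular fix in miniature: tf.argMax allocates a tensor that was never released, and the removed .data() line even carried an 'inside tf.tidy' comment although nothing here runs inside a tidy scope (it cannot, since tf.tidy does not accept async functions and .data() is async). Any tensor created outside tf.tidy must be disposed by hand. A minimal sketch of the corrected pattern, with resT standing in for the model's output tensors as above:

const ageT = resT.find((t) => t.shape[1] === 100); // borrowed from resT, released with it below
const argmax = tf.argMax(ageT, 1); // fresh allocation that we own
const age = (await argmax.data())[0]; // async read, so no tf.tidy wrapper is possible
tf.dispose(argmax); // release immediately after the read
const all = await ageT.data(); // reading data does not consume the tensor
resT.forEach((t) => tf.dispose(t)); // finally release the model outputs themselves
// skipping either dispose shows up as a steadily climbing tf.memory().numTensors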

View File

@@ -5,8 +5,8 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { Config } from '../config';
import { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import type { GraphModel, Tensor } from '../tfjs/types';
let model: GraphModel;
let last = { gender: '' };

View File

@@ -2,7 +2,7 @@
* Gesture detection module
*/
import { GestureResult } from '../result';
import type { GestureResult } from '../result';
import * as fingerPose from '../fingerpose/fingerpose';
/**

View File

@@ -1,7 +1,7 @@
import * as tf from '../../dist/tfjs.esm.js';
import * as box from './box';
import * as anchors from './anchors';
import { Tensor, GraphModel } from '../tfjs/types';
import type { Tensor, GraphModel } from '../tfjs/types';
export class HandDetector {
model: GraphModel;

View File

@@ -1,8 +1,8 @@
import * as tf from '../../dist/tfjs.esm.js';
import * as box from './box';
import * as util from './util';
import * as detector from './handdetector';
import { Tensor, GraphModel } from '../tfjs/types';
import type * as detector from './handdetector';
import type { Tensor, GraphModel } from '../tfjs/types';
import { env } from '../env';
const palmBoxEnlargeFactor = 5; // default 3

View File

@@ -7,9 +7,9 @@ import * as tf from '../../dist/tfjs.esm.js';
import * as handdetector from './handdetector';
import * as handpipeline from './handpipeline';
import * as fingerPose from '../fingerpose/fingerpose';
import { HandResult } from '../result';
import { Tensor, GraphModel } from '../tfjs/types';
import { Config } from '../config';
import type { HandResult } from '../result';
import type { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';
const meshAnnotations = {
thumb: [1, 2, 3, 4],

View File

@@ -4,7 +4,7 @@
import { log, now, mergeDeep } from './helpers';
import { Config, defaults } from './config';
import { Result, FaceResult, HandResult, BodyResult, ObjectResult, GestureResult } from './result';
import type { Result, FaceResult, HandResult, BodyResult, ObjectResult, GestureResult } from './result';
import * as tf from '../dist/tfjs.esm.js';
import * as models from './models';
import * as face from './face';
@@ -27,7 +27,7 @@ import * as env from './env';
import * as backend from './tfjs/backend';
import * as app from '../package.json';
import * as warmups from './warmup';
import { Tensor, GraphModel } from './tfjs/types';
import type { Tensor, GraphModel } from './tfjs/types';
// export types
export * from './config';
@@ -38,7 +38,7 @@ export { env } from './env';
/** Defines all possible input types for **Human** detection
* @typedef Input Type
*/
export type Input = Tensor | typeof Image | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;
export type Input = Tensor | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas | typeof Image | typeof env.env.Canvas;
/** Events dispatched by `human.events`
* - `create`: triggered when Human object is instantiated
@@ -257,7 +257,7 @@ export class Human {
* @returns Canvas
*/
segmentation(input: Input, background?: Input) {
return segmentation.process(input, background, this.config);
return input ? segmentation.process(input, background, this.config) : null;
}
/** Enhance method performs additional enhancements to face image previously detected for further processing
@@ -373,28 +373,28 @@
await this.load();
timeStamp = now();
this.process = image.process(input, this.config);
const inputTensor = this.process.tensor;
let img = image.process(input, this.config);
this.process = img;
this.performance.image = Math.trunc(now() - timeStamp);
this.analyze('Get Image:');
// run segmentation preprocessing
if (this.config.segmentation.enabled && this.process && inputTensor) {
if (this.config.segmentation.enabled && this.process && img.tensor && img.canvas) {
this.analyze('Start Segmentation:');
this.state = 'run:segmentation';
timeStamp = now();
await segmentation.predict(this.process);
await segmentation.predict(img);
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0) this.performance.segmentation = elapsedTime;
if (this.process.canvas) {
if (img.canvas) {
// replace input
tf.dispose(inputTensor);
this.process = image.process(this.process.canvas, this.config);
tf.dispose(img.tensor);
img = image.process(img.canvas, this.config);
}
this.analyze('End Segmentation:');
}
if (!this.process || !inputTensor) {
if (!img.tensor) {
log('could not convert input to tensor');
resolve({ error: 'could not convert input to tensor' });
return;
@@ -402,7 +402,7 @@
this.emit('image');
timeStamp = now();
this.config.skipFrame = await image.skip(this, inputTensor);
this.config.skipFrame = await image.skip(this.config, img.tensor);
if (!this.performance.frames) this.performance.frames = 0;
if (!this.performance.cached) this.performance.cached = 0;
(this.performance.frames as number)++;
@@ -419,12 +419,12 @@
// run face detection followed by all models that rely on face bounding box: face mesh, age, gender, emotion
if (this.config.async) {
faceRes = this.config.face.enabled ? face.detectFace(this, inputTensor) : [];
faceRes = this.config.face.enabled ? face.detectFace(this, img.tensor) : [];
if (this.performance.face) delete this.performance.face;
} else {
this.state = 'run:face';
timeStamp = now();
faceRes = this.config.face.enabled ? await face.detectFace(this, inputTensor) : [];
faceRes = this.config.face.enabled ? await face.detectFace(this, img.tensor) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0) this.performance.face = elapsedTime;
}
@@ -432,18 +432,18 @@
// run body: can be posenet, blazepose, efficientpose, movenet
this.analyze('Start Body:');
if (this.config.async) {
if (this.config.body.modelPath?.includes('posenet')) bodyRes = this.config.body.enabled ? posenet.predict(inputTensor, this.config) : [];
else if (this.config.body.modelPath?.includes('blazepose')) bodyRes = this.config.body.enabled ? blazepose.predict(inputTensor, this.config) : [];
else if (this.config.body.modelPath?.includes('efficientpose')) bodyRes = this.config.body.enabled ? efficientpose.predict(inputTensor, this.config) : [];
else if (this.config.body.modelPath?.includes('movenet')) bodyRes = this.config.body.enabled ? movenet.predict(inputTensor, this.config) : [];
if (this.config.body.modelPath?.includes('posenet')) bodyRes = this.config.body.enabled ? posenet.predict(img.tensor, this.config) : [];
else if (this.config.body.modelPath?.includes('blazepose')) bodyRes = this.config.body.enabled ? blazepose.predict(img.tensor, this.config) : [];
else if (this.config.body.modelPath?.includes('efficientpose')) bodyRes = this.config.body.enabled ? efficientpose.predict(img.tensor, this.config) : [];
else if (this.config.body.modelPath?.includes('movenet')) bodyRes = this.config.body.enabled ? movenet.predict(img.tensor, this.config) : [];
if (this.performance.body) delete this.performance.body;
} else {
this.state = 'run:body';
timeStamp = now();
if (this.config.body.modelPath?.includes('posenet')) bodyRes = this.config.body.enabled ? await posenet.predict(inputTensor, this.config) : [];
else if (this.config.body.modelPath?.includes('blazepose')) bodyRes = this.config.body.enabled ? await blazepose.predict(inputTensor, this.config) : [];
else if (this.config.body.modelPath?.includes('efficientpose')) bodyRes = this.config.body.enabled ? await efficientpose.predict(inputTensor, this.config) : [];
else if (this.config.body.modelPath?.includes('movenet')) bodyRes = this.config.body.enabled ? await movenet.predict(inputTensor, this.config) : [];
if (this.config.body.modelPath?.includes('posenet')) bodyRes = this.config.body.enabled ? await posenet.predict(img.tensor, this.config) : [];
else if (this.config.body.modelPath?.includes('blazepose')) bodyRes = this.config.body.enabled ? await blazepose.predict(img.tensor, this.config) : [];
else if (this.config.body.modelPath?.includes('efficientpose')) bodyRes = this.config.body.enabled ? await efficientpose.predict(img.tensor, this.config) : [];
else if (this.config.body.modelPath?.includes('movenet')) bodyRes = this.config.body.enabled ? await movenet.predict(img.tensor, this.config) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0) this.performance.body = elapsedTime;
}
@@ -452,12 +452,12 @@
// run handpose
this.analyze('Start Hand:');
if (this.config.async) {
handRes = this.config.hand.enabled ? handpose.predict(inputTensor, this.config) : [];
handRes = this.config.hand.enabled ? handpose.predict(img.tensor, this.config) : [];
if (this.performance.hand) delete this.performance.hand;
} else {
this.state = 'run:hand';
timeStamp = now();
handRes = this.config.hand.enabled ? await handpose.predict(inputTensor, this.config) : [];
handRes = this.config.hand.enabled ? await handpose.predict(img.tensor, this.config) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0) this.performance.hand = elapsedTime;
}
@@ -466,14 +466,14 @@
// run nanodet
this.analyze('Start Object:');
if (this.config.async) {
if (this.config.object.modelPath?.includes('nanodet')) objectRes = this.config.object.enabled ? nanodet.predict(inputTensor, this.config) : [];
else if (this.config.object.modelPath?.includes('centernet')) objectRes = this.config.object.enabled ? centernet.predict(inputTensor, this.config) : [];
if (this.config.object.modelPath?.includes('nanodet')) objectRes = this.config.object.enabled ? nanodet.predict(img.tensor, this.config) : [];
else if (this.config.object.modelPath?.includes('centernet')) objectRes = this.config.object.enabled ? centernet.predict(img.tensor, this.config) : [];
if (this.performance.object) delete this.performance.object;
} else {
this.state = 'run:object';
timeStamp = now();
if (this.config.object.modelPath?.includes('nanodet')) objectRes = this.config.object.enabled ? await nanodet.predict(inputTensor, this.config) : [];
else if (this.config.object.modelPath?.includes('centernet')) objectRes = this.config.object.enabled ? await centernet.predict(inputTensor, this.config) : [];
if (this.config.object.modelPath?.includes('nanodet')) objectRes = this.config.object.enabled ? await nanodet.predict(img.tensor, this.config) : [];
else if (this.config.object.modelPath?.includes('centernet')) objectRes = this.config.object.enabled ? await centernet.predict(img.tensor, this.config) : [];
elapsedTime = Math.trunc(now() - timeStamp);
if (elapsedTime > 0) this.performance.object = elapsedTime;
}
@@ -507,7 +507,7 @@
};
// finally dispose input tensor
tf.dispose(inputTensor);
tf.dispose(img.tensor);
// log('Result:', result);
this.emit('detect');
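
The thread running through all of these detect() hunks is single ownership of the input tensor: image.process() allocates img.tensor once, every detector (face, body, hand, object) merely borrows it, the segmentation branch is the one place it is disposed and re-created mid-flight, and the final tf.dispose(img.tensor) is the lone release point. A hedged sketch of that lifecycle; the try/finally is an editorial illustration, the commit itself releases at the end of the happy path:

let img = image.process(input, this.config); // allocates img.tensor
try {
  if (this.config.segmentation.enabled && img.tensor && img.canvas) {
    await segmentation.predict(img);
    tf.dispose(img.tensor); // about to replace the input, so release the old tensor first
    img = image.process(img.canvas, this.config);
  }
  const faceRes = this.config.face.enabled ? await face.detectFace(this, img.tensor) : [];
  // ...body, hand and object models borrow img.tensor the same way...
} finally {
  tf.dispose(img.tensor); // exactly one release per allocation
}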

View File

@@ -4,11 +4,11 @@
import * as tf from '../../dist/tfjs.esm.js';
import * as fxImage from './imagefx';
import { Tensor } from '../tfjs/types';
import { Config } from '../config';
import type { Tensor } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';
type Input = Tensor | typeof Image | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;
type Input = Tensor | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas | typeof Image | typeof env.Canvas;
const maxSize = 2048;
// internal temp canvases
@@ -17,6 +17,25 @@ let outCanvas;
// @ts-ignore // imagefx is js module that should be converted to a class
let fx: fxImage.GLImageFilter | null; // instance of imagefx
export function canvas(width, height) {
let c;
if (env.browser) {
if (typeof OffscreenCanvas !== 'undefined') {
c = new OffscreenCanvas(width, height);
} else {
c = document.createElement('canvas');
c.width = width;
c.height = height;
}
} else {
// @ts-ignore // env.Canvas is an external monkey-patch
// eslint-disable-next-line new-cap
c = (typeof env.Canvas !== 'undefined') ? new env.Canvas(width, height) : null;
}
if (!c) throw new Error('Human: Cannot create canvas');
return c;
}
// process input image and return tensor
// input can be tensor, imagedata, htmlimageelement, htmlvideoelement
// input is resized and run through imagefx filter
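
The new canvas() factory replaces the OffscreenCanvas-or-document.createElement ternaries that were previously repeated at every call site below, and adds a Node path through env.Canvas, which callers are expected to monkey-patch (the wasm test at the end of this commit wires it to the npm canvas package). A usage sketch under that assumption:

// In Node, patch once before first use (mirrors test-node-wasm.js below):
//   const { Canvas } = require('canvas'); Human.env.Canvas = Canvas;
const scratch = canvas(640, 480); // OffscreenCanvas, DOM canvas, or env.Canvas depending on environment
const ctx = scratch.getContext('2d'); // same 2d API in all three cases
// if no implementation is available, canvas() throws 'Human: Cannot create canvas'
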
@@ -27,6 +46,7 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
if (
!(input instanceof tf.Tensor)
&& !(typeof Image !== 'undefined' && input instanceof Image)
&& !(typeof env.Canvas !== 'undefined' && input instanceof env.Canvas)
&& !(typeof ImageData !== 'undefined' && input instanceof ImageData)
&& !(typeof ImageBitmap !== 'undefined' && input instanceof ImageBitmap)
&& !(typeof HTMLImageElement !== 'undefined' && input instanceof HTMLImageElement)
@@ -39,8 +59,8 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
}
if (input instanceof tf.Tensor) {
// if input is tensor, use as-is
if ((input as Tensor).shape && (input as Tensor).shape.length === 4 && (input as Tensor).shape[0] === 1 && (input as Tensor).shape[3] === 3) tensor = tf.clone(input);
else throw new Error(`Human: Input tensor shape must be [1, height, width, 3] and instead was ${(input as Tensor).shape}`);
if ((input as unknown as Tensor).shape && (input as unknown as Tensor).shape.length === 4 && (input as unknown as Tensor).shape[0] === 1 && (input as unknown as Tensor).shape[3] === 3) tensor = tf.clone(input);
else throw new Error(`Human: Input tensor shape must be [1, height, width, 3] and instead was ${(input as unknown as Tensor).shape}`);
} else {
// check if resizing will be needed
const originalWidth = input['naturalWidth'] || input['videoWidth'] || input['width'] || (input['shape'] && (input['shape'][1] > 0));
@@ -63,15 +83,11 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
if ((config.filter.height || 0) > 0) targetHeight = config.filter.height;
else if ((config.filter.width || 0) > 0) targetHeight = originalHeight * ((config.filter.width || 0) / originalWidth);
if (!targetWidth || !targetHeight) throw new Error('Human: Input cannot determine dimension');
if (!inCanvas || (inCanvas?.width !== targetWidth) || (inCanvas?.height !== targetHeight)) {
inCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement('canvas');
if (inCanvas?.width !== targetWidth) inCanvas.width = targetWidth;
if (inCanvas?.height !== targetHeight) inCanvas.height = targetHeight;
}
if (!inCanvas || (inCanvas?.width !== targetWidth) || (inCanvas?.height !== targetHeight)) inCanvas = canvas(targetWidth, targetHeight);
// draw input to our canvas
const ctx = inCanvas.getContext('2d');
if (input instanceof ImageData) {
if ((typeof ImageData !== 'undefined') && (input instanceof ImageData)) {
ctx.putImageData(input, 0, 0);
} else {
if (config.filter.flip && typeof ctx.translate !== 'undefined') {
@@ -83,11 +99,10 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
ctx.drawImage(input, 0, 0, originalWidth, originalHeight, 0, 0, inCanvas?.width, inCanvas?.height);
}
}
// imagefx transforms using gl
if (config.filter.enabled) {
if (!fx || !outCanvas || (inCanvas.width !== outCanvas.width) || (inCanvas?.height !== outCanvas?.height)) {
outCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(inCanvas?.width, inCanvas?.height) : document.createElement('canvas');
outCanvas = canvas(inCanvas?.width, inCanvas?.height);
if (outCanvas?.width !== inCanvas?.width) outCanvas.width = inCanvas?.width;
if (outCanvas?.height !== inCanvas?.height) outCanvas.height = inCanvas?.height;
// log('created FX filter');
@@ -146,45 +161,58 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
if (outCanvas.data) { // if we have data, just convert to tensor
const shape = [outCanvas.height, outCanvas.width, 3];
pixels = tf.tensor3d(outCanvas.data, shape, 'int32');
} else if (outCanvas instanceof ImageData) { // if input is imagedata, just use it
} else if ((typeof ImageData !== 'undefined') && (outCanvas instanceof ImageData)) { // if input is imagedata, just use it
pixels = tf.browser ? tf.browser.fromPixels(outCanvas) : null;
} else if (config.backend === 'webgl' || config.backend === 'humangl') { // tf kernel-optimized method to get imagedata
// we can't use the canvas as-is since it already has a context, so draw through one more temporary canvas
const tempCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement('canvas');
const tempCanvas = canvas(targetWidth, targetHeight);
tempCanvas.width = targetWidth;
tempCanvas.height = targetHeight;
const tempCtx = tempCanvas.getContext('2d');
tempCtx?.drawImage(outCanvas, 0, 0);
pixels = tf.browser ? tf.browser.fromPixels(tempCanvas) : null;
pixels = (tf.browser && env.browser) ? tf.browser.fromPixels(tempCanvas) : null;
} else { // cpu and wasm kernel does not implement efficient fromPixels method
// we can't use the canvas as-is since it already has a context, so draw through one more temporary canvas and run fromPixels on its ImageData instead
const tempCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement('canvas');
const tempCanvas = canvas(targetWidth, targetHeight);
tempCanvas.width = targetWidth;
tempCanvas.height = targetHeight;
const tempCtx = tempCanvas.getContext('2d');
tempCtx?.drawImage(outCanvas, 0, 0);
const data = tempCtx?.getImageData(0, 0, targetWidth, targetHeight);
pixels = tf.browser ? tf.browser.fromPixels(data) : null;
tempCtx.drawImage(outCanvas, 0, 0);
const data = tempCtx.getImageData(0, 0, targetWidth, targetHeight);
if (tf.browser && env.browser) {
pixels = tf.browser.fromPixels(data);
} else {
pixels = tf.tidy(() => {
const imageData = tf.tensor(Array.from(data.data), [targetWidth, targetHeight, 4]);
const channels = tf.split(imageData, 4, 2); // split rgba to channels
const rgb = tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb and ignore alpha
const expand = tf.reshape(rgb, [imageData.shape[0], imageData.shape[1], 3]); // move extra dim from the end of tensor and use it as batch number instead
return expand;
});
}
}
if (pixels) {
const casted = tf.cast(pixels, 'float32');
tensor = tf.expandDims(casted, 0);
tf.dispose(pixels);
tf.dispose(casted);
} else {
tensor = tf.zeros([1, targetWidth, targetHeight, 3]);
throw new Error('Human: Cannot create tensor from input');
}
}
}
const canvas = config.filter.return ? outCanvas : null;
return { tensor, canvas };
return { tensor, canvas: (config.filter.return ? outCanvas : null) };
}
let lastInputSum = 0;
let lastCacheDiff = 1;
export async function skip(instance, input: Tensor) {
if (instance.config.cacheSensitivity === 0) return false;
export async function skip(config, input: Tensor) {
if (config.cacheSensitivity === 0) return false;
const resizeFact = 32;
if (!input.shape[1] || !input.shape[2]) return false;
const reduced: Tensor = tf.image.resizeBilinear(input, [Math.trunc(input.shape[1] / resizeFact), Math.trunc(input.shape[2] / resizeFact)]);
// use tensor sum
/*
const sumT = this.tf.sum(reduced);
@@ -193,17 +221,17 @@ export async function skip(instance, input: Tensor) {
*/
// use js loop sum, faster than uploading the tensor to the gpu, computing the sum there, and downloading the result back
const reducedData = await reduced.data(); // raw image rgb array
tf.dispose(reduced);
let sum = 0;
for (let i = 0; i < reducedData.length / 3; i++) sum += reducedData[3 * i + 2]; // look only at the third (blue) channel of each pixel
reduced.dispose();
const diff = 100 * (Math.max(sum, lastInputSum) / Math.min(sum, lastInputSum) - 1);
lastInputSum = sum;
// if previous frame was skipped, skip this frame if changed more than cacheSensitivity
// if previous frame was not skipped, then look for cacheSensitivity or difference larger than one in previous frame to avoid resetting cache in subsequent frames unnecessarily
const skipFrame = diff < Math.max(instance.config.cacheSensitivity, lastCacheDiff);
const skipFrame = diff < Math.max(config.cacheSensitivity, lastCacheDiff);
// if difference is above 10x threshold, don't use last value to force reset cache for significant change of scenes or images
lastCacheDiff = diff > 10 * instance.config.cacheSensitivity ? 0 : diff;
lastCacheDiff = diff > 10 * config.cacheSensitivity ? 0 : diff;
// console.log('skipFrame', skipFrame, this.config.cacheSensitivity, diff);
return skipFrame;
}
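
Two things change in skip(): it now receives config directly instead of reaching through a Human instance, and the downscaled tensor is released with tf.dispose(reduced) right after its data is read rather than after the summing loop. The caching arithmetic is unchanged and worth restating: the frame is shrunk 32x, one channel is summed in plain JS (per the comment, cheaper than a GPU reduction plus download at this size), and the percentage difference against the previous frame decides whether cached results may be reused. A worked example of the threshold:

// diff = 100 * (max(sum, lastInputSum) / min(sum, lastInputSum) - 1)
// e.g. lastInputSum = 1000, sum = 1013 => diff = 100 * (1013 / 1000 - 1) = 1.3
// with config.cacheSensitivity = 0.7 and lastCacheDiff = 1:
//   skipFrame = 1.3 < Math.max(0.7, 1) = false, so the full pipeline runs
//   and since 1.3 <= 10 * 0.7, lastCacheDiff becomes 1.3 for the next frame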

View File

@@ -1,5 +1,5 @@
import { log } from './helpers';
import { GraphModel } from './tfjs/types';
import type { GraphModel } from './tfjs/types';
import * as facemesh from './blazeface/facemesh';
import * as faceres from './faceres/faceres';
import * as emotion from './emotion/emotion';

View File

@@ -4,9 +4,9 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { BodyResult } from '../result';
import { GraphModel, Tensor } from '../tfjs/types';
import { Config } from '../config';
import type { BodyResult } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
let model: GraphModel;

View File

@@ -5,9 +5,9 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { labels } from './labels';
import { ObjectResult } from '../result';
import { GraphModel, Tensor } from '../tfjs/types';
import { Config } from '../config';
import type { ObjectResult } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';
let model;
@@ -36,6 +36,7 @@ async function process(res: Tensor, inputSize, outputShape, config: Config) {
tf.dispose(squeezeT);
const stackT = tf.stack([arr[1], arr[0], arr[3], arr[2]], 1); // reorder dims as tf.nms expects y, x
const boxesT = tf.squeeze(stackT);
tf.dispose(stackT);
const scoresT = tf.squeeze(arr[4]);
const classesT = tf.squeeze(arr[5]);
arr.forEach((t) => tf.dispose(t));
@@ -86,6 +87,7 @@ export async function predict(input: Tensor, config: Config): Promise<ObjectResu
const obj = await process(objectT, model.inputSize, outputSize, config);
last = obj;
resolve(obj);
});
}
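
The added tf.dispose(stackT) applies the same ownership rule as the faceres fix above: squeeze, split, and stack each return a fresh allocation, so a pipeline that reorders box coordinates for TFJS non-max suppression (per the tf.nms comment) has to release every intermediate it no longer needs, not just the final boxes/scores/classes tensors.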

View File

@@ -5,9 +5,9 @@
import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import { labels } from './labels';
import { ObjectResult } from '../result';
import { GraphModel, Tensor } from '../tfjs/types';
import { Config } from '../config';
import type { ObjectResult } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';
let model;

View File

@@ -2,7 +2,7 @@
* Module that analyzes existing results and recombines them into a unified person object
*/
import { FaceResult, BodyResult, HandResult, GestureResult, PersonResult } from './result';
import type { FaceResult, BodyResult, HandResult, GestureResult, PersonResult } from './result';
export function join(faces: Array<FaceResult>, bodies: Array<BodyResult>, hands: Array<HandResult>, gestures: Array<GestureResult>, shape: Array<number> | undefined): Array<PersonResult> {
let id = 0;

View File

@@ -1,5 +1,5 @@
import * as kpt from './keypoints';
import { BodyResult } from '../result';
import type { BodyResult } from '../result';
export function eitherPointDoesntMeetConfidence(a: number, b: number, minConfidence: number) {
return (a < minConfidence || b < minConfidence);

View File

@@ -2,8 +2,8 @@
* Type definitions for Human result object
*/
import { Tensor } from './tfjs/types';
import { FaceGesture, BodyGesture, HandGesture, IrisGesture } from './gesture/gesture';
import type { Tensor } from './tfjs/types';
import type { FaceGesture, BodyGesture, HandGesture, IrisGesture } from './gesture/gesture';
/** Face results
* Combined results of face detector, face mesh, age, gender, emotion, embedding, iris models
@@ -186,7 +186,7 @@ export interface Result {
/** global performance object with timing values for each operation */
performance: Record<string, unknown>,
/** optional processed canvas that can be used to draw input on screen */
canvas?: OffscreenCanvas | HTMLCanvasElement | null,
canvas?: OffscreenCanvas | HTMLCanvasElement | null | undefined,
/** timestamp of detection representing the milliseconds elapsed since the UNIX epoch */
readonly timestamp: number,
/** getter property that returns unified persons object */

View File

@@ -1,8 +1,10 @@
import { log, now, mergeDeep } from './helpers';
import * as sample from './sample';
import * as tf from '../dist/tfjs.esm.js';
import { Config } from './config';
import { Result } from './result';
import * as image from './image/image';
import type { Config } from './config';
import type { Result } from './result';
import { env } from './env';
async function warmupBitmap(instance) {
const b64toBlob = (base64: string, type = 'application/octet-stream') => fetch(`data:${type};base64,${base64}`).then((res) => res.blob());
@@ -24,31 +26,38 @@ async function warmupBitmap(instance) {
async function warmupCanvas(instance) {
return new Promise((resolve) => {
let src;
let size = 0;
// let size = 0;
switch (instance.config.warmup) {
case 'face':
size = 256;
// size = 256;
src = 'data:image/jpeg;base64,' + sample.face;
break;
case 'full':
case 'body':
size = 1200;
// size = 1200;
src = 'data:image/jpeg;base64,' + sample.body;
break;
default:
src = null;
}
// src = encodeURI('../assets/human-sample-upper.jpg');
const img = new Image();
let img;
if (typeof Image !== 'undefined') img = new Image();
// @ts-ignore env.Image is an external monkey-patch
else if (env.Image) img = new env.Image();
img.onload = async () => {
const canvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(size, size) : document.createElement('canvas');
canvas.width = img.naturalWidth;
canvas.height = img.naturalHeight;
const ctx = canvas.getContext('2d');
ctx?.drawImage(img, 0, 0);
// const data = ctx?.getImageData(0, 0, canvas.height, canvas.width);
const res = await instance.detect(canvas, instance.config);
resolve(res);
const canvas = image.canvas(img.naturalWidth, img.naturalHeight);
if (!canvas) {
log('Warmup: Canvas not found');
resolve({});
} else {
const ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
// const data = ctx?.getImageData(0, 0, canvas.height, canvas.width);
const tensor = await instance.image(canvas);
const res = await instance.detect(tensor.tensor, instance.config);
resolve(res);
}
};
if (src) img.src = src;
else resolve(null);
@@ -93,7 +102,7 @@ export async function warmup(instance, userConfig?: Partial<Config>): Promise<Re
if (!instance.config.warmup || instance.config.warmup === 'none') return { error: 'null' };
let res;
if (typeof createImageBitmap === 'function') res = await warmupBitmap(instance);
else if (typeof Image !== 'undefined') res = await warmupCanvas(instance);
else if (typeof Image !== 'undefined' || env.Canvas !== undefined) res = await warmupCanvas(instance);
else res = await warmupNode(instance);
const t1 = now();
if (instance.config.debug) log('Warmup', instance.config.warmup, Math.round(t1 - t0), 'ms');
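
warmupCanvas() now sizes its canvas from the decoded sample (img.naturalWidth and img.naturalHeight) instead of a hard-coded size, creates it through the shared image.canvas() helper, and converts it to a tensor with instance.image() before calling detect(). Together with the env.Image fallback this lets warmup run under Node too; a sketch of the wiring, as the wasm test below sets it up (canvas is the npm package):

const { Canvas, Image } = require('canvas');
Human.env.Canvas = Canvas; // used by image.canvas() when OffscreenCanvas and document are absent
Human.env.Image = Image; // used by warmupCanvas() when the DOM Image constructor is absent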

View File

@@ -5,7 +5,7 @@ const config = {
modelBasePath: 'file://models/',
backend: 'tensorflow',
debug: false,
async: false,
async: true,
face: {
enabled: true,
detector: { enabled: true, rotation: true },

View File

@@ -1,10 +1,15 @@
const tf = require('@tensorflow/tfjs/dist/tf.node.js'); // wasm backend requires tfjs to be loaded first
const wasm = require('@tensorflow/tfjs-backend-wasm/dist/tf-backend-wasm.node.js'); // wasm backend does not get auto-loaded in nodejs
const Human = require('../dist/human.node-wasm.js').default;
const tf = require('@tensorflow/tfjs'); // wasm backend requires tfjs to be loaded first
const wasm = require('@tensorflow/tfjs-backend-wasm'); // wasm backend does not get auto-loaded in nodejs
const { Canvas, Image } = require('canvas');
const Human = require('../dist/human.node-wasm.js');
const test = require('./test-main.js').test;
Human.env.Canvas = Canvas;
Human.env.Image = Image;
const config = {
modelBasePath: 'http://localhost:10030/models/',
// modelBasePath: 'http://localhost:10030/models/',
modelBasePath: 'https://vladmandic.github.io/human/models/',
backend: 'wasm',
wasmPath: 'node_modules/@tensorflow/tfjs-backend-wasm/dist/',
// wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
@@ -20,12 +25,17 @@ const config = {
},
hand: { enabled: true, rotation: false },
body: { enabled: true },
object: { enabled: true },
object: { enabled: false },
segmentation: { enabled: true },
filter: { enabled: false },
};
// @ts-ignore // in nodejs+wasm must set explicitly before using human
wasm.setWasmPaths(config.wasmPath); tf.setBackend('wasm');
async function main() {
wasm.setWasmPaths(config.wasmPath);
await tf.setBackend('wasm');
await tf.ready();
test(Human.Human, config);
}
test(Human, config);
main();
// @ts-ignore // in nodejs+wasm must set explicitly before using human
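
The reshuffle into an async main() is about ordering, not style: tf.setBackend('wasm') returns a promise and the wasm binary loads asynchronously, so awaiting it and tf.ready() before invoking the tests guarantees the backend is actually initialized, where the old fire-and-forget tf.setBackend('wasm') at module top level could race the first inference. Note the export shape also changed: the suite now passes Human.Human (the class off the module object) rather than the default export.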

View File

@@ -14,10 +14,10 @@ const config = {
description: { enabled: true },
emotion: { enabled: true },
},
hand: { enabled: true, rotation: true },
body: { enabled: true },
object: { enabled: true },
segmentation: { enabled: true },
hand: { enabled: false, rotation: true },
body: { enabled: false },
object: { enabled: false },
segmentation: { enabled: false },
filter: { enabled: false },
};

View File

@@ -19,6 +19,8 @@ const ignoreMessages = [
'cudart_stub.cc',
'cuda_driver.cc:326',
'cpu_allocator_impl.cc',
'--trace-warnings',
'ExperimentalWarning',
];
const status = {
@@ -48,8 +50,9 @@ function logStdIO(ok, test, buffer) {
}
async function runTest(test) {
log.info();
log.info(test, 'start');
return new Promise((resolve) => {
log.info(test, 'start');
const child = fork(path.join(__dirname, test), [], { silent: true });
child.on('message', (data) => logMessage(test, data));
child.on('error', (data) => log.error(test, ':', data.message || data));
@@ -68,6 +71,7 @@ async function testAll() {
process.on('uncaughtException', (data) => log.error('nodejs unhandled exception', data));
log.info('tests:', tests);
for (const test of tests) await runTest(test);
log.info();
log.info('status:', status);
}