fix multiple memory leaks

pull/280/head
Vladimir Mandic 2021-09-13 13:28:35 -04:00
parent 97ce559e60
commit 45af8e225c
27 changed files with 192 additions and 135 deletions

View File

@@ -13,7 +13,7 @@
   "locations": ["dist/*", "types/*", "typedoc/*"]
 },
 "lint": {
-  "locations": [ "src/**/*.ts", "test/*.js", "demo/**/*.js" ],
+  "locations": [ "*.json", "src/**/*.ts", "test/**/*.js", "demo/**/*.js" ],
   "rules": { }
 },
 "changelog": {
@@ -133,7 +133,7 @@
   ]
 },
 "watch": {
-  "locations": [ "src/**", "tfjs/*" ]
+  "locations": [ "src/**/*", "tfjs/**/*" ]
 },
 "typescript": {
   "allowJs": false

View File

@@ -5,8 +5,8 @@
 import { log, join } from '../helpers';
 import * as tf from '../../dist/tfjs.esm.js';
-import { Config } from '../config';
-import { GraphModel, Tensor } from '../tfjs/types';
+import type { Config } from '../config';
+import type { GraphModel, Tensor } from '../tfjs/types';
 let model: GraphModel;
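
A pattern that repeats across most files in this commit: imports used only for typings switch to TypeScript's `import type` form. A minimal sketch of the difference, using this repo's own modules:

// `import type` is fully erased at compile time, so the module is never
// loaded at runtime and cannot contribute to circular-dependency chains
import type { Config } from './config'; // type-only: erased from the emitted bundle
import { defaults } from './config';    // value import: kept, module is executed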

View File

@@ -186,8 +186,8 @@ export interface GestureConfig {
  */
 export interface Config {
   /** Backend used for TFJS operations */
-  // backend: '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu' | null,
-  backend: string;
+  backend: '' | 'cpu' | 'wasm' | 'webgl' | 'humangl' | 'tensorflow' | 'webgpu',
+  // backend: string;
   /** Path to *.wasm files if backend is set to `wasm` */
   wasmPath: string,
@@ -202,8 +202,8 @@ export interface Config {
    * - warmup pre-initializes all models for faster inference but can take significant time on startup
    * - only used for `webgl` and `humangl` backends
    */
-  // warmup: 'none' | 'face' | 'full' | 'body' | string,
-  warmup: string;
+  warmup: 'none' | 'face' | 'full' | 'body',
+  // warmup: string;
   /** Base model path (typically starting with file://, http:// or https://) for all models
    * - individual modelPath values are relative to this path
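
With `backend` and `warmup` narrowed from plain `string` to literal unions, invalid values now fail at compile time rather than at model load. A quick sketch of what the compiler now catches:

const config: Partial<Config> = {
  backend: 'webgl', // accepted: member of the union
  warmup: 'face',   // accepted
};
// config.backend = 'webgl2'; // rejected by tsc: not assignable to the backend union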

View File

@@ -4,9 +4,9 @@
 import { log, join } from '../helpers';
 import * as tf from '../../dist/tfjs.esm.js';
-import { BodyResult } from '../result';
-import { GraphModel, Tensor } from '../tfjs/types';
-import { Config } from '../config';
+import type { BodyResult } from '../result';
+import type { GraphModel, Tensor } from '../tfjs/types';
+import type { Config } from '../config';
 let model: GraphModel;

View File

@@ -3,8 +3,8 @@
  */
 import { log, join } from '../helpers';
-import { Config } from '../config';
-import { Tensor, GraphModel } from '../tfjs/types';
+import type { Config } from '../config';
+import type { Tensor, GraphModel } from '../tfjs/types';
 import * as tf from '../../dist/tfjs.esm.js';
 const annotations = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral'];

View File

@@ -8,8 +8,8 @@ import * as tf from '../dist/tfjs.esm.js';
 import * as facemesh from './blazeface/facemesh';
 import * as emotion from './emotion/emotion';
 import * as faceres from './faceres/faceres';
-import { FaceResult } from './result';
-import { Tensor } from './tfjs/types';
+import type { FaceResult } from './result';
+import type { Tensor } from './tfjs/types';
 // eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
 const rad2deg = (theta) => Math.round((theta * 180) / Math.PI);
@@ -250,7 +250,6 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
       rotation,
       tensor,
     });
     parent.analyze('End Face');
   }
   parent.analyze('End FaceMesh:');

View File

@@ -6,8 +6,8 @@
 import { log, join } from '../helpers';
 import * as tf from '../../dist/tfjs.esm.js';
-import { Tensor, GraphModel } from '../tfjs/types';
-import { Config } from '../config';
+import type { Tensor, GraphModel } from '../tfjs/types';
+import type { Config } from '../config';
 let model: GraphModel;
 const last: Array<{
@@ -140,7 +140,8 @@ export async function predict(image: Tensor, config: Config, idx, count) {
   }
   const argmax = tf.argMax(resT.find((t) => t.shape[1] === 100), 1);
   const age = (await argmax.data())[0];
-  const all = await resT.find((t) => t.shape[1] === 100).data(); // inside tf.tidy
+  tf.dispose(argmax);
+  const all = await resT.find((t) => t.shape[1] === 100).data();
   obj.age = Math.round(all[age - 1] > all[age + 1] ? 10 * age - 100 * all[age - 1] : 10 * age + 100 * all[age + 1]) / 10;
   const desc = resT.find((t) => t.shape[1] === 1024);
@@ -151,7 +152,6 @@ export async function predict(image: Tensor, config: Config, idx, count) {
   obj.descriptor = [...descriptor];
   resT.forEach((t) => tf.dispose(t));
   }
   last[idx] = obj;
   lastCount = count;
   resolve(obj);
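
This hunk is the canonical leak fix in this commit: `tf.argMax` allocates an intermediate tensor in backend memory (GPU or WASM heap) that JavaScript garbage collection never sees, so it must be released explicitly. A minimal sketch of the pattern, with illustrative names:

import * as tf from '@tensorflow/tfjs';

async function topIndex(logits: tf.Tensor): Promise<number> {
  const argmax = tf.argMax(logits, 1);  // intermediate tensor lives in backend memory
  const idx = (await argmax.data())[0]; // copy the value out first...
  tf.dispose(argmax);                   // ...then free the backend memory explicitly
  return idx;
}

Note the order: `await argmax.data()` must resolve before the dispose, since reading a disposed tensor throws.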

View File

@@ -5,8 +5,8 @@
 import { log, join } from '../helpers';
 import * as tf from '../../dist/tfjs.esm.js';
-import { Config } from '../config';
-import { GraphModel, Tensor } from '../tfjs/types';
+import type { Config } from '../config';
+import type { GraphModel, Tensor } from '../tfjs/types';
 let model: GraphModel;
 let last = { gender: '' };

View File

@@ -2,7 +2,7 @@
  * Gesture detection module
  */
-import { GestureResult } from '../result';
+import type { GestureResult } from '../result';
 import * as fingerPose from '../fingerpose/fingerpose';
 /**

View File

@@ -1,7 +1,7 @@
 import * as tf from '../../dist/tfjs.esm.js';
 import * as box from './box';
 import * as anchors from './anchors';
-import { Tensor, GraphModel } from '../tfjs/types';
+import type { Tensor, GraphModel } from '../tfjs/types';
 export class HandDetector {
   model: GraphModel;

View File

@@ -1,8 +1,8 @@
 import * as tf from '../../dist/tfjs.esm.js';
 import * as box from './box';
 import * as util from './util';
-import * as detector from './handdetector';
-import { Tensor, GraphModel } from '../tfjs/types';
+import type * as detector from './handdetector';
+import type { Tensor, GraphModel } from '../tfjs/types';
 import { env } from '../env';
 const palmBoxEnlargeFactor = 5; // default 3

View File

@@ -7,9 +7,9 @@ import * as tf from '../../dist/tfjs.esm.js';
 import * as handdetector from './handdetector';
 import * as handpipeline from './handpipeline';
 import * as fingerPose from '../fingerpose/fingerpose';
-import { HandResult } from '../result';
-import { Tensor, GraphModel } from '../tfjs/types';
-import { Config } from '../config';
+import type { HandResult } from '../result';
+import type { Tensor, GraphModel } from '../tfjs/types';
+import type { Config } from '../config';
 const meshAnnotations = {
   thumb: [1, 2, 3, 4],

View File

@@ -4,7 +4,7 @@
 import { log, now, mergeDeep } from './helpers';
 import { Config, defaults } from './config';
-import { Result, FaceResult, HandResult, BodyResult, ObjectResult, GestureResult } from './result';
+import type { Result, FaceResult, HandResult, BodyResult, ObjectResult, GestureResult } from './result';
 import * as tf from '../dist/tfjs.esm.js';
 import * as models from './models';
 import * as face from './face';
@@ -27,7 +27,7 @@ import * as env from './env';
 import * as backend from './tfjs/backend';
 import * as app from '../package.json';
 import * as warmups from './warmup';
-import { Tensor, GraphModel } from './tfjs/types';
+import type { Tensor, GraphModel } from './tfjs/types';
 // export types
 export * from './config';
@@ -38,7 +38,7 @@ export { env } from './env';
 /** Defines all possible input types for **Human** detection
  * @typedef Input Type
  */
-export type Input = Tensor | typeof Image | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;
+export type Input = Tensor | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas | typeof Image | typeof env.env.Canvas;
 /** Events dispatched by `human.events`
  * - `create`: triggered when Human object is instantiated
@@ -257,7 +257,7 @@ export class Human {
   * @returns Canvas
   */
  segmentation(input: Input, background?: Input) {
-    return segmentation.process(input, background, this.config);
+    return input ? segmentation.process(input, background, this.config) : null;
  }
  /** Enhance method performs additional enhacements to face image previously detected for futher this.processing
@@ -373,28 +373,28 @@ export class Human {
   await this.load();
   timeStamp = now();
-  this.process = image.process(input, this.config);
-  const inputTensor = this.process.tensor;
+  let img = image.process(input, this.config);
+  this.process = img;
   this.performance.image = Math.trunc(now() - timeStamp);
   this.analyze('Get Image:');
   // run segmentation prethis.processing
-  if (this.config.segmentation.enabled && this.process && inputTensor) {
+  if (this.config.segmentation.enabled && this.process && img.tensor && img.canvas) {
     this.analyze('Start Segmentation:');
     this.state = 'run:segmentation';
     timeStamp = now();
-    await segmentation.predict(this.process);
+    await segmentation.predict(img);
     elapsedTime = Math.trunc(now() - timeStamp);
     if (elapsedTime > 0) this.performance.segmentation = elapsedTime;
-    if (this.process.canvas) {
+    if (img.canvas) {
       // replace input
-      tf.dispose(inputTensor);
-      this.process = image.process(this.process.canvas, this.config);
+      tf.dispose(img.tensor);
+      img = image.process(img.canvas, this.config);
     }
     this.analyze('End Segmentation:');
   }
-  if (!this.process || !inputTensor) {
+  if (!img.tensor) {
     log('could not convert input to tensor');
     resolve({ error: 'could not convert input to tensor' });
     return;
@@ -402,7 +402,7 @@ export class Human {
   this.emit('image');
   timeStamp = now();
-  this.config.skipFrame = await image.skip(this, inputTensor);
+  this.config.skipFrame = await image.skip(this.config, img.tensor);
   if (!this.performance.frames) this.performance.frames = 0;
   if (!this.performance.cached) this.performance.cached = 0;
   (this.performance.frames as number)++;
@@ -419,12 +419,12 @@ export class Human {
   // run face detection followed by all models that rely on face bounding box: face mesh, age, gender, emotion
   if (this.config.async) {
-    faceRes = this.config.face.enabled ? face.detectFace(this, inputTensor) : [];
+    faceRes = this.config.face.enabled ? face.detectFace(this, img.tensor) : [];
     if (this.performance.face) delete this.performance.face;
   } else {
     this.state = 'run:face';
     timeStamp = now();
-    faceRes = this.config.face.enabled ? await face.detectFace(this, inputTensor) : [];
+    faceRes = this.config.face.enabled ? await face.detectFace(this, img.tensor) : [];
     elapsedTime = Math.trunc(now() - timeStamp);
     if (elapsedTime > 0) this.performance.face = elapsedTime;
   }
@@ -432,18 +432,18 @@ export class Human {
   // run body: can be posenet, blazepose, efficientpose, movenet
   this.analyze('Start Body:');
   if (this.config.async) {
-    if (this.config.body.modelPath?.includes('posenet')) bodyRes = this.config.body.enabled ? posenet.predict(inputTensor, this.config) : [];
-    else if (this.config.body.modelPath?.includes('blazepose')) bodyRes = this.config.body.enabled ? blazepose.predict(inputTensor, this.config) : [];
-    else if (this.config.body.modelPath?.includes('efficientpose')) bodyRes = this.config.body.enabled ? efficientpose.predict(inputTensor, this.config) : [];
-    else if (this.config.body.modelPath?.includes('movenet')) bodyRes = this.config.body.enabled ? movenet.predict(inputTensor, this.config) : [];
+    if (this.config.body.modelPath?.includes('posenet')) bodyRes = this.config.body.enabled ? posenet.predict(img.tensor, this.config) : [];
+    else if (this.config.body.modelPath?.includes('blazepose')) bodyRes = this.config.body.enabled ? blazepose.predict(img.tensor, this.config) : [];
+    else if (this.config.body.modelPath?.includes('efficientpose')) bodyRes = this.config.body.enabled ? efficientpose.predict(img.tensor, this.config) : [];
+    else if (this.config.body.modelPath?.includes('movenet')) bodyRes = this.config.body.enabled ? movenet.predict(img.tensor, this.config) : [];
     if (this.performance.body) delete this.performance.body;
   } else {
     this.state = 'run:body';
     timeStamp = now();
-    if (this.config.body.modelPath?.includes('posenet')) bodyRes = this.config.body.enabled ? await posenet.predict(inputTensor, this.config) : [];
-    else if (this.config.body.modelPath?.includes('blazepose')) bodyRes = this.config.body.enabled ? await blazepose.predict(inputTensor, this.config) : [];
-    else if (this.config.body.modelPath?.includes('efficientpose')) bodyRes = this.config.body.enabled ? await efficientpose.predict(inputTensor, this.config) : [];
-    else if (this.config.body.modelPath?.includes('movenet')) bodyRes = this.config.body.enabled ? await movenet.predict(inputTensor, this.config) : [];
+    if (this.config.body.modelPath?.includes('posenet')) bodyRes = this.config.body.enabled ? await posenet.predict(img.tensor, this.config) : [];
+    else if (this.config.body.modelPath?.includes('blazepose')) bodyRes = this.config.body.enabled ? await blazepose.predict(img.tensor, this.config) : [];
+    else if (this.config.body.modelPath?.includes('efficientpose')) bodyRes = this.config.body.enabled ? await efficientpose.predict(img.tensor, this.config) : [];
+    else if (this.config.body.modelPath?.includes('movenet')) bodyRes = this.config.body.enabled ? await movenet.predict(img.tensor, this.config) : [];
     elapsedTime = Math.trunc(now() - timeStamp);
     if (elapsedTime > 0) this.performance.body = elapsedTime;
   }
@@ -452,12 +452,12 @@ export class Human {
   // run handpose
   this.analyze('Start Hand:');
   if (this.config.async) {
-    handRes = this.config.hand.enabled ? handpose.predict(inputTensor, this.config) : [];
+    handRes = this.config.hand.enabled ? handpose.predict(img.tensor, this.config) : [];
     if (this.performance.hand) delete this.performance.hand;
   } else {
     this.state = 'run:hand';
     timeStamp = now();
-    handRes = this.config.hand.enabled ? await handpose.predict(inputTensor, this.config) : [];
+    handRes = this.config.hand.enabled ? await handpose.predict(img.tensor, this.config) : [];
     elapsedTime = Math.trunc(now() - timeStamp);
     if (elapsedTime > 0) this.performance.hand = elapsedTime;
   }
@@ -466,14 +466,14 @@ export class Human {
   // run nanodet
   this.analyze('Start Object:');
   if (this.config.async) {
-    if (this.config.object.modelPath?.includes('nanodet')) objectRes = this.config.object.enabled ? nanodet.predict(inputTensor, this.config) : [];
-    else if (this.config.object.modelPath?.includes('centernet')) objectRes = this.config.object.enabled ? centernet.predict(inputTensor, this.config) : [];
+    if (this.config.object.modelPath?.includes('nanodet')) objectRes = this.config.object.enabled ? nanodet.predict(img.tensor, this.config) : [];
+    else if (this.config.object.modelPath?.includes('centernet')) objectRes = this.config.object.enabled ? centernet.predict(img.tensor, this.config) : [];
     if (this.performance.object) delete this.performance.object;
   } else {
     this.state = 'run:object';
     timeStamp = now();
-    if (this.config.object.modelPath?.includes('nanodet')) objectRes = this.config.object.enabled ? await nanodet.predict(inputTensor, this.config) : [];
-    else if (this.config.object.modelPath?.includes('centernet')) objectRes = this.config.object.enabled ? await centernet.predict(inputTensor, this.config) : [];
+    if (this.config.object.modelPath?.includes('nanodet')) objectRes = this.config.object.enabled ? await nanodet.predict(img.tensor, this.config) : [];
+    else if (this.config.object.modelPath?.includes('centernet')) objectRes = this.config.object.enabled ? await centernet.predict(img.tensor, this.config) : [];
     elapsedTime = Math.trunc(now() - timeStamp);
     if (elapsedTime > 0) this.performance.object = elapsedTime;
   }
@@ -507,7 +507,7 @@ export class Human {
   };
   // finally dispose input tensor
-  tf.dispose(inputTensor);
+  tf.dispose(img.tensor);
   // log('Result:', result);
   this.emit('detect');
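
The root cause fixed in this file is a stale alias: `inputTensor` captured the tensor created before segmentation, but the segmentation path replaces the input, so the final `tf.dispose(inputTensor)` freed the wrong tensor and leaked the live one. Rebinding `img` and disposing `img.tensor` keeps the reference current. A condensed, illustrative sketch of the bug:

import * as tf from '@tensorflow/tfjs';

let img = { tensor: tf.zeros([1, 8, 8, 3]) };
const alias = img.tensor;                 // snapshot taken before segmentation
tf.dispose(img.tensor);                   // segmentation path frees the original...
img = { tensor: tf.zeros([1, 8, 8, 3]) }; // ...and rebinds img to a new tensor
// old code: tf.dispose(alias) re-disposed the original, so the new tensor leaked
tf.dispose(img.tensor);                   // fixed code: dispose through the live reference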

View File

@@ -4,11 +4,11 @@
 import * as tf from '../../dist/tfjs.esm.js';
 import * as fxImage from './imagefx';
-import { Tensor } from '../tfjs/types';
-import { Config } from '../config';
+import type { Tensor } from '../tfjs/types';
+import type { Config } from '../config';
 import { env } from '../env';
-type Input = Tensor | typeof Image | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;
+type Input = Tensor | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas | typeof Image | typeof env.Canvas;
 const maxSize = 2048;
 // internal temp canvases
@@ -17,6 +17,25 @@ let outCanvas;
 // @ts-ignore // imagefx is js module that should be converted to a class
 let fx: fxImage.GLImageFilter | null; // instance of imagefx
+export function canvas(width, height) {
+  let c;
+  if (env.browser) {
+    if (typeof OffscreenCanvas !== 'undefined') {
+      c = new OffscreenCanvas(width, height);
+    } else {
+      c = document.createElement('canvas');
+      c.width = width;
+      c.height = height;
+    }
+  } else {
+    // @ts-ignore // env.canvas is an external monkey-patch
+    // eslint-disable-next-line new-cap
+    c = (typeof env.Canvas !== 'undefined') ? new env.Canvas(width, height) : null;
+  }
+  if (!c) throw new Error('Human: Cannot create canvas');
+  return c;
+}
 // process input image and return tensor
 // input can be tensor, imagedata, htmlimageelement, htmlvideoelement
 // input is resized and run through imagefx filter
@@ -27,6 +46,7 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
   if (
     !(input instanceof tf.Tensor)
     && !(typeof Image !== 'undefined' && input instanceof Image)
+    && !(typeof env.Canvas !== 'undefined' && input instanceof env.Canvas)
     && !(typeof ImageData !== 'undefined' && input instanceof ImageData)
     && !(typeof ImageBitmap !== 'undefined' && input instanceof ImageBitmap)
     && !(typeof HTMLImageElement !== 'undefined' && input instanceof HTMLImageElement)
@@ -39,8 +59,8 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
   }
   if (input instanceof tf.Tensor) {
     // if input is tensor, use as-is
-    if ((input as Tensor).shape && (input as Tensor).shape.length === 4 && (input as Tensor).shape[0] === 1 && (input as Tensor).shape[3] === 3) tensor = tf.clone(input);
-    else throw new Error(`Human: Input tensor shape must be [1, height, width, 3] and instead was ${(input as Tensor).shape}`);
+    if ((input as unknown as Tensor).shape && (input as unknown as Tensor).shape.length === 4 && (input as unknown as Tensor).shape[0] === 1 && (input as unknown as Tensor).shape[3] === 3) tensor = tf.clone(input);
+    else throw new Error(`Human: Input tensor shape must be [1, height, width, 3] and instead was ${(input as unknown as Tensor).shape}`);
   } else {
     // check if resizing will be needed
     const originalWidth = input['naturalWidth'] || input['videoWidth'] || input['width'] || (input['shape'] && (input['shape'][1] > 0));
@@ -63,15 +83,11 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
     if ((config.filter.height || 0) > 0) targetHeight = config.filter.height;
     else if ((config.filter.width || 0) > 0) targetHeight = originalHeight * ((config.filter.width || 0) / originalWidth);
     if (!targetWidth || !targetHeight) throw new Error('Human: Input cannot determine dimension');
-    if (!inCanvas || (inCanvas?.width !== targetWidth) || (inCanvas?.height !== targetHeight)) {
-      inCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement('canvas');
-      if (inCanvas?.width !== targetWidth) inCanvas.width = targetWidth;
-      if (inCanvas?.height !== targetHeight) inCanvas.height = targetHeight;
-    }
+    if (!inCanvas || (inCanvas?.width !== targetWidth) || (inCanvas?.height !== targetHeight)) inCanvas = canvas(targetWidth, targetHeight);
     // draw input to our canvas
     const ctx = inCanvas.getContext('2d');
-    if (input instanceof ImageData) {
+    if ((typeof ImageData !== 'undefined') && (input instanceof ImageData)) {
       ctx.putImageData(input, 0, 0);
     } else {
       if (config.filter.flip && typeof ctx.translate !== 'undefined') {
@@ -83,11 +99,10 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
         ctx.drawImage(input, 0, 0, originalWidth, originalHeight, 0, 0, inCanvas?.width, inCanvas?.height);
       }
     }
     // imagefx transforms using gl
     if (config.filter.enabled) {
       if (!fx || !outCanvas || (inCanvas.width !== outCanvas.width) || (inCanvas?.height !== outCanvas?.height)) {
-        outCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(inCanvas?.width, inCanvas?.height) : document.createElement('canvas');
+        outCanvas = canvas(inCanvas?.width, inCanvas?.height);
         if (outCanvas?.width !== inCanvas?.width) outCanvas.width = inCanvas?.width;
         if (outCanvas?.height !== inCanvas?.height) outCanvas.height = inCanvas?.height;
         // log('created FX filter');
@@ -146,45 +161,58 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
     if (outCanvas.data) { // if we have data, just convert to tensor
       const shape = [outCanvas.height, outCanvas.width, 3];
       pixels = tf.tensor3d(outCanvas.data, shape, 'int32');
-    } else if (outCanvas instanceof ImageData) { // if input is imagedata, just use it
+    } else if ((typeof ImageData !== 'undefined') && (outCanvas instanceof ImageData)) { // if input is imagedata, just use it
       pixels = tf.browser ? tf.browser.fromPixels(outCanvas) : null;
     } else if (config.backend === 'webgl' || config.backend === 'humangl') { // tf kernel-optimized method to get imagedata
       // we cant use canvas as-is as it already has a context, so we do a silly one more canvas
-      const tempCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement('canvas');
+      const tempCanvas = canvas(targetWidth, targetHeight);
       tempCanvas.width = targetWidth;
       tempCanvas.height = targetHeight;
       const tempCtx = tempCanvas.getContext('2d');
       tempCtx?.drawImage(outCanvas, 0, 0);
-      pixels = tf.browser ? tf.browser.fromPixels(tempCanvas) : null;
+      pixels = (tf.browser && env.browser) ? tf.browser.fromPixels(tempCanvas) : null;
     } else { // cpu and wasm kernel does not implement efficient fromPixels method
       // we cant use canvas as-is as it already has a context, so we do a silly one more canvas and do fromPixels on ImageData instead
-      const tempCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement('canvas');
+      const tempCanvas = canvas(targetWidth, targetHeight);
       tempCanvas.width = targetWidth;
       tempCanvas.height = targetHeight;
       const tempCtx = tempCanvas.getContext('2d');
-      tempCtx?.drawImage(outCanvas, 0, 0);
-      const data = tempCtx?.getImageData(0, 0, targetWidth, targetHeight);
-      pixels = tf.browser ? tf.browser.fromPixels(data) : null;
+      tempCtx.drawImage(outCanvas, 0, 0);
+      const data = tempCtx.getImageData(0, 0, targetWidth, targetHeight);
+      if (tf.browser && env.browser) {
+        pixels = tf.browser.fromPixels(data);
+      } else {
+        pixels = tf.tidy(() => {
+          const imageData = tf.tensor(Array.from(data.data), [targetWidth, targetHeight, 4]);
+          const channels = tf.split(imageData, 4, 2); // split rgba to channels
+          const rgb = tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb and ignore alpha
+          const expand = tf.reshape(rgb, [imageData.shape[0], imageData.shape[1], 3]); // move extra dim from the end of tensor and use it as batch number instead
+          return expand;
+        });
+      }
     }
     if (pixels) {
       const casted = tf.cast(pixels, 'float32');
       tensor = tf.expandDims(casted, 0);
       tf.dispose(pixels);
       tf.dispose(casted);
+    } else {
+      tensor = tf.zeros([1, targetWidth, targetHeight, 3]);
+      throw new Error('Human: Cannot create tensor from input');
     }
   }
 }
-const canvas = config.filter.return ? outCanvas : null;
-return { tensor, canvas };
+return { tensor, canvas: (config.filter.return ? outCanvas : null) };
}
 let lastInputSum = 0;
 let lastCacheDiff = 1;
-export async function skip(instance, input: Tensor) {
-  if (instance.config.cacheSensitivity === 0) return false;
+export async function skip(config, input: Tensor) {
+  if (config.cacheSensitivity === 0) return false;
   const resizeFact = 32;
   if (!input.shape[1] || !input.shape[2]) return false;
   const reduced: Tensor = tf.image.resizeBilinear(input, [Math.trunc(input.shape[1] / resizeFact), Math.trunc(input.shape[2] / resizeFact)]);
   // use tensor sum
   /*
   const sumT = this.tf.sum(reduced);
@@ -193,17 +221,17 @@ export async function skip(config, input: Tensor) {
   */
   // use js loop sum, faster than uploading tensor to gpu calculating and downloading back
   const reducedData = await reduced.data(); // raw image rgb array
+  tf.dispose(reduced);
   let sum = 0;
   for (let i = 0; i < reducedData.length / 3; i++) sum += reducedData[3 * i + 2]; // look only at green value of each pixel
-  reduced.dispose();
   const diff = 100 * (Math.max(sum, lastInputSum) / Math.min(sum, lastInputSum) - 1);
   lastInputSum = sum;
   // if previous frame was skipped, skip this frame if changed more than cacheSensitivity
   // if previous frame was not skipped, then look for cacheSensitivity or difference larger than one in previous frame to avoid resetting cache in subsequent frames unnecessarily
-  const skipFrame = diff < Math.max(instance.config.cacheSensitivity, lastCacheDiff);
+  const skipFrame = diff < Math.max(config.cacheSensitivity, lastCacheDiff);
   // if difference is above 10x threshold, don't use last value to force reset cache for significant change of scenes or images
-  lastCacheDiff = diff > 10 * instance.config.cacheSensitivity ? 0 : diff;
+  lastCacheDiff = diff > 10 * config.cacheSensitivity ? 0 : diff;
   // console.log('skipFrame', skipFrame, this.config.cacheSensitivity, diff);
   return skipFrame;
 }
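
Besides the new `canvas()` helper centralizing canvas creation across browser and Node, the rewritten `skip()` now takes the config directly and frees the downsampled tensor as soon as its data is read. Its caching heuristic is easiest to see with numbers; a worked sketch with made-up values:

// frame-skip heuristic, illustrative values only
const lastSum = 10000; // green-channel sum of the previous downsampled frame
const sum = 10150;     // green-channel sum of the current frame
const diff = 100 * (Math.max(sum, lastSum) / Math.min(sum, lastSum) - 1); // 1.5% relative change
const cacheSensitivity = 0.7; // illustrative threshold; the real default lives in config
const lastCacheDiff = 1;
const skipFrame = diff < Math.max(cacheSensitivity, lastCacheDiff); // 1.5 < 1 -> false: rerun models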

View File

@@ -1,5 +1,5 @@
 import { log } from './helpers';
-import { GraphModel } from './tfjs/types';
+import type { GraphModel } from './tfjs/types';
 import * as facemesh from './blazeface/facemesh';
 import * as faceres from './faceres/faceres';
 import * as emotion from './emotion/emotion';

View File

@@ -4,9 +4,9 @@
 import { log, join } from '../helpers';
 import * as tf from '../../dist/tfjs.esm.js';
-import { BodyResult } from '../result';
-import { GraphModel, Tensor } from '../tfjs/types';
-import { Config } from '../config';
+import type { BodyResult } from '../result';
+import type { GraphModel, Tensor } from '../tfjs/types';
+import type { Config } from '../config';
 let model: GraphModel;

View File

@@ -5,9 +5,9 @@
 import { log, join } from '../helpers';
 import * as tf from '../../dist/tfjs.esm.js';
 import { labels } from './labels';
-import { ObjectResult } from '../result';
-import { GraphModel, Tensor } from '../tfjs/types';
-import { Config } from '../config';
+import type { ObjectResult } from '../result';
+import type { GraphModel, Tensor } from '../tfjs/types';
+import type { Config } from '../config';
 import { env } from '../env';
 let model;
@@ -36,6 +36,7 @@ async function process(res: Tensor, inputSize, outputShape, config: Config) {
   tf.dispose(squeezeT);
   const stackT = tf.stack([arr[1], arr[0], arr[3], arr[2]], 1); // reorder dims as tf.nms expects y, x
   const boxesT = tf.squeeze(stackT);
+  tf.dispose(stackT);
   const scoresT = tf.squeeze(arr[4]);
   const classesT = tf.squeeze(arr[5]);
   arr.forEach((t) => tf.dispose(t));
@@ -86,6 +87,7 @@ export async function predict(input: Tensor, config: Config): Promise<ObjectResu
   const obj = await process(objectT, model.inputSize, outputSize, config);
   last = obj;
   resolve(obj);
 });
}

View File

@@ -5,9 +5,9 @@
 import { log, join } from '../helpers';
 import * as tf from '../../dist/tfjs.esm.js';
 import { labels } from './labels';
-import { ObjectResult } from '../result';
-import { GraphModel, Tensor } from '../tfjs/types';
-import { Config } from '../config';
+import type { ObjectResult } from '../result';
+import type { GraphModel, Tensor } from '../tfjs/types';
+import type { Config } from '../config';
 import { env } from '../env';
 let model;

View File

@@ -2,7 +2,7 @@
  * Module that analyzes existing results and recombines them into a unified person object
  */
-import { FaceResult, BodyResult, HandResult, GestureResult, PersonResult } from './result';
+import type { FaceResult, BodyResult, HandResult, GestureResult, PersonResult } from './result';
 export function join(faces: Array<FaceResult>, bodies: Array<BodyResult>, hands: Array<HandResult>, gestures: Array<GestureResult>, shape: Array<number> | undefined): Array<PersonResult> {
   let id = 0;

View File

@@ -1,5 +1,5 @@
 import * as kpt from './keypoints';
-import { BodyResult } from '../result';
+import type { BodyResult } from '../result';
 export function eitherPointDoesntMeetConfidence(a: number, b: number, minConfidence: number) {
   return (a < minConfidence || b < minConfidence);

View File

@@ -2,8 +2,8 @@
  * Type definitions for Human result object
  */
-import { Tensor } from './tfjs/types';
-import { FaceGesture, BodyGesture, HandGesture, IrisGesture } from './gesture/gesture';
+import type { Tensor } from './tfjs/types';
+import type { FaceGesture, BodyGesture, HandGesture, IrisGesture } from './gesture/gesture';
 /** Face results
  * Combined results of face detector, face mesh, age, gender, emotion, embedding, iris models
@@ -186,7 +186,7 @@ export interface Result {
   /** global performance object with timing values for each operation */
   performance: Record<string, unknown>,
   /** optional processed canvas that can be used to draw input on screen */
-  canvas?: OffscreenCanvas | HTMLCanvasElement | null,
+  canvas?: OffscreenCanvas | HTMLCanvasElement | null | undefined,
   /** timestamp of detection representing the milliseconds elapsed since the UNIX epoch */
   readonly timestamp: number,
   /** getter property that returns unified persons object */

View File

@@ -5,8 +5,9 @@
 import { log, join } from '../helpers';
 import * as tf from '../../dist/tfjs.esm.js';
 import * as image from '../image/image';
-import { GraphModel, Tensor } from '../tfjs/types';
-import { Config } from '../config';
+import type { GraphModel, Tensor } from '../tfjs/types';
+import type { Config } from '../config';
+import { env } from '../env';
 type Input = Tensor | typeof Image | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;
@@ -36,6 +37,7 @@ export async function predict(input: { tensor: Tensor | null, canvas: OffscreenC
   tf.dispose(norm);
   const squeeze = tf.squeeze(res, 0);
+  tf.dispose(res);
   let resizeOutput;
   if (squeeze.shape[2] === 2) {
     // model meet has two channels for fg and bg
@@ -57,16 +59,19 @@ export async function predict(input: { tensor: Tensor | null, canvas: OffscreenC
   } else { // model selfie has a single channel that we can use directly
     resizeOutput = tf.image.resizeBilinear(squeeze, [width, height]);
   }
-  if (typeof document === 'undefined') return resizeOutput.data(); // we're running in nodejs so return alpha array as-is
+  tf.dispose(squeeze);
+  if (env.node) {
+    const data = await resizeOutput.data();
+    tf.dispose(resizeOutput);
+    return data; // we're running in nodejs so return alpha array as-is
+  }
   const overlay = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(width, height) : document.createElement('canvas');
   overlay.width = width;
   overlay.height = height;
   if (tf.browser) await tf.browser.toPixels(resizeOutput, overlay);
   tf.dispose(resizeOutput);
-  tf.dispose(squeeze);
-  tf.dispose(res);
   // get alpha channel data
   const alphaCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(width, height) : document.createElement('canvas'); // need one more copy since input may already have gl context so 2d context fails
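
The subtlety here is the ordering between async reads and disposal: `res` and `squeeze` are now freed as soon as their consumers have run, and the Node path awaits `resizeOutput.data()` before disposing. A minimal sketch of the rule, with an illustrative helper name:

import * as tf from '@tensorflow/tfjs';

async function readThenFree(t: tf.Tensor): Promise<Float32Array> {
  const data = await t.data() as Float32Array; // download from backend completes first
  tf.dispose(t); // safe: nothing touches the tensor after this point
  return data;   // calling t.data() after dispose would throw, so the read must come first
}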

View File

@@ -1,8 +1,10 @@
 import { log, now, mergeDeep } from './helpers';
 import * as sample from './sample';
 import * as tf from '../dist/tfjs.esm.js';
-import { Config } from './config';
-import { Result } from './result';
+import * as image from './image/image';
+import type { Config } from './config';
+import type { Result } from './result';
+import { env } from './env';
 async function warmupBitmap(instance) {
   const b64toBlob = (base64: string, type = 'application/octet-stream') => fetch(`data:${type};base64,${base64}`).then((res) => res.blob());
@@ -24,31 +26,38 @@ async function warmupBitmap(instance) {
 async function warmupCanvas(instance) {
   return new Promise((resolve) => {
     let src;
-    let size = 0;
+    // let size = 0;
     switch (instance.config.warmup) {
       case 'face':
-        size = 256;
+        // size = 256;
         src = 'data:image/jpeg;base64,' + sample.face;
         break;
       case 'full':
       case 'body':
-        size = 1200;
+        // size = 1200;
         src = 'data:image/jpeg;base64,' + sample.body;
         break;
       default:
         src = null;
     }
     // src = encodeURI('../assets/human-sample-upper.jpg');
-    const img = new Image();
+    let img;
+    if (typeof Image !== 'undefined') img = new Image();
+    // @ts-ignore env.image is an external monkey-patch
+    else if (env.Image) img = new env.Image();
     img.onload = async () => {
-      const canvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(size, size) : document.createElement('canvas');
-      canvas.width = img.naturalWidth;
-      canvas.height = img.naturalHeight;
-      const ctx = canvas.getContext('2d');
-      ctx?.drawImage(img, 0, 0);
-      // const data = ctx?.getImageData(0, 0, canvas.height, canvas.width);
-      const res = await instance.detect(canvas, instance.config);
-      resolve(res);
+      const canvas = image.canvas(img.naturalWidth, img.naturalHeight);
+      if (!canvas) {
+        log('Warmup: Canvas not found');
+        resolve({});
+      } else {
+        const ctx = canvas.getContext('2d');
+        ctx.drawImage(img, 0, 0);
+        // const data = ctx?.getImageData(0, 0, canvas.height, canvas.width);
+        const tensor = await instance.image(canvas);
+        const res = await instance.detect(tensor.tensor, instance.config);
+        resolve(res);
+      }
     };
     if (src) img.src = src;
     else resolve(null);
@@ -93,7 +102,7 @@ export async function warmup(instance, userConfig?: Partial<Config>): Promise<Re
   if (!instance.config.warmup || instance.config.warmup === 'none') return { error: 'null' };
   let res;
   if (typeof createImageBitmap === 'function') res = await warmupBitmap(instance);
-  else if (typeof Image !== 'undefined') res = await warmupCanvas(instance);
+  else if (typeof Image !== 'undefined' || env.Canvas !== undefined) res = await warmupCanvas(instance);
   else res = await warmupNode(instance);
   const t1 = now();
   if (instance.config.debug) log('Warmup', instance.config.warmup, Math.round(t1 - t0), 'ms');
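
Warmup can now run under Node whenever `env.Canvas` and `env.Image` are monkey-patched with DOM-compatible constructors. A sketch of the wiring, mirroring the wasm test below and assuming the node-canvas package:

const { Canvas, Image } = require('canvas'); // node-canvas supplies DOM-compatible constructors
const Human = require('@vladmandic/human'); // illustrative require path
Human.env.Canvas = Canvas; // warmupCanvas() falls back to `new env.Image()` in nodejs
Human.env.Image = Image;   // and image.canvas() to `new env.Canvas(width, height)`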

View File

@@ -5,7 +5,7 @@ const config = {
   modelBasePath: 'file://models/',
   backend: 'tensorflow',
   debug: false,
-  async: false,
+  async: true,
   face: {
     enabled: true,
     detector: { enabled: true, rotation: true },

View File

@@ -1,10 +1,15 @@
-const tf = require('@tensorflow/tfjs/dist/tf.node.js'); // wasm backend requires tfjs to be loaded first
-const wasm = require('@tensorflow/tfjs-backend-wasm/dist/tf-backend-wasm.node.js'); // wasm backend does not get auto-loaded in nodejs
-const Human = require('../dist/human.node-wasm.js').default;
+const tf = require('@tensorflow/tfjs'); // wasm backend requires tfjs to be loaded first
+const wasm = require('@tensorflow/tfjs-backend-wasm'); // wasm backend does not get auto-loaded in nodejs
+const { Canvas, Image } = require('canvas');
+const Human = require('../dist/human.node-wasm.js');
 const test = require('./test-main.js').test;
+Human.env.Canvas = Canvas;
+Human.env.Image = Image;
 const config = {
-  modelBasePath: 'http://localhost:10030/models/',
+  // modelBasePath: 'http://localhost:10030/models/',
+  modelBasePath: 'https://vladmandic.github.io/human/models/',
   backend: 'wasm',
   wasmPath: 'node_modules/@tensorflow/tfjs-backend-wasm/dist/',
   // wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
@@ -20,12 +25,17 @@ const config = {
   },
   hand: { enabled: true, rotation: false },
   body: { enabled: true },
-  object: { enabled: true },
+  object: { enabled: false },
   segmentation: { enabled: true },
   filter: { enabled: false },
 };
-// @ts-ignore // in nodejs+wasm must set explicitly before using human
-wasm.setWasmPaths(config.wasmPath); tf.setBackend('wasm');
-test(Human, config);
+async function main() {
+  wasm.setWasmPaths(config.wasmPath);
+  await tf.setBackend('wasm');
+  await tf.ready();
+  test(Human.Human, config);
+}
+main();

View File

@@ -14,10 +14,10 @@ const config = {
     description: { enabled: true },
     emotion: { enabled: true },
   },
-  hand: { enabled: true, rotation: true },
-  body: { enabled: true },
-  object: { enabled: true },
-  segmentation: { enabled: true },
+  hand: { enabled: false, rotation: true },
+  body: { enabled: false },
+  object: { enabled: false },
+  segmentation: { enabled: false },
   filter: { enabled: false },
 };
}; };

View File

@@ -19,6 +19,8 @@ const ignoreMessages = [
   'cudart_stub.cc',
   'cuda_driver.cc:326',
   'cpu_allocator_impl.cc',
+  '--trace-warnings',
+  'ExperimentalWarning',
 ];
 const status = {
@@ -48,8 +50,9 @@ function logStdIO(ok, test, buffer) {
 }
 async function runTest(test) {
+  log.info();
+  log.info(test, 'start');
   return new Promise((resolve) => {
-    log.info(test, 'start');
     const child = fork(path.join(__dirname, test), [], { silent: true });
     child.on('message', (data) => logMessage(test, data));
     child.on('error', (data) => log.error(test, ':', data.message || data));
@@ -68,6 +71,7 @@ async function testAll() {
   process.on('uncaughtException', (data) => log.error('nodejs unhandled exception', data));
   log.info('tests:', tests);
   for (const test of tests) await runTest(test);
+  log.info();
   log.info('status:', status);
 }