initial automated browser tests

pull/356/head
Vladimir Mandic 2021-09-20 17:17:13 -04:00
parent 384d94c0cb
commit 04fcbc7e6a
9 changed files with 189 additions and 18 deletions

package.json

@@ -24,7 +24,7 @@
     "start": "node --no-warnings demo/nodejs/node.js",
     "dev": "build --profile development",
     "build": "rimraf test/build.log && build --profile production",
-    "test": "node --no-warnings --unhandled-rejections=strict --trace-uncaught test/test.js",
+    "test": "node --no-warnings --unhandled-rejections=strict --trace-uncaught test/node.js",
     "lint": "eslint src demo test",
     "scan": "npx auditjs@latest ossi --dev --quiet"
   },

src/config.ts

@@ -216,10 +216,10 @@ export interface Config {
   */
  cacheSensitivity: number;
-  /** Cache sensitivity
-   * - values 0..1 where 0.01 means reset cache if input changed more than 1%
-   * - set to 0 to disable caching
-   */
+  /** Yield to main thread periodically */
+  yield: boolean;
  /** Internal Variable */
  skipFrame: boolean;
  /** Run input through image filters before inference
@@ -262,6 +262,7 @@ const config: Config = {
  cacheSensitivity: 0.75, // cache sensitivity
  // values 0..1 where 0.01 means reset cache if input changed more than 1%
  // set to 0 to disable caching
+ yield: false, // yield to main thread periodically
  skipFrame: false, // internal & dynamic
  filter: { // run input through image filters before inference
  // image filters run with near-zero latency as they are executed on the GPU
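
The new yield option is opt-in and defaults to false. A minimal sketch of enabling it, assuming the published package entry point @vladmandic/human and a hypothetical inputCanvas element; with yield enabled, detect() awaits wait(1) between pipeline stages so the main thread can process pending UI work:

import Human from '@vladmandic/human';

const human = new Human({ yield: true }); // all other options keep their defaults
async function run(inputCanvas) { // inputCanvas is a hypothetical HTMLCanvasElement
  const result = await human.detect(inputCanvas); // detect() now yields between stages
  return result;
}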

src/env.ts

@@ -1,7 +1,8 @@
 import * as tf from '../dist/tfjs.esm.js';
 import * as image from './image/image';
+import { mergeDeep } from './helpers';
-export interface Env {
+export type Env = {
  browser: undefined | boolean,
  node: undefined | boolean,
  worker: undefined | boolean,
@@ -12,6 +13,7 @@ export interface Env {
  tfjs: {
   version: undefined | string,
  },
+ offscreen: undefined | boolean,
  wasm: {
   supported: undefined | boolean,
   backend: undefined | boolean,
@@ -34,7 +36,8 @@ export interface Env {
  Image: undefined,
 }
-export const env: Env = {
+// eslint-disable-next-line import/no-mutable-exports
+export let env: Env = {
  browser: undefined,
  node: undefined,
  worker: undefined,
@@ -42,6 +45,7 @@ export const env: Env = {
  agent: undefined,
  initial: true,
  backends: [],
+ offscreen: undefined,
  tfjs: {
   version: undefined,
  },
@@ -127,6 +131,8 @@ export async function get() {
  env.worker = env.browser ? (typeof WorkerGlobalScope !== 'undefined') : undefined;
  env.tfjs.version = tf.version_core;
+ // offscreencanvas supported?
+ env.offscreen = typeof env.offscreen === 'undefined' ? typeof OffscreenCanvas !== 'undefined' : env.offscreen;
  // get platform and agent
  if (typeof navigator !== 'undefined') {
   const raw = navigator.userAgent.match(/\(([^()]+)\)/g);
@@ -141,9 +147,12 @@ export async function get() {
   env.platform = `${process.platform} ${process.arch}`;
   env.agent = `NodeJS ${process.version}`;
  }
  await backendInfo();
  // get cpu info
  // await cpuInfo();
 }
+export async function set(obj) {
+ env = mergeDeep(env, obj);
+}
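
A minimal sketch of the new mutable env and set(), assuming a direct module import; set() deep-merges an override into the detected environment, which is the same effect the browser test below achieves by assigning human.env.offscreen directly:

import { env, set } from './env';

await set({ offscreen: false }); // force-disable OffscreenCanvas usage
console.log(env.offscreen); // false; all other detected values are unchanged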

src/helpers.ts

@@ -63,3 +63,9 @@ export const minmax = (data: Array<number>) => data.reduce((acc: Array<number>,
  acc[1] = (acc[1] === undefined || val > acc[1]) ? val : acc[1];
  return acc;
 }, []);
+
+// helper function: async wait
+export async function wait(time) {
+ const waiting = new Promise((resolve) => setTimeout(() => resolve(true), time));
+ await waiting;
+}
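
Because wait() resolves through setTimeout, awaiting it queues a macrotask rather than just draining microtasks, so the browser gets a chance to paint and handle input in between. A sketch with a hypothetical compute() standing in for expensive per-item work:

async function heavyLoop(items) {
  for (const item of items) {
    compute(item); // hypothetical expensive synchronous step
    await wait(1); // release the main thread before the next item
  }
}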

src/human.ts

@@ -2,7 +2,7 @@
  * Human main module
  */
-import { log, now, mergeDeep, validate } from './helpers';
+import { log, now, mergeDeep, validate, wait } from './helpers';
 import { Config, defaults } from './config';
 import type { Result, FaceResult, HandResult, BodyResult, ObjectResult, GestureResult, PersonResult } from './result';
 import * as tf from '../dist/tfjs.esm.js';
@@ -35,7 +35,7 @@ import type { DrawOptions } from './draw';
 export * from './config';
 export * from './result';
 export type { DrawOptions } from './draw';
-export { env } from './env';
+export { env, Env } from './env';
 /** Defines all possible input types for **Human** detection
  * @typedef Input Type
@@ -320,6 +320,7 @@ export class Human {
   */
  init() {
   backend.check(this);
+  env.set(this.env);
  }
 /** Load method preloads all configured models on-demand
@@ -395,6 +396,7 @@
  */
 async detect(input: Input, userConfig?: Partial<Config>): Promise<Result | Error> {
  // detection happens inside a promise
+ if (this.config.yield) await wait(1);
  return new Promise(async (resolve) => {
   this.state = 'config';
   let timeStamp;
@@ -419,6 +421,7 @@
   // load models if enabled
   await this.load();
+  if (this.config.yield) await wait(1);
   timeStamp = now();
   let img = image.process(input, this.config);
   this.process = img;
@@ -465,11 +468,11 @@
   let objectRes: ObjectResult[] | Promise<ObjectResult[]> | never[] = [];
   // run face detection followed by all models that rely on face bounding box: face mesh, age, gender, emotion
+  this.state = 'run:face';
   if (this.config.async) {
    faceRes = this.config.face.enabled ? face.detectFace(this, img.tensor) : [];
    if (this.performance.face) delete this.performance.face;
   } else {
-   this.state = 'run:face';
    timeStamp = now();
    faceRes = this.config.face.enabled ? await face.detectFace(this, img.tensor) : [];
    elapsedTime = Math.trunc(now() - timeStamp);
@@ -478,6 +481,7 @@
   // run body: can be posenet, blazepose, efficientpose, movenet
   this.analyze('Start Body:');
+  this.state = 'run:body';
   if (this.config.async) {
    if (this.config.body.modelPath?.includes('posenet')) bodyRes = this.config.body.enabled ? posenet.predict(img.tensor, this.config) : [];
    else if (this.config.body.modelPath?.includes('blazepose')) bodyRes = this.config.body.enabled ? blazepose.predict(img.tensor, this.config) : [];
@@ -485,7 +489,6 @@
    else if (this.config.body.modelPath?.includes('movenet')) bodyRes = this.config.body.enabled ? movenet.predict(img.tensor, this.config) : [];
    if (this.performance.body) delete this.performance.body;
   } else {
-   this.state = 'run:body';
    timeStamp = now();
    if (this.config.body.modelPath?.includes('posenet')) bodyRes = this.config.body.enabled ? await posenet.predict(img.tensor, this.config) : [];
    else if (this.config.body.modelPath?.includes('blazepose')) bodyRes = this.config.body.enabled ? await blazepose.predict(img.tensor, this.config) : [];
@@ -498,11 +501,11 @@
   // run handpose
   this.analyze('Start Hand:');
+  this.state = 'run:hand';
   if (this.config.async) {
    handRes = this.config.hand.enabled ? handpose.predict(img.tensor, this.config) : [];
    if (this.performance.hand) delete this.performance.hand;
   } else {
-   this.state = 'run:hand';
    timeStamp = now();
    handRes = this.config.hand.enabled ? await handpose.predict(img.tensor, this.config) : [];
    elapsedTime = Math.trunc(now() - timeStamp);
@@ -512,12 +515,12 @@
   // run nanodet
   this.analyze('Start Object:');
+  this.state = 'run:object';
   if (this.config.async) {
    if (this.config.object.modelPath?.includes('nanodet')) objectRes = this.config.object.enabled ? nanodet.predict(img.tensor, this.config) : [];
    else if (this.config.object.modelPath?.includes('centernet')) objectRes = this.config.object.enabled ? centernet.predict(img.tensor, this.config) : [];
    if (this.performance.object) delete this.performance.object;
   } else {
-   this.state = 'run:object';
    timeStamp = now();
    if (this.config.object.modelPath?.includes('nanodet')) objectRes = this.config.object.enabled ? await nanodet.predict(img.tensor, this.config) : [];
    else if (this.config.object.modelPath?.includes('centernet')) objectRes = this.config.object.enabled ? await centernet.predict(img.tensor, this.config) : [];
@@ -527,9 +530,12 @@
   this.analyze('End Object:');
   // if async wait for results
+  this.state = 'run:await';
+  if (this.config.yield) await wait(1);
   if (this.config.async) [faceRes, bodyRes, handRes, objectRes] = await Promise.all([faceRes, bodyRes, handRes, objectRes]);
   // run gesture analysis last
+  this.state = 'run:gesture';
   let gestureRes: GestureResult[] = [];
   if (this.config.gesture.enabled) {
    timeStamp = now();
@@ -539,7 +545,6 @@
   }
   this.performance.total = Math.trunc(now() - timeStart);
-  this.state = 'idle';
   const shape = this.process?.tensor?.shape || [];
   this.result = {
    face: faceRes as FaceResult[],
@@ -558,6 +563,7 @@
   // log('Result:', result);
   this.emit('detect');
+  this.state = 'idle';
   resolve(this.result);
  });
 }
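
With the state assignments moved ahead of the async branches and the new run:await and run:gesture values, callers can observe pipeline progress while an async detect() is in flight. A sketch mirroring the polling loop in test/browser.html, assuming human and input already exist:

const timer = setInterval(() => console.log(human.state), 10);
await human.detect(input); // logs 'run:face', 'run:body', 'run:hand', 'run:object', 'run:await', 'run:gesture'
clearInterval(timer); // state returns to 'idle' after the 'detect' event fires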

src/image/image.ts

@@ -21,7 +21,7 @@ let fx: fxImage.GLImageFilter | null; // instance of imagefx
 export function canvas(width, height): HTMLCanvasElement | OffscreenCanvas {
  let c;
  if (env.browser) {
-  if (typeof OffscreenCanvas !== 'undefined') {
+  if (env.offscreen) {
    c = new OffscreenCanvas(width, height);
   } else {
    c = document.createElement('canvas');
@@ -63,6 +63,7 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
  }
  if (input instanceof tf.Tensor) {
   // if input is tensor, use as-is
+  if (input.isDisposed) throw new Error('input tensor is disposed');
   if ((input as unknown as Tensor).shape && (input as unknown as Tensor).shape.length === 4 && (input as unknown as Tensor).shape[0] === 1 && (input as unknown as Tensor).shape[3] === 3) tensor = tf.clone(input);
   else throw new Error(`input tensor shape must be [1, height, width, 3] and instead was ${(input as unknown as Tensor).shape}`);
  } else {
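
A sketch of what the new guard changes for callers, assuming an existing human instance: a disposed input tensor now fails fast with a readable message instead of crashing later inside the backend:

const t = human.tf.zeros([1, 256, 256, 3]); // valid [1, height, width, 3] input
human.tf.dispose(t);
await human.detect(t); // fails fast: 'input tensor is disposed' is thrown during input processing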

src/tfjs/humangl.ts

@@ -45,6 +45,7 @@ function extensions(): void {
  */
 export async function register(instance): Promise<void> {
  // force backend reload if gl context is not valid
+ if (instance.config.backend !== 'humangl') return;
  if ((config.name in tf.engine().registry) && (!config.gl || !config.gl.getParameter(config.gl.VERSION))) {
   log('error: humangl backend invalid context');
   models.reset(instance);
@@ -95,11 +96,12 @@ export async function register(instance): Promise<void> {
   log('error: cannot set WebGL context:', err);
   return;
  }
- const current = tf.backend().getGPGPUContext().gl;
+ const current = tf.backend().getGPGPUContext ? tf.backend().getGPGPUContext().gl : null;
  if (current) {
   log(`humangl webgl version:${current.getParameter(current.VERSION)} renderer:${current.getParameter(current.RENDERER)}`);
  } else {
-  log('error: no current context:', current, config.gl);
+  log('error: no current gl context:', current, config.gl);
+  return;
  }
  try {
   const ctx = new tf.GPGPUContext(config.gl);
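
A short sketch of the effect of the new guard, with instance standing in for a Human instance: register() can now be called unconditionally during backend setup and is a no-op unless humangl is actually selected:

instance.config.backend = 'wasm'; // any backend other than 'humangl'
await register(instance); // returns immediately; no WebGL context is created or touched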

test/browser.html (new file, 146 lines)

@ -0,0 +1,146 @@
<!DOCTYPE html>
<html lang="en">
<head>
<title>Human Browser Tests</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="viewport" content="width=device-width, shrink-to-fit=yes">
<meta name="keywords" content="Human">
<meta name="application-name" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<link rel="apple-touch-icon" href="../../assets/icon.png">
<style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
html { font-family: 'Lato', 'Segoe UI'; font-size: 14px; font-variant: small-caps; }
body { margin: 0; background: black; color: white; }
canvas { position: absolute; bottom: 10px; right: 10px; width: 256px; height: 256px; }
pre { line-height: 150%; }
.events { position: absolute; top: 10px; right: 10px; width: 12rem; height: 1.25rem; background-color: grey; padding: 8px; }
.state { position: absolute; top: 60px; right: 10px; width: 12rem; height: 1.25rem; background-color: grey; padding: 8px; }
</style>
</head>
<body>
<pre id="log"></pre>
<div id="events" class="events"></div>
<div id="state" class="state"></div>
<script type="module">
import Human from '../dist/human.esm.js';
const config = {
async: true,
warmup: 'full',
debug: true,
cacheSensitivity: 0,
object: { enabled: true },
}
const backends = ['wasm', 'webgl', 'humangl'];
// const backends = ['humangl'];
const start = performance.now();
function str(...msg) {
if (!Array.isArray(msg)) return msg;
let line = '';
for (const entry of msg) {
if (typeof entry === 'object') line += JSON.stringify(entry).replace(/"/g, '').replace(/,/g, ', ').replace(/:/g, ': ');
else line += entry;
}
return line + '\n';
}
async function log(...msgs) {
document.getElementById('log').innerHTML += str(...msgs);
console.log(...msgs);
}
async function image(url) {
const el = document.createElement('img');
el.id = 'image';
const loaded = new Promise((resolve) => { el.onload = () => resolve(true) });
el.src = url;
await loaded;
return el;
}
async function wait(time) {
const waiting = new Promise((resolve) => setTimeout(() => resolve(), time));
await waiting;
}
async function events(event) {
document.getElementById('events').innerText = `${Math.round(performance.now() - start)}ms Event: ${event}`;
}
async function main() {
log('human tests');
let res;
let human = new Human(config);
setInterval(() => { document.getElementById('state').innerText = `State: ${human.state}`; }, 10);
log({ version: human.version });
log({ env: human.env });
log({ config: human.config });
for (const backend of backends) {
log('');
log('test start:', backend);
human = new Human(config);
human.config.backend = backend;
human.events.addEventListener('warmup', () => events('warmup'));
human.events.addEventListener('image', () => events('image'));
human.events.addEventListener('detect', () => events('detect'));
await human.load();
human.env.offscreen = false;
human.env.initial = false;
await human.init();
log({ tfjs: human.tf.version.tfjs, backend: human.tf.getBackend() });
const models = Object.keys(human.models).map((model) => ({ name: model, loaded: (human.models[model] !== null) }));
log({ models: { models }});
log({ memory: human.tf.engine().state });
res = await human.validate();
log({ validate: res });
res = await human.warmup();
log({ warmup: res });
let img = await image('../../samples/ai-body.jpg');
const input = await human.image(img);
let node = document.body.appendChild(res.canvas);
await wait(100);
log({ input });
res = await human.detect(input.tensor);
log({ detect: res});
const interpolated = human.next();
log({ interpolated });
const persons = res.persons;
log({ persons: { persons } });
log({ summary: { persons: persons.length, face: res.face.length, body: res.body.length, hand: res.hand.length, object: res.object.length, gesture: res.gesture.length }});
log({ performance: human.performance });
human.tf.dispose(input.tensor);
document.body.removeChild(node);
await wait(100);
img = await image('../../samples/ai-face.jpg');
human.reset();
human.config.backend = backend;
for (const val of [0, 0.25, 0.5, 0.75, 10]) {
human.performance = {};
const t0 = performance.now();
for (let i = 0; i < 10; i++) {
res = await human.detect(img, { cacheSensitivity: val, filter: { pixelate: 5 * i } });
node = document.body.appendChild(res.canvas);
}
const t1 = performance.now();
log({ benchmark: { time: Math.round((t1 - t0) / 10), cacheSensitivity: val }, performance: human.performance });
await wait(100);
}
document.body.removeChild(node);
log({ memory: human.tf.engine().state });
}
log('');
log('tests complete');
}
main();
</script>
</body>
</html>