experimental webgl status monitoring

pull/356/head
Vladimir Mandic 2021-09-17 11:23:00 -04:00
parent 75630a7aa3
commit 8dba39245d
17 changed files with 187 additions and 96 deletions

View File

@@ -9,27 +9,16 @@ let busy = false;
// eslint-disable-next-line no-undef, new-cap
const human = new Human.default();
function log(...msg) {
const dt = new Date();
const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
// eslint-disable-next-line no-console
if (msg) console.log(ts, 'Human:', ...msg);
}
onmessage = async (msg) => {
onmessage = async (msg) => { // receive message from main thread
if (busy) return;
busy = true;
// received from index.js using:
// worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
const image = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
let result = {};
try {
result = await human.detect(image, msg.data.userConfig);
} catch (err) {
result.error = err.message;
log('worker thread error:', err.message);
}
result = await human.detect(image, msg.data.userConfig);
result.tensors = human.tf.engine().state.numTensors; // append to result object so the main thread gets the info
result.backend = human.tf.getBackend(); // append to result object so the main thread gets the info
if (result.canvas) { // convert canvas to imageData and send it by reference
const canvas = new OffscreenCanvas(result.canvas.width, result.canvas.height);
const ctx = canvas.getContext('2d');
@@ -37,9 +26,9 @@ onmessage = async (msg) => {
const img = ctx ? ctx.getImageData(0, 0, result.canvas.width, result.canvas.height) : null;
result.canvas = null; // must strip original canvas from return value as it cannot be transferred from the worker thread
if (img) postMessage({ result, image: img.data.buffer, width: msg.data.width, height: msg.data.height }, [img.data.buffer]);
else postMessage({ result });
else postMessage({ result }); // send message back to main thread with canvas
} else {
postMessage({ result });
postMessage({ result }); // send message back to main thread without canvas
}
busy = false;
};
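
For reference, a minimal sketch of the main-thread side this worker expects; the worker path and the canvas/userConfig names here are assumptions that mirror the demo, not the exact index.js wiring:

const worker = new Worker('worker.js'); // hypothetical path to a worker built from this script
const ctx = canvas.getContext('2d');
const image = ctx.getImageData(0, 0, canvas.width, canvas.height);
// transfer the pixel buffer by reference, exactly as the comment inside the worker describes
worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, userConfig }, [image.data.buffer]);
worker.onmessage = (msg) => {
  // result now carries the backend name and tensor count the worker appends before replying
  console.log('worker detect:', msg.data.result.backend, 'tensors:', msg.data.result.tensors);
};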

View File

@@ -33,6 +33,7 @@ let human;
let userConfig = {
warmup: 'none',
backend: 'humangl',
debug: true,
/*
wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
async: false,
@@ -176,6 +177,20 @@ function status(msg) {
}
}
async function videoPlay() {
document.getElementById('btnStartText').innerHTML = 'pause video';
await document.getElementById('video').play();
status();
}
async function videoPause() {
document.getElementById('btnStartText').innerHTML = 'start video';
await document.getElementById('video').pause();
status('paused');
document.getElementById('play').style.display = 'block';
document.getElementById('loader').style.display = 'none';
}
const compare = { enabled: false, original: null };
async function calcSimmilarity(result) {
document.getElementById('compare-container').style.display = compare.enabled ? 'block' : 'none';
@@ -280,7 +295,7 @@ async function drawResults(input) {
const avgDraw = ui.drawFPS.length > 0 ? Math.trunc(10 * ui.drawFPS.reduce((a, b) => a + b, 0) / ui.drawFPS.length) / 10 : 0;
const warning = (ui.detectFPS.length > 5) && (avgDetect < 2) ? '<font color="lightcoral">warning: your performance is low: try switching to a higher performance backend, lowering resolution or disabling some models</font>' : '';
const fps = avgDetect > 0 ? `FPS process:${avgDetect} refresh:${avgDraw}` : '';
const backend = engine.state.numTensors > 0 ? `backend: ${human.tf.getBackend()} | ${memory}` : 'running in web worker';
const backend = engine.state.numTensors > 0 ? `${human.tf.getBackend()} | ${memory}` : `${result.backend} | tensors: ${result.tensors} in worker`;
document.getElementById('log').innerHTML = `
video: ${ui.camera.name} | facing: ${ui.camera.facing} | screen: ${window.innerWidth} x ${window.innerHeight} camera: ${ui.camera.width} x ${ui.camera.height} ${processing}<br>
backend: ${backend}<br>
@@ -387,7 +402,7 @@ async function setupCamera() {
canvas.height = video.videoHeight;
ui.menuWidth.input.setAttribute('value', video.videoWidth);
ui.menuHeight.input.setAttribute('value', video.videoHeight);
if (live || ui.autoPlay) video.play();
if (live || ui.autoPlay) videoPlay();
// eslint-disable-next-line no-use-before-define
if ((live || ui.autoPlay) && !ui.detectThread) runHumanDetect(video, canvas);
ui.busy = false;
@@ -485,8 +500,20 @@ function runHumanDetect(input, canvas, timestamp) {
// perform detection in worker
webWorker(input, data, canvas, timestamp);
} else {
if (human.env.initial) status('starting detection');
else status();
human.detect(input, userConfig).then((result) => {
status();
/*
setTimeout(async () => { // simulate gl context loss 2 sec after initial detection
const ext = human.gl && human.gl.gl ? human.gl.gl.getExtension('WEBGL_lose_context') : {};
if (ext && ext.loseContext) {
log('simulate context lost:', human.env.webgl, human.gl, ext);
human.gl.gl.getExtension('WEBGL_lose_context').loseContext();
await videoPause();
status('Exception: WebGL');
}
}, 2000);
*/
if (result.performance && result.performance.total) ui.detectFPS.push(1000 / result.performance.total);
if (ui.detectFPS.length > ui.maxFPSframes) ui.detectFPS.shift();
if (ui.bench) {
@@ -588,10 +615,8 @@ async function processVideo(input, title) {
video.addEventListener('canplay', async () => {
for (const m of Object.values(menu)) m.hide();
document.getElementById('samples-container').style.display = 'none';
document.getElementById('play').style.display = 'none';
canvas.style.display = 'block';
document.getElementById('btnStartText').innerHTML = 'pause video';
await video.play();
await videoPlay();
if (!ui.detectThread) runHumanDetect(video, canvas);
});
video.src = input;
@@ -605,17 +630,14 @@ async function detectVideo() {
canvas.style.display = 'block';
cancelAnimationFrame(ui.detectThread);
if ((video.srcObject !== null) && !video.paused) {
document.getElementById('btnStartText').innerHTML = 'start video';
status('paused');
await video.pause();
await videoPause();
// if (ui.drawThread) cancelAnimationFrame(ui.drawThread);
} else {
const cameraError = await setupCamera();
if (!cameraError) {
status('starting detection');
for (const m of Object.values(menu)) m.hide();
document.getElementById('btnStartText').innerHTML = 'pause video';
await video.play();
await videoPlay();
runHumanDetect(video, canvas);
} else {
status(cameraError);
@@ -904,6 +926,7 @@ async function pwaRegister() {
}
async function main() {
/*
window.addEventListener('unhandledrejection', (evt) => {
// eslint-disable-next-line no-console
console.error(evt.reason || evt);
@@ -911,6 +934,7 @@ async function main() {
status('exception error');
evt.preventDefault();
});
*/
log('demo starting ...');
@@ -1028,6 +1052,7 @@ async function main() {
}
if (human.config.debug) log('environment:', human.env);
if (human.config.backend === 'humangl' && human.config.debug) log('backend:', human.gl);
}
window.onload = main;
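
The commented-out block inside runHumanDetect above simulates a dropped GL context two seconds after detection starts; the same technique works standalone with nothing but the standard WEBGL_lose_context extension. A minimal sketch, with no Human-specific wiring assumed:

const canvas = document.createElement('canvas');
const gl = canvas.getContext('webgl2');
const ext = gl ? gl.getExtension('WEBGL_lose_context') : null;
canvas.addEventListener('webglcontextlost', (e) => {
  e.preventDefault(); // tells the browser the context may be restored later
  console.log('context lost:', e);
});
canvas.addEventListener('webglcontextrestored', (e) => console.log('context restored:', e));
if (ext) {
  ext.loseContext(); // fires 'webglcontextlost'
  setTimeout(() => ext.restoreContext(), 1000); // fires 'webglcontextrestored'
}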

View File

@@ -7,19 +7,23 @@ import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import type { Config } from '../config';
import type { GraphModel, Tensor } from '../tfjs/types';
import { env } from '../env';
let model: GraphModel;
let model: GraphModel | null;
let last = { age: 0 };
let skipped = Number.MAX_SAFE_INTEGER;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export async function load(config: Config | any) {
if (env.initial) model = null;
if (!model) {
model = await tf.loadGraphModel(join(config.modelBasePath, config.face.age.modelPath)) as unknown as GraphModel;
if (!model || !model['modelUrl']) log('load model failed:', config.face.age.modelPath);
else if (config.debug) log('load model:', model['modelUrl']);
} else if (config.debug) log('cached model:', model['modelUrl']);
} else {
if (config.debug) log('cached model:', model['modelUrl']);
}
return model;
}
@@ -32,7 +36,7 @@ export async function predict(image: Tensor, config: Config | any) {
}
skipped = 0;
return new Promise(async (resolve) => {
if (!model.inputs || !model.inputs[0] || !model.inputs[0].shape) return;
if (!model?.inputs || !model.inputs[0] || !model.inputs[0].shape) return;
const resize = tf.image.resizeBilinear(image, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
const enhance = tf.mul(resize, [255.0]);
tf.dispose(resize);

View File

@@ -7,8 +7,9 @@ import * as tf from '../../dist/tfjs.esm.js';
import type { BodyResult } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';
let model: GraphModel;
let model: GraphModel | null;
type Keypoints = { score: number, part: string, position: [number, number], positionRaw: [number, number] };
@@ -21,6 +22,7 @@ let skipped = Number.MAX_SAFE_INTEGER;
const bodyParts = ['head', 'neck', 'rightShoulder', 'rightElbow', 'rightWrist', 'chest', 'leftShoulder', 'leftElbow', 'leftWrist', 'pelvis', 'rightHip', 'rightKnee', 'rightAnkle', 'leftHip', 'leftKnee', 'leftAnkle'];
export async function load(config: Config): Promise<GraphModel> {
if (env.initial) model = null;
if (!model) {
model = await tf.loadGraphModel(join(config.modelBasePath, config.body.modelPath || '')) as unknown as GraphModel;
if (!model || !model['modelUrl']) log('load model failed:', config.body.modelPath);
@@ -54,7 +56,7 @@ export async function predict(image: Tensor, config: Config): Promise<BodyResult
skipped = 0;
return new Promise(async (resolve) => {
const tensor = tf.tidy(() => {
if (!model.inputs[0].shape) return null;
if (!model?.inputs[0].shape) return null;
const resize = tf.image.resizeBilinear(image, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
const enhance = tf.mul(resize, 2);
const norm = enhance.sub(1);
@@ -62,7 +64,7 @@ export async function predict(image: Tensor, config: Config): Promise<BodyResult
});
let resT;
if (config.body.enabled) resT = await model.predict(tensor);
if (config.body.enabled) resT = await model?.predict(tensor);
tf.dispose(tensor);
if (resT) {

View File

@@ -4,11 +4,12 @@
import { log, join } from '../helpers';
import type { Config } from '../config';
import type { Tensor, GraphModel } from '../tfjs/types';
import type { GraphModel, Tensor } from '../tfjs/types';
import * as tf from '../../dist/tfjs.esm.js';
import { env } from '../env';
const annotations = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral'];
let model;
let model: GraphModel | null;
// let last: Array<{ score: number, emotion: string }> = [];
const last: Array<Array<{ score: number, emotion: string }>> = [];
let lastCount = 0;
@@ -18,11 +19,12 @@ let skipped = Number.MAX_SAFE_INTEGER;
const rgb = [0.2989, 0.5870, 0.1140]; // factors for red/green/blue colors when converting to grayscale
export async function load(config: Config): Promise<GraphModel> {
if (env.initial) model = null;
if (!model) {
model = await tf.loadGraphModel(join(config.modelBasePath, config.face.emotion?.modelPath || ''));
if (!model || !model.modelUrl) log('load model failed:', config.face.emotion?.modelPath || '');
else if (config.debug) log('load model:', model.modelUrl);
} else if (config.debug) log('cached model:', model.modelUrl);
model = await tf.loadGraphModel(join(config.modelBasePath, config.face.emotion?.modelPath || '')) as unknown as GraphModel;
if (!model || !model['modelUrl']) log('load model failed:', config.face.emotion?.modelPath || '');
else if (config.debug) log('load model:', model['modelUrl']);
} else if (config.debug) log('cached model:', model['modelUrl']);
return model;
}
@@ -34,7 +36,7 @@ export async function predict(image: Tensor, config: Config, idx, count) {
}
skipped = 0;
return new Promise(async (resolve) => {
const resize = tf.image.resizeBilinear(image, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
const resize = tf.image.resizeBilinear(image, [model?.inputs[0].shape ? model.inputs[0].shape[2] : 0, model?.inputs[0].shape ? model.inputs[0].shape[1] : 0], false);
const [red, green, blue] = tf.split(resize, 3, 3);
tf.dispose(resize);
// weighted rgb to grayscale: https://www.mathworks.com/help/matlab/ref/rgb2gray.html
@@ -52,7 +54,7 @@ export async function predict(image: Tensor, config: Config, idx, count) {
tf.dispose(grayscale);
const obj: Array<{ score: number, emotion: string }> = [];
if (config.face.emotion?.enabled) {
const emotionT = await model.predict(normalize); // result is already in range 0..1, no need for additional activation
const emotionT = await model?.predict(normalize) as Tensor; // result is already in range 0..1, no need for additional activation
const data = await emotionT.data();
tf.dispose(emotionT);
for (let i = 0; i < data.length; i++) {

View File

@@ -8,6 +8,7 @@ export interface Env {
platform: undefined | string,
agent: undefined | string,
backends: string[],
initial: boolean,
tfjs: {
version: undefined | string,
},
@@ -39,6 +40,7 @@ export const env: Env = {
worker: undefined,
platform: undefined,
agent: undefined,
initial: true,
backends: [],
tfjs: {
version: undefined,
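
This new env.initial flag is the hinge of the commit: it starts out true, Human.load() clears it after the first successful run, and the humangl 'webglcontextlost' handler re-arms it so every model module drops its cache. A self-contained sketch of that reload pattern using stand-in types (no Human imports; tf.loadGraphModel is mocked):

type GraphModel = { modelUrl: string }; // stand-in for the tfjs type
const env = { initial: true };
let model: GraphModel | null = null;
async function load(url: string): Promise<GraphModel> {
  if (env.initial) model = null; // first run or lost context: discard the cached model
  if (!model) model = { modelUrl: url }; // stand-in for: await tf.loadGraphModel(url)
  return model;
}
await load('age.json'); // loads
env.initial = false;    // what Human.load() does after the first successful run
await load('age.json'); // served from cache
env.initial = true;     // what the 'webglcontextlost' handler does
await load('age.json'); // reloaded from scratch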

View File

@@ -8,8 +8,9 @@ import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import type { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';
let model: GraphModel;
let model: GraphModel | null;
const last: Array<{
age: number,
gender: string,
@@ -24,6 +25,7 @@ type DB = Array<{ name: string, source: string, embedding: number[] }>;
export async function load(config: Config): Promise<GraphModel> {
const modelUrl = join(config.modelBasePath, config.face.description?.modelPath || '');
if (env.initial) model = null;
if (!model) {
model = await tf.loadGraphModel(modelUrl) as unknown as GraphModel;
if (!model) log('load model failed:', config.face.description?.modelPath || '');
@@ -66,7 +68,7 @@ export function enhance(input): Tensor {
// do a tight crop of image and resize it to fit the model
const box = [[0.05, 0.15, 0.85, 0.85]]; // empirical values for top, left, bottom, right
// const box = [[0.0, 0.0, 1.0, 1.0]]; // basically no crop for test
if (!model.inputs[0].shape) return null; // model has no shape so no point continuing
if (!model?.inputs[0].shape) return null; // model has no shape so no point continuing
const crop = (tensor.shape.length === 3)
? tf.image.cropAndResize(tf.expandDims(tensor, 0), box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) // add batch dimension if missing
: tf.image.cropAndResize(tensor, box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
@@ -128,7 +130,7 @@ export async function predict(image: Tensor, config: Config, idx, count) {
descriptor: <number[]>[],
};
if (config.face.description?.enabled) resT = await model.predict(enhanced);
if (config.face.description?.enabled) resT = await model?.predict(enhanced);
tf.dispose(enhanced);
if (resT) {

View File

@@ -7,8 +7,9 @@ import { log, join } from '../helpers';
import * as tf from '../../dist/tfjs.esm.js';
import type { Config } from '../config';
import type { GraphModel, Tensor } from '../tfjs/types';
import { env } from '../env';
let model: GraphModel;
let model: GraphModel | null;
let last = { gender: '' };
let skipped = Number.MAX_SAFE_INTEGER;
let alternative = false;
@@ -18,6 +19,7 @@ const rgb = [0.2989, 0.5870, 0.1140]; // factors for red/green/blue colors when
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export async function load(config: Config | any) {
if (env.initial) model = null;
if (!model) {
model = await tf.loadGraphModel(join(config.modelBasePath, config.face.gender.modelPath)) as unknown as GraphModel;
alternative = model.inputs[0].shape ? model.inputs[0]?.shape[3] === 1 : false;
@@ -36,7 +38,7 @@ export async function predict(image: Tensor, config: Config | any) {
}
skipped = 0;
return new Promise(async (resolve) => {
if (!model.inputs[0].shape) return;
if (!model?.inputs[0].shape) return;
const resize = tf.image.resizeBilinear(image, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
let enhance;
if (alternative) {

View File

@@ -10,6 +10,7 @@ import * as fingerPose from '../fingerpose/fingerpose';
import type { HandResult } from '../result';
import type { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';
const meshAnnotations = {
thumb: [1, 2, 3, 4],
@@ -79,6 +80,10 @@ export async function predict(input: Tensor, config: Config): Promise<HandResult
}
export async function load(config: Config): Promise<[GraphModel | null, GraphModel | null]> {
if (env.initial) {
handDetectorModel = null;
handPoseModel = null;
}
if (!handDetectorModel || !handPoseModel) {
[handDetectorModel, handPoseModel] = await Promise.all([
config.hand.enabled ? tf.loadGraphModel(join(config.modelBasePath, config.hand.detector?.modelPath || ''), { fromTFHub: (config.hand.detector?.modelPath || '').includes('tfhub.dev') }) as unknown as GraphModel : null,

View File

@@ -25,6 +25,7 @@ import * as persons from './persons';
import * as interpolate from './interpolate';
import * as env from './env';
import * as backend from './tfjs/backend';
import * as humangl from './tfjs/humangl';
import * as app from '../package.json';
import * as warmups from './warmup';
import type { Tensor, GraphModel } from './tfjs/types';
@@ -152,7 +153,8 @@ export class Human {
#numTensors: number;
#analyzeMemoryLeaks: boolean;
#checkSanity: boolean;
initial: boolean;
/** WebGL debug info */
gl: Record<string, unknown>;
// definition end
/**
@@ -173,7 +175,6 @@ export class Human {
this.#numTensors = 0;
this.#analyzeMemoryLeaks = false;
this.#checkSanity = false;
this.initial = true;
this.performance = { backend: 0, load: 0, image: 0, frames: 0, cached: 0, changed: 0, total: 0, draw: 0 };
this.events = new EventTarget();
// object that contains all initialized models
@@ -212,6 +213,8 @@ export class Human {
// export raw access to underlying models
this.faceTriangulation = facemesh.triangulation;
this.faceUVMap = facemesh.uvmap;
// set gl info
this.gl = humangl.config;
// include platform info
this.emit('create');
}
@@ -303,7 +306,7 @@ export class Human {
const count = Object.values(this.models).filter((model) => model).length;
if (userConfig) this.config = mergeDeep(this.config, userConfig) as Config;
if (this.initial) { // print version info on first run and check for correct backend setup
if (env.env.initial) { // print version info on first run and check for correct backend setup
if (this.config.debug) log(`version: ${this.version}`);
if (this.config.debug) log(`tfjs version: ${this.tf.version_core}`);
await backend.check(this);
@@ -315,9 +318,8 @@ export class Human {
}
await models.load(this); // actually loads models
if (this.initial && this.config.debug) log('tf engine state:', this.tf.engine().state.numBytes, 'bytes', this.tf.engine().state.numTensors, 'tensors'); // print memory stats on first run
this.initial = false;
if (env.env.initial && this.config.debug) log('tf engine state:', this.tf.engine().state.numBytes, 'bytes', this.tf.engine().state.numTensors, 'tensors'); // print memory stats on first run
env.env.initial = false;
const loaded = Object.values(this.models).filter((model) => model).length;
if (loaded !== count) { // number of loaded models changed

View File

@@ -18,7 +18,7 @@ let outCanvas;
// @ts-ignore // imagefx is js module that should be converted to a class
let fx: fxImage.GLImageFilter | null; // instance of imagefx
export function canvas(width, height) {
export function canvas(width, height): HTMLCanvasElement | OffscreenCanvas {
let c;
if (env.browser) {
if (typeof OffscreenCanvas !== 'undefined') {
@ -180,9 +180,11 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
} else { // cpu and wasm kernel does not implement efficient fromPixels method
// we can't use the canvas as-is since it already has a context, so we create one more canvas and run fromPixels on its ImageData instead
const tempCanvas = canvas(targetWidth, targetHeight);
if (!tempCanvas) return { tensor: null, canvas: inCanvas };
tempCanvas.width = targetWidth;
tempCanvas.height = targetHeight;
const tempCtx = tempCanvas.getContext('2d');
if (!tempCtx) return { tensor: null, canvas: inCanvas };
tempCtx.drawImage(outCanvas, 0, 0);
const data = tempCtx.getImageData(0, 0, targetWidth, targetHeight);
if (tf.browser && env.browser) {

View File

@@ -11,13 +11,35 @@ import * as movenet from './movenet/movenet';
import * as nanodet from './object/nanodet';
import * as centernet from './object/centernet';
import * as segmentation from './segmentation/segmentation';
import { env } from './env';
// import * as agegenderrace from './gear/agegenderrace';
export function reset(instance) {
// if (instance.config.debug) log('resetting loaded models');
instance.models = {
face: null, // array of models
handpose: null, // array of models
posenet: null,
blazepose: null,
efficientpose: null,
movenet: null,
age: null,
gender: null,
emotion: null,
embedding: null,
nanodet: null,
centernet: null,
faceres: null,
segmentation: null,
};
}
/** Load method preloads all configured models on-demand
* - Not explicitly required as any required model is loaded implicitly on its first run
* @param userConfig?: {@link Config}
*/
export async function load(instance) {
if (env.initial) reset(instance);
if (instance.config.async) { // load models concurrently
[
instance.models.face,
@@ -68,17 +90,23 @@ export async function validate(instance) {
for (const defined of Object.keys(instance.models)) {
if (instance.models[defined]) { // check if model is loaded
let models: GraphModel[] = [];
if (Array.isArray(instance.models[defined])) models = instance.models[defined].map((model) => (model.executor ? model : model.model));
if (Array.isArray(instance.models[defined])) models = instance.models[defined].map((model) => ((model && model.executor) ? model : model.model));
else models = [instance.models[defined]];
for (const model of models) {
if (!model) {
if (instance.config.debug) log('model marked as loaded but not defined:', defined);
continue;
}
const ops: string[] = [];
// @ts-ignore // executor is a private method
const executor = model?.executor;
if (executor) {
if (executor && executor.graph.nodes) {
for (const kernel of Object.values(executor.graph.nodes)) {
const op = (kernel as Op).op.toLowerCase();
if (!ops.includes(op)) ops.push(op);
}
} else {
if (!executor && instance.config.debug) log('model signature not determined:', defined);
}
const missing: string[] = [];
for (const op of ops) {
@@ -90,10 +118,9 @@ export async function validate(instance) {
missing.push(op);
}
}
if (!executor && instance.config.debug) log('model executor not found:', defined);
// log('model validation ops:', defined, ops);
if (missing.length > 0 && instance.config.debug) log('model validation:', defined, missing);
}
}
}
// log.data('ops used by model:', ops);
}

View File

@@ -7,8 +7,9 @@ import * as tf from '../../dist/tfjs.esm.js';
import type { BodyResult } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';
let model: GraphModel;
let model: GraphModel | null;
type Keypoints = { score: number, part: string, position: [number, number], positionRaw: [number, number] };
const keypoints: Array<Keypoints> = [];
@@ -22,6 +23,7 @@ let skipped = Number.MAX_SAFE_INTEGER;
const bodyParts = ['nose', 'leftEye', 'rightEye', 'leftEar', 'rightEar', 'leftShoulder', 'rightShoulder', 'leftElbow', 'rightElbow', 'leftWrist', 'rightWrist', 'leftHip', 'rightHip', 'leftKnee', 'rightKnee', 'leftAnkle', 'rightAnkle'];
export async function load(config: Config): Promise<GraphModel> {
if (env.initial) model = null;
if (!model) {
model = await tf.loadGraphModel(join(config.modelBasePath, config.body.modelPath || '')) as unknown as GraphModel;
if (!model || !model['modelUrl']) log('load model failed:', config.body.modelPath);
@@ -122,7 +124,7 @@ export async function predict(image: Tensor, config: Config): Promise<BodyResult
skipped = 0;
return new Promise(async (resolve) => {
const tensor = tf.tidy(() => {
if (!model.inputs[0].shape) return null;
if (!model?.inputs[0].shape) return null;
let inputSize = model.inputs[0].shape[2];
if (inputSize === -1) inputSize = 256;
const resize = tf.image.resizeBilinear(image, [inputSize, inputSize], false);
@@ -131,7 +133,7 @@ export async function predict(image: Tensor, config: Config): Promise<BodyResult
});
let resT;
if (config.body.enabled) resT = await model.predict(tensor);
if (config.body.enabled) resT = await model?.predict(tensor);
tf.dispose(tensor);
if (!resT) resolve([]);

View File

@@ -10,23 +10,24 @@ import type { GraphModel, Tensor } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../env';
let model;
let model: GraphModel | null;
let inputSize = 0;
let last: ObjectResult[] = [];
let skipped = Number.MAX_SAFE_INTEGER;
export async function load(config: Config): Promise<GraphModel> {
if (env.initial) model = null;
if (!model) {
model = await tf.loadGraphModel(join(config.modelBasePath, config.object.modelPath || ''));
model = await tf.loadGraphModel(join(config.modelBasePath, config.object.modelPath || '')) as unknown as GraphModel;
const inputs = Object.values(model.modelSignature['inputs']);
model.inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : null;
if (!model.inputSize) throw new Error(`Human: Cannot determine model inputSize: ${config.object.modelPath}`);
if (!model || !model.modelUrl) log('load model failed:', config.object.modelPath);
else if (config.debug) log('load model:', model.modelUrl);
} else if (config.debug) log('cached model:', model.modelUrl);
inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 0;
if (!model || !model['modelUrl']) log('load model failed:', config.object.modelPath);
else if (config.debug) log('load model:', model['modelUrl']);
} else if (config.debug) log('cached model:', model['modelUrl']);
return model;
}
async function process(res: Tensor, inputSize, outputShape, config: Config) {
async function process(res: Tensor | null, outputShape, config: Config) {
if (!res) return [];
const results: Array<ObjectResult> = [];
const detections = await res.array();
@@ -81,11 +82,11 @@ export async function predict(input: Tensor, config: Config): Promise<ObjectResu
if (!env.kernels.includes('mod') || !env.kernels.includes('sparsetodense')) return last;
return new Promise(async (resolve) => {
const outputSize = [input.shape[2], input.shape[1]];
const resize = tf.image.resizeBilinear(input, [model.inputSize, model.inputSize]);
const objectT = config.object.enabled ? model.execute(resize, ['tower_0/detections']) : null;
const resize = tf.image.resizeBilinear(input, [inputSize, inputSize]);
const objectT = config.object.enabled ? model?.execute(resize, ['tower_0/detections']) as Tensor : null;
tf.dispose(resize);
const obj = await process(objectT, model.inputSize, outputSize, config);
const obj = await process(objectT, outputSize, config);
last = obj;
resolve(obj);

View File

@@ -17,7 +17,7 @@ let skipped = Number.MAX_SAFE_INTEGER;
const scaleBox = 2.5; // increase box size
export async function load(config: Config): Promise<GraphModel> {
if (!model) {
if (!model || env.initial) {
model = await tf.loadGraphModel(join(config.modelBasePath, config.object.modelPath || ''));
const inputs = Object.values(model.modelSignature['inputs']);
model.inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : null;

View File

@@ -4,33 +4,24 @@ import * as env from '../env';
import * as tf from '../../dist/tfjs.esm.js';
export async function check(instance) {
if (instance.initial || (instance.config.backend && (instance.config.backend.length > 0) && (tf.getBackend() !== instance.config.backend))) {
if (env.env.initial || (instance.config.backend && (instance.config.backend.length > 0) && (tf.getBackend() !== instance.config.backend))) {
const timeStamp = now();
instance.state = 'backend';
/* force backend reload
if (instance.config.backend in tf.engine().registry) {
const backendFactory = tf.findBackendFactory(instance.config.backend);
tf.removeBackend(instance.config.backend);
tf.registerBackend(instance.config.backend, backendFactory);
} else {
log('Backend not registered:', instance.config.backend);
}
*/
if (instance.config.backend && instance.config.backend.length > 0) {
// detect web worker
// @ts-ignore ignore missing type for WorkerGlobalScope as that is the point
if (typeof window === 'undefined' && typeof WorkerGlobalScope !== 'undefined' && instance.config.debug) {
log('running inside web worker');
if (instance.config.debug) log('running inside web worker');
}
// force browser vs node backend
if (env.env.browser && instance.config.backend === 'tensorflow') {
log('override: backend set to tensorflow while running in browser');
if (instance.config.debug) log('override: backend set to tensorflow while running in browser');
instance.config.backend = 'humangl';
}
if (env.env.node && (instance.config.backend === 'webgl' || instance.config.backend === 'humangl')) {
log(`override: backend set to ${instance.config.backend} while running in nodejs`);
if (instance.config.debug) log(`override: backend set to ${instance.config.backend} while running in nodejs`);
instance.config.backend = 'tensorflow';
}
@@ -46,14 +37,14 @@ export async function check(instance) {
}
// check available backends
if (instance.config.backend === 'humangl') humangl.register();
if (instance.config.backend === 'humangl') await humangl.register(instance);
const available = Object.keys(tf.engine().registryFactory);
if (instance.config.debug) log('available backends:', available);
if (!available.includes(instance.config.backend)) {
log(`error: backend ${instance.config.backend} not found in registry`);
instance.config.backend = env.env.node ? 'tensorflow' : 'humangl';
log(`override: setting backend ${instance.config.backend}`);
if (instance.config.debug) log(`override: setting backend ${instance.config.backend}`);
}
if (instance.config.debug) log('setting backend:', instance.config.backend);
@@ -79,6 +70,8 @@ export async function check(instance) {
}
}
tf.ENV.set('WEBGL_DELETE_TEXTURE_THRESHOLD', 0);
// handle webgl & humangl
if (tf.getBackend() === 'humangl') {
tf.ENV.set('CHECK_COMPUTATION_FOR_ERRORS', false);
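
check() only performs this backend dance when env.initial is set or when the requested backend differs from the active one, and application code drives it entirely through the config. A sketch of forcing the choices seen in this diff, assuming the demo-style constructor (exact option plumbing may differ):

const human = new Human.default({ backend: 'humangl', debug: true }); // 'wasm', 'webgl', 'tensorflow' are the other names handled above
await human.load(); // runs check(): registers humangl, logs and overrides invalid choices
console.log('active backend:', human.tf.getBackend());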

View File

@@ -4,16 +4,16 @@
*/
import { log } from '../helpers';
import { env } from '../env';
import * as models from '../models';
import * as tf from '../../dist/tfjs.esm.js';
import * as image from '../image/image';
export const config = {
name: 'humangl',
priority: 99,
priority: 999,
canvas: <null | OffscreenCanvas | HTMLCanvasElement>null,
gl: <null | WebGL2RenderingContext>null,
width: 1024,
height: 1024,
extensions: <string[]> [],
webGLattr: { // https://www.khronos.org/registry/webgl/specs/latest/1.0/#5.2
alpha: false,
@@ -43,27 +43,58 @@ function extensions(): void {
*
* @returns void
*/
export function register(): void {
export async function register(instance): Promise<void> {
// force backend reload if gl context is not valid
if ((config.name in tf.engine().registry) && (!config.gl || !config.gl.getParameter(config.gl.VERSION))) {
log('error: humangl backend invalid context');
log('resetting humangl backend');
models.reset(instance);
await tf.removeBackend(config.name);
await register(instance); // re-register
}
if (!tf.findBackend(config.name)) {
// log('backend registration:', config.name);
try {
config.canvas = image.canvas(100, 100);
config.canvas = await image.canvas(100, 100);
} catch (err) {
log('error: cannot create canvas:', err);
return;
}
try {
config.gl = config.canvas?.getContext('webgl2', config.webGLattr) as WebGL2RenderingContext;
if (config.canvas) {
config.canvas.addEventListener('webglcontextlost', async (e) => {
const err = config.gl?.getError();
log('error: humangl context lost:', err, e);
log('gpu memory usage:', instance.tf.engine().backendInstance.numBytesInGPU);
log('resetting humangl backend');
env.initial = true;
models.reset(instance);
await tf.removeBackend(config.name);
// await register(instance); // re-register
});
config.canvas.addEventListener('webglcontextrestored', (e) => {
log('error: humangl context restored:', e);
});
config.canvas.addEventListener('webglcontextcreationerror', (e) => {
log('error: humangl context create:', e);
});
}
} catch (err) {
log('error: cannot get WebGL2 context:', err);
log('error: cannot get WebGL context:', err);
return;
}
try {
tf.setWebGLContext(2, config.gl);
} catch (err) {
log('error: cannot set WebGL2 context:', err);
log('error: cannot set WebGL context:', err);
return;
}
const current = tf.backend().getGPGPUContext().gl;
if (current) {
log(`humangl webgl version:${current.getParameter(current.VERSION)} renderer:${current.getParameter(current.RENDERER)}`);
} else {
log('error: no current context:', current, config.gl);
}
try {
const ctx = new tf.GPGPUContext(config.gl);
tf.registerBackend(config.name, () => new tf.MathBackendWebGL(ctx), config.priority);