webgl exception handling

pull/280/head
Vladimir Mandic 2021-09-17 14:07:44 -04:00
parent 22364c9583
commit d5d2afee0f
10 changed files with 108 additions and 86 deletions

View File

@@ -34,6 +34,7 @@ let userConfig = {
   warmup: 'none',
   backend: 'humangl',
   debug: true,
+  filter: { enabled: false },
   /*
   wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
   async: false,
@@ -80,7 +81,7 @@ const ui = {
   useWorker: true, // use web workers for processing
   worker: 'index-worker.js',
   maxFPSframes: 10, // keep fps history for how many frames
-  modelsPreload: true, // preload human models on startup
+  modelsPreload: false, // preload human models on startup
   modelsWarmup: false, // warmup human models on startup
   buffered: true, // should output be buffered between frames
   interpolated: true, // should output be interpolated for smoothness between frames
@@ -180,7 +181,7 @@ function status(msg) {
 async function videoPlay() {
   document.getElementById('btnStartText').innerHTML = 'pause video';
   await document.getElementById('video').play();
-  status();
+  // status();
 }
 
 async function videoPause() {
@@ -337,7 +338,7 @@ async function setupCamera() {
   } catch (err) {
     log(err);
   } finally {
-    status();
+    // status();
   }
   return '';
 }
@@ -394,28 +395,22 @@ async function setupCamera() {
   if (initialCameraAccess) log('selected video source:', track, settings); // log('selected camera:', track.label, 'id:', settings.deviceId);
   ui.camera = { name: track.label.toLowerCase(), width: video.videoWidth, height: video.videoHeight, facing: settings.facingMode === 'user' ? 'front' : 'back' };
   initialCameraAccess = false;
-  const promise = !stream || new Promise((resolve) => {
-    video.onloadeddata = () => {
-      if (settings.width > settings.height) canvas.style.width = '100vw';
-      else canvas.style.height = '100vh';
-      canvas.width = video.videoWidth;
-      canvas.height = video.videoHeight;
-      ui.menuWidth.input.setAttribute('value', video.videoWidth);
-      ui.menuHeight.input.setAttribute('value', video.videoHeight);
-      if (live || ui.autoPlay) videoPlay();
-      // eslint-disable-next-line no-use-before-define
-      if ((live || ui.autoPlay) && !ui.detectThread) runHumanDetect(video, canvas);
-      ui.busy = false;
-      resolve();
-    };
-  });
-  // attach input to video element
-  if (stream) {
-    video.srcObject = stream;
-    return promise;
-  }
-  ui.busy = false;
-  return 'camera stream empty';
+  if (!stream) return 'camera stream empty';
+
+  const ready = new Promise((resolve) => (video.onloadeddata = () => resolve(true)));
+  video.srcObject = stream;
+  await ready;
+  if (settings.width > settings.height) canvas.style.width = '100vw';
+  else canvas.style.height = '100vh';
+  canvas.width = video.videoWidth;
+  canvas.height = video.videoHeight;
+  ui.menuWidth.input.setAttribute('value', video.videoWidth);
+  ui.menuHeight.input.setAttribute('value', video.videoHeight);
+  if (live || ui.autoPlay) await videoPlay();
+  // eslint-disable-next-line no-use-before-define
+  if ((live || ui.autoPlay) && !ui.detectThread) runHumanDetect(video, canvas);
+  return 'camera stream ready';
 }
 
 function initPerfMonitor() {
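Note: the camera refactor above replaces the callback-style promise with linear awaits. The pattern in isolation (a minimal sketch, not the full demo code): wrap the one-shot onloadeddata callback in a promise, attach the stream, then await readiness before reading the video dimensions.

    // sketch: await a media element becoming ready before reading its dimensions
    async function attachStream(video, stream) {
      if (!stream) return 'camera stream empty';
      const ready = new Promise((resolve) => (video.onloadeddata = () => resolve(true)));
      video.srcObject = stream; // attach input to video element
      await ready;              // resolves once the first frame is decoded
      return 'camera stream ready';
    }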
@@ -500,9 +495,8 @@ function runHumanDetect(input, canvas, timestamp) {
     // perform detection in worker
     webWorker(input, data, canvas, timestamp);
   } else {
-    if (human.env.initial) status('starting detection');
-    else status();
     human.detect(input, userConfig).then((result) => {
+      status();
       /*
      setTimeout(async () => { // simulate gl context lost 2sec after initial detection
        const ext = human.gl && human.gl.gl ? human.gl.gl.getExtension('WEBGL_lose_context') : {};
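Note: the commented block above simulates a lost gl context for testing. The standard WEBGL_lose_context extension does the forcing; a standalone sketch:

    // sketch: force and restore a webgl context loss to exercise the handlers
    const gl = document.createElement('canvas').getContext('webgl2');
    const ext = gl ? gl.getExtension('WEBGL_lose_context') : null;
    if (ext) {
      ext.loseContext();                            // fires 'webglcontextlost'
      setTimeout(() => ext.restoreContext(), 2000); // fires 'webglcontextrestored'
    }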
@@ -926,15 +920,16 @@ async function pwaRegister() {
 }
 
 async function main() {
-  /*
   window.addEventListener('unhandledrejection', (evt) => {
+    if (ui.detectThread) cancelAnimationFrame(ui.detectThread);
+    if (ui.drawThread) cancelAnimationFrame(ui.drawThread);
+    const msg = evt.reason.message || evt.reason || evt;
     // eslint-disable-next-line no-console
-    console.error(evt.reason || evt);
-    document.getElementById('log').innerHTML = evt.reason.message || evt.reason || evt;
-    status('exception error');
+    console.error(msg);
+    document.getElementById('log').innerHTML = msg;
+    status(`exception: ${msg}`);
     evt.preventDefault();
   });
-  */
 
   log('demo starting ...');
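Note: enabling the previously commented-out unhandledrejection listener is what surfaces the new webgl exceptions to the demo UI. Reduced to its essentials (a sketch, without the demo's ui bookkeeping):

    // sketch: surface unhandled promise rejections instead of failing silently
    window.addEventListener('unhandledrejection', (evt) => {
      const msg = (evt.reason && evt.reason.message) || evt.reason || evt;
      console.error(msg);   // or route to any on-page log element
      evt.preventDefault(); // suppress the browser's default reporting
    });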
@@ -945,7 +940,7 @@ async function main() {
   // sanity check for webworker compatibility
   if (typeof Worker === 'undefined' || typeof OffscreenCanvas === 'undefined') {
     ui.useWorker = false;
-    log('workers are disabled due to missing browser functionality');
+    log('webworker functionality is disabled due to missing browser functionality');
   }
 
   // register PWA ServiceWorker
@@ -1010,6 +1005,8 @@ async function main() {
     await human.load(userConfig); // this is not required, just pre-loads all models
     const loaded = Object.keys(human.models).filter((a) => human.models[a]);
     log('demo loaded models:', loaded);
+  } else {
+    await human.init();
   }
 
   // warmup models
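Note: with modelsPreload now false, the demo takes the new else branch and only initializes the backend, deferring model loads to first detection. The two paths side by side (a sketch, assuming a configured human instance):

    // sketch: preload everything vs. initialize backend only
    if (ui.modelsPreload) await human.load(userConfig); // downloads all configured models
    else await human.init();                            // registers/readies tfjs backend, loads nothing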

View File

@@ -68,7 +68,7 @@ export const options: DrawOptions = {
 const getCanvasContext = (input) => {
   if (input && input.getContext) return input.getContext('2d');
-  throw new Error('Human: Invalid Canvas');
+  throw new Error('invalid canvas');
 };
 
 const rad2deg = (theta) => Math.round((theta * 180) / Math.PI);

View File

@@ -93,18 +93,19 @@ export async function backendInfo() {
   env.backends = Object.keys(tf.engine().registryFactory);
   env.wasm.supported = typeof WebAssembly !== 'undefined';
   env.wasm.backend = env.backends.includes('wasm');
-  if (env.wasm.supported && env.wasm.backend) {
+  if (env.wasm.supported && env.wasm.backend && tf.getBackend() === 'wasm') {
     env.wasm.simd = await tf.env().getAsync('WASM_HAS_SIMD_SUPPORT');
     env.wasm.multithread = await tf.env().getAsync('WASM_HAS_MULTITHREAD_SUPPORT');
   }
 
   const c = image.canvas(100, 100);
-  const ctx = c ? c.getContext('webgl2') : undefined;
+  const ctx = c ? c.getContext('webgl2') : undefined; // causes too many gl contexts
+  // const ctx = typeof tf.backend().getGPGPUContext !== undefined ? tf.backend().getGPGPUContext : null;
   env.webgl.supported = typeof ctx !== 'undefined';
   env.webgl.backend = env.backends.includes('webgl');
-  if (env.webgl.supported && env.webgl.backend) {
+  if (env.webgl.supported && env.webgl.backend && (tf.getBackend() === 'webgl' || tf.getBackend() === 'humangl')) {
     // @ts-ignore getGPGPUContext only exists on WebGL backend
-    const gl = (tf.backend().gpgpu !== 'undefined') && (tf.backend().getGPGPUContext) ? await tf.backend().getGPGPUContext().gl : null;
+    const gl = tf.backend().gpgpu !== 'undefined' ? await tf.backend().getGPGPUContext().gl : null;
     if (gl) {
       env.webgl.version = gl.getParameter(gl.VERSION);
       env.webgl.renderer = gl.getParameter(gl.RENDERER);
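Note: the added tf.getBackend() guards ensure backend-specific flags are only queried when that backend is actually active, which avoids probing contexts just for diagnostics. The wasm half of the guard, standalone (a sketch using standard tfjs calls):

    // sketch: query wasm capability flags only when wasm is the active backend
    if (tf.getBackend() === 'wasm') {
      const simd = await tf.env().getAsync('WASM_HAS_SIMD_SUPPORT');
      const multithread = await tf.env().getAsync('WASM_HAS_MULTITHREAD_SUPPORT');
      console.log('wasm capabilities:', { simd, multithread });
    }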

View File

@@ -7,7 +7,7 @@ export function join(folder: string, file: string): string {
   const separator = folder.endsWith('/') ? '' : '/';
   const skipJoin = file.startsWith('.') || file.startsWith('/') || file.startsWith('http:') || file.startsWith('https:') || file.startsWith('file:');
   const path = skipJoin ? `${file}` : `${folder}${separator}${file}`;
-  if (!path.toLocaleLowerCase().includes('.json')) throw new Error(`Human: ModelPath Error: ${path} Expecting JSON file`);
+  if (!path.toLocaleLowerCase().includes('.json')) throw new Error(`modelpath error: ${path} expecting json file`);
   return path;
 }
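Note: for reference, how join() resolves paths under these rules (illustrative values only, not from the test suite):

    join('https://host/models', 'facemesh.json'); // 'https://host/models/facemesh.json'
    join('models/', 'facemesh.json');             // 'models/facemesh.json' (no double slash)
    join('models', '/abs/facemesh.json');         // '/abs/facemesh.json' (skipJoin: absolute path)
    join('models', 'facemesh.bin');               // throws: modelpath error ... expecting json file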

View File

@@ -43,13 +43,14 @@ export { env } from './env';
 export type Input = Tensor | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;
 
 /** Events dispatched by `human.events`
+ *
  * - `create`: triggered when Human object is instantiated
  * - `load`: triggered when models are loaded (explicitly or on-demand)
  * - `image`: triggered when input image is this.processed
  * - `result`: triggered when detection is complete
  * - `warmup`: triggered when warmup is complete
  */
-export type Events = 'create' | 'load' | 'image' | 'result' | 'warmup';
+export type Events = 'create' | 'load' | 'image' | 'result' | 'warmup' | 'error';
 
 /** Error message
  * @typedef Error Type
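Note: since events is a plain EventTarget, the new 'error' event can be observed like any DOM event; this is how a caller can react to the webgl failures this commit starts emitting (a minimal sketch, assuming a constructed instance):

    // sketch: react to the new 'error' event, e.g. a lost webgl context
    const human = new Human(userConfig);
    human.events.addEventListener('error', () => {
      // stop detection loops, show a message, or re-create the instance
      console.error('human signaled an internal error');
    });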
@@ -61,8 +62,7 @@ export type Error = { error: string };
  */
 export type TensorFlow = typeof tf;
 
-/**
- * **Human** library main class
+/** **Human** library main class
  *
  * All methods and properties are available only as members of Human class
  *
@@ -71,6 +71,7 @@ export type TensorFlow = typeof tf;
  * - Possible inputs: {@link Input}
  *
  * @param userConfig: {@link Config}
+ * @return instance
  */
 export class Human {
   /** Current version of Human library in *semver* format */
@@ -95,14 +96,13 @@ export class Human {
   /** currenty processed image tensor and canvas */
   process: { tensor: Tensor | null, canvas: OffscreenCanvas | HTMLCanvasElement | null };
 
-  /** @internal: Instance of TensorFlow/JS used by Human
+  /** Instance of TensorFlow/JS used by Human
    * - Can be embedded or externally provided
+   * @internal
    */
   tf: TensorFlow;
 
-  /**
-   * Object containing environment information used for diagnostics
-   */
+  /** Object containing environment information used for diagnostics */
   env: env.Env;
 
   /** Draw helper classes that can draw detected objects on canvas using specified draw
@@ -113,10 +113,11 @@ export class Human {
    * - canvas: draw this.processed canvas which is a this.processed copy of the input
    * - all: meta-function that performs: canvas, face, body, hand
    */
-  // draw: typeof draw;
   draw: { canvas, face, body, hand, gesture, object, person, all, options: DrawOptions };
 
-  /** @internal: Currently loaded models */
+  /** Currently loaded models
+   * @internal
+   */
   models: {
     face: [unknown, GraphModel | null, GraphModel | null] | null,
     posenet: GraphModel | null,
@@ -142,6 +143,7 @@ export class Human {
    * - `image`: triggered when input image is this.processed
    * - `result`: triggered when detection is complete
    * - `warmup`: triggered when warmup is complete
+   * - `error`: triggered on some errors
    */
   events: EventTarget;
 
   /** Reference face triangualtion array of 468 points, used for triangle references between points */
@@ -157,9 +159,11 @@ export class Human {
   gl: Record<string, unknown>;
   // definition end
 
-  /**
-   * Creates instance of Human library that is futher used for all operations
+  /** Constructor for **Human** library that is futher used for all operations
+   *
    * @param userConfig: {@link Config}
+   *
+   * @return instance
    */
   constructor(userConfig?: Partial<Config>) {
     env.get();
@@ -252,7 +256,8 @@ export class Human {
   image = (input: Input) => image.process(input, this.config);
 
   /** Simmilarity method calculates simmilarity between two provided face descriptors (face embeddings)
-   * - Calculation is based on normalized Minkowski distance between
+   * - Calculation is based on normalized Minkowski distance between two descriptors
+   * - Default is Euclidean distance which is Minkowski distance of 2nd order
    *
    * @param embedding1: face descriptor as array of numbers
    * @param embedding2: face descriptor as array of numbers
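Note: the clarified doc comment pins down the metric: Minkowski distance of order p, with p = 2 (Euclidean) as the default. A hypothetical helper just to illustrate the formula, not Human's internal implementation:

    // minkowski(a, b, p) = (sum_i |a_i - b_i|^p)^(1/p); p = 2 is euclidean distance
    function minkowski(a: number[], b: number[], p = 2): number {
      const sum = a.reduce((acc, v, i) => acc + Math.abs(v - (b[i] || 0)) ** p, 0);
      return sum ** (1 / p);
    }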
@@ -263,10 +268,9 @@ export class Human {
     return faceres.similarity(embedding1, embedding2);
   }
 
-  /**
-   * Segmentation method takes any input and returns this.processed canvas with body segmentation
-   * Optional parameter background is used to fill the background with specific input
-   * Segmentation is not triggered as part of detect this.process
+  /** Segmentation method takes any input and returns this.processed canvas with body segmentation
+   * - Optional parameter background is used to fill the background with specific input
+   * - Segmentation is not triggered as part of detect this.process
    *
    * @param input: {@link Input}
    * @param background?: {@link Input}
@@ -276,7 +280,8 @@ export class Human {
     return input ? segmentation.process(input, background, this.config) : null;
   }
 
-  /** Enhance method performs additional enhacements to face image previously detected for futher this.processing
+  /** Enhance method performs additional enhacements to face image previously detected for futher processing
+   *
    * @param input: Tensor as provided in human.result.face[n].tensor
    * @returns Tensor
    */
@@ -286,6 +291,7 @@ export class Human {
   }
 
   /** Math method find best match between provided face descriptor and predefined database of known descriptors
+   *
    * @param faceEmbedding: face descriptor previsouly calculated on any face
    * @param db: array of mapping of face descriptors to known values
    * @param threshold: minimum score for matching to be considered in the result
@@ -296,9 +302,22 @@ export class Human {
     return faceres.match(faceEmbedding, db, threshold);
   }
 
+  /** Explicit backend initialization
+   * - Normally done implicitly during initial load phase
+   * - Call to explictly register and initialize TFJS backend without any other operations
+   * - Used in webworker environments where there can be multiple instances of Human and not all initialized
+   *
+   * @return Promise<void>
+   */
+  init() {
+    backend.check(this);
+  }
+
   /** Load method preloads all configured models on-demand
    * - Not explicitly required as any required model is load implicitly on it's first run
+   *
    * @param userConfig?: {@link Config}
+   * @return Promise<void>
    */
   async load(userConfig?: Partial<Config>) {
     this.state = 'load';
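Note: the new init() is the piece the demo's else branch calls: it runs the backend check without loading any models, which matters when several webworkers each hold their own Human instance. Typical usage (a sketch, mirroring the demo change earlier in this commit):

    // sketch: per-worker explicit backend initialization; models load on first detect
    const human = new Human(userConfig);
    await human.init();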
@@ -309,7 +328,7 @@ export class Human {
     if (env.env.initial) { // print version info on first run and check for correct backend setup
       if (this.config.debug) log(`version: ${this.version}`);
       if (this.config.debug) log(`tfjs version: ${this.tf.version_core}`);
-      await backend.check(this);
+      if (!await backend.check(this)) log('error: backend check failed');
       await tf.ready();
       if (this.env.browser) {
         if (this.config.debug) log('configuration:', this.config);
@@ -335,8 +354,7 @@ export class Human {
   /** @hidden */
   emit = (event: string) => this.events?.dispatchEvent(new Event(event));
 
-  /**
-   * Runs interpolation using last known result and returns smoothened result
+  /** Runs interpolation using last known result and returns smoothened result
    * Interpolation is based on time since last known result so can be called independently
    *
    * @param result?: {@link Result} optional use specific result set to run interpolation on
@@ -536,7 +554,5 @@ export class Human {
   }
 }
 
-/**
- * Class Human is also available as default export
- */
+/** Class Human as default export */
 export { Human as default };

View File

@@ -32,7 +32,7 @@ export function canvas(width, height): HTMLCanvasElement | OffscreenCanvas {
     // @ts-ignore // env.canvas is an external monkey-patch
     c = (typeof env.Canvas !== 'undefined') ? new env.Canvas(width, height) : null;
   }
-  // if (!c) throw new Error('Human: Cannot create canvas');
+  // if (!c) throw new Error('cannot create canvas');
   return c;
 }
@@ -41,7 +41,7 @@ export function canvas(width, height): HTMLCanvasElement | OffscreenCanvas {
 // input is resized and run through imagefx filter
 export function process(input: Input, config: Config): { tensor: Tensor | null, canvas: OffscreenCanvas | HTMLCanvasElement } {
   let tensor;
-  if (!input) throw new Error('Human: Input is missing');
+  if (!input) throw new Error('input is missing');
   // sanity checks since different browsers do not implement all dom elements
   if (
     !(input instanceof tf.Tensor)
@@ -55,12 +55,12 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
     && !(typeof HTMLCanvasElement !== 'undefined' && input instanceof HTMLCanvasElement)
     && !(typeof OffscreenCanvas !== 'undefined' && input instanceof OffscreenCanvas)
   ) {
-    throw new Error('Human: Input type is not recognized');
+    throw new Error('input type is not recognized');
   }
   if (input instanceof tf.Tensor) {
     // if input is tensor, use as-is
     if ((input as unknown as Tensor).shape && (input as unknown as Tensor).shape.length === 4 && (input as unknown as Tensor).shape[0] === 1 && (input as unknown as Tensor).shape[3] === 3) tensor = tf.clone(input);
-    else throw new Error(`Human: Input tensor shape must be [1, height, width, 3] and instead was ${(input as unknown as Tensor).shape}`);
+    else throw new Error(`input tensor shape must be [1, height, width, 3] and instead was ${(input as unknown as Tensor).shape}`);
   } else {
     // check if resizing will be needed
     if (typeof input['readyState'] !== 'undefined' && input['readyState'] <= 2) {
@@ -89,7 +89,7 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
     else if ((config.filter.height || 0) > 0) targetWidth = originalWidth * ((config.filter.height || 0) / originalHeight);
     if ((config.filter.height || 0) > 0) targetHeight = config.filter.height;
     else if ((config.filter.width || 0) > 0) targetHeight = originalHeight * ((config.filter.width || 0) / originalWidth);
-    if (!targetWidth || !targetHeight) throw new Error('Human: Input cannot determine dimension');
+    if (!targetWidth || !targetHeight) throw new Error('input cannot determine dimension');
     if (!inCanvas || (inCanvas?.width !== targetWidth) || (inCanvas?.height !== targetHeight)) inCanvas = canvas(targetWidth, targetHeight);
 
     // draw input to our canvas
@@ -176,7 +176,11 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
         tempCanvas.height = targetHeight;
         const tempCtx = tempCanvas.getContext('2d');
         tempCtx?.drawImage(outCanvas, 0, 0);
-        pixels = (tf.browser && env.browser) ? tf.browser.fromPixels(tempCanvas) : null;
+        try {
+          pixels = (tf.browser && env.browser) ? tf.browser.fromPixels(tempCanvas) : null;
+        } catch (err) {
+          throw new Error('browser webgl error');
+        }
       } else { // cpu and wasm kernel does not implement efficient fromPixels method
         // we cant use canvas as-is as it already has a context, so we do a silly one more canvas and do fromPixels on ImageData instead
         const tempCanvas = canvas(targetWidth, targetHeight);
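Note: the new try/catch converts a low-level fromPixels failure (typically a lost webgl context) into the catchable 'browser webgl error'. The cpu/wasm branch already routes through ImageData, which is also the manual fallback when the gl-backed path is unavailable (a sketch; tempCanvas stands in for any 2d canvas):

    // sketch: fromPixels via ImageData, avoiding the gl-backed canvas path
    const ctx = tempCanvas.getContext('2d');
    const data = ctx.getImageData(0, 0, tempCanvas.width, tempCanvas.height);
    const pixels = tf.browser.fromPixels(data);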
@@ -206,7 +210,7 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
       tf.dispose(casted);
     } else {
       tensor = tf.zeros([1, targetWidth, targetHeight, 3]);
-      throw new Error('Human: Cannot create tensor from input');
+      throw new Error('cannot create tensor from input');
     }
   }
 }

View File

@@ -17,7 +17,7 @@ function GLProgram(gl, vertexSource, fragmentSource) {
     const shader = gl.createShader(type);
     gl.shaderSource(shader, source);
     gl.compileShader(shader);
-    if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) throw new Error('Filter: GL compile failed', gl.getShaderInfoLog(shader));
+    if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) throw new Error('filter: gl compile failed', gl.getShaderInfoLog(shader));
     return shader;
   };
@@ -30,7 +30,7 @@ function GLProgram(gl, vertexSource, fragmentSource) {
   gl.attachShader(this.id, _fsh);
   gl.linkProgram(this.id);
 
-  if (!gl.getProgramParameter(this.id, gl.LINK_STATUS)) throw new Error('Filter: GL link failed', gl.getProgramInfoLog(this.id));
+  if (!gl.getProgramParameter(this.id, gl.LINK_STATUS)) throw new Error('filter: gl link failed', gl.getProgramInfoLog(this.id));
   gl.useProgram(this.id);
 
   // Collect attributes
@@ -61,7 +61,7 @@ export function GLImageFilter(params) {
   const _shaderProgramCache = { };
   const DRAW = { INTERMEDIATE: 1 };
   const gl = _canvas.getContext('webgl');
-  if (!gl) throw new Error('Filter: getContext() failed');
+  if (!gl) throw new Error('filter: context failed');
 
   this.addFilter = function (name) {
     // eslint-disable-next-line prefer-rest-params
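Note: gl.compileShader() and gl.linkProgram() do not throw on failure, so the explicit COMPILE_STATUS/LINK_STATUS checks above are the only error signal. Also, Error() takes a single message argument, so the info log passed as a second argument is silently dropped; folding it into the message would preserve it (a sketch of that variant, not what this commit does):

    // sketch: include the shader info log in the thrown message
    if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
      throw new Error(`filter: gl compile failed: ${gl.getShaderInfoLog(shader)}`);
    }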

View File

@@ -21,7 +21,7 @@ export async function load(config: Config): Promise<GraphModel> {
     model = await tf.loadGraphModel(join(config.modelBasePath, config.object.modelPath || ''));
     const inputs = Object.values(model.modelSignature['inputs']);
     model.inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : null;
-    if (!model.inputSize) throw new Error(`Human: Cannot determine model inputSize: ${config.object.modelPath}`);
+    if (!model.inputSize) throw new Error(`cannot determine model inputSize: ${config.object.modelPath}`);
     if (!model || !model.modelUrl) log('load model failed:', config.object.modelPath);
     else if (config.debug) log('load model:', model.modelUrl);
   } else if (config.debug) log('cached model:', model.modelUrl);

View File

@@ -53,25 +53,22 @@ export async function check(instance) {
     if (instance.config.backend === 'wasm') {
       if (instance.config.debug) log('wasm path:', instance.config.wasmPath);
       if (typeof tf?.setWasmPaths !== 'undefined') await tf.setWasmPaths(instance.config.wasmPath);
-      else throw new Error('Human: WASM backend is not loaded');
+      else throw new Error('wasm backend is not loaded');
       const simd = await tf.env().getAsync('WASM_HAS_SIMD_SUPPORT');
       const mt = await tf.env().getAsync('WASM_HAS_MULTITHREAD_SUPPORT');
       if (instance.config.debug) log(`wasm execution: ${simd ? 'SIMD' : 'no SIMD'} ${mt ? 'multithreaded' : 'singlethreaded'}`);
       if (instance.config.debug && !simd) log('warning: wasm simd support is not enabled');
     }
 
-    await tf.setBackend(instance.config.backend);
     try {
       await tf.setBackend(instance.config.backend);
       await tf.ready();
     } catch (err) {
       log('error: cannot set backend:', instance.config.backend, err);
+      return false;
     }
   }
 
-  tf.ENV.set('WEBGL_DELETE_TEXTURE_THRESHOLD', 0);
-
   // handle webgl & humangl
   if (tf.getBackend() === 'humangl') {
     tf.ENV.set('CHECK_COMPUTATION_FOR_ERRORS', false);
@@ -97,4 +94,5 @@ export async function check(instance) {
     env.get(); // update env on backend init
     instance.env = env.env;
   }
+  return true;
 }
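Note: check() now has a boolean contract: false when setBackend fails, true once the environment is refreshed. Callers can branch on it instead of continuing with a half-initialized backend, as human.load() now does (sketch of the caller side):

    // sketch: consume the new boolean result of backend.check()
    if (!await backend.check(instance)) log('error: backend check failed');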

View File

@@ -4,10 +4,10 @@
  */
 
 import { log } from '../helpers';
-import { env } from '../env';
-import * as models from '../models';
 import * as tf from '../../dist/tfjs.esm.js';
 import * as image from '../image/image';
+import * as models from '../models';
+// import { env } from '../env';
 
 export const config = {
   name: 'humangl',
name: 'humangl', name: 'humangl',
@@ -47,10 +47,12 @@ export async function register(instance): Promise<void> {
   // force backend reload if gl context is not valid
   if ((config.name in tf.engine().registry) && (!config.gl || !config.gl.getParameter(config.gl.VERSION))) {
     log('error: humangl backend invalid context');
-    log('resetting humangl backend');
     models.reset(instance);
+    /*
+    log('resetting humangl backend');
     await tf.removeBackend(config.name);
     await register(instance); // re-register
+    */
   }
   if (!tf.findBackend(config.name)) {
     try {
@@ -63,14 +65,18 @@ export async function register(instance): Promise<void> {
       config.gl = config.canvas?.getContext('webgl2', config.webGLattr) as WebGL2RenderingContext;
       if (config.canvas) {
         config.canvas.addEventListener('webglcontextlost', async (e) => {
-          const err = config.gl?.getError();
-          log('error: humangl context lost:', err, e);
-          log('gpu memory usage:', instance.tf.engine().backendInstance.numBytesInGPU);
+          log('error: humangl:', e.type);
+          // log('gpu memory usage:', instance.tf.engine().backendInstance.numBytesInGPU);
+          log('possible browser memory leak using webgl');
+          instance.emit('error');
+          throw new Error('browser webgl error');
+          /*
           log('resetting humangl backend');
           env.initial = true;
           models.reset(instance);
           await tf.removeBackend(config.name);
-          // await register(instance); // re-register
+          await register(instance); // re-register
+          */
         });
         config.canvas.addEventListener('webglcontextrestored', (e) => {
           log('error: humangl context restored:', e);