mirror of https://github.com/vladmandic/human
webgl exception handling
parent 5b69a70a62
commit 64c6195342
@@ -34,6 +34,7 @@ let userConfig = {
   warmup: 'none',
+  backend: 'humangl',
   debug: true,
   filter: { enabled: false },
   /*
   wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
   async: false,
@@ -80,7 +81,7 @@ const ui = {
   useWorker: true, // use web workers for processing
   worker: 'index-worker.js',
   maxFPSframes: 10, // keep fps history for how many frames
-  modelsPreload: true, // preload human models on startup
+  modelsPreload: false, // preload human models on startup
   modelsWarmup: false, // warmup human models on startup
   buffered: true, // should output be buffered between frames
   interpolated: true, // should output be interpolated for smoothness between frames
@@ -180,7 +181,7 @@ function status(msg) {
 async function videoPlay() {
   document.getElementById('btnStartText').innerHTML = 'pause video';
   await document.getElementById('video').play();
-  status();
+  // status();
 }

 async function videoPause() {
@@ -337,7 +338,7 @@ async function setupCamera() {
   } catch (err) {
     log(err);
   } finally {
-    status();
+    // status();
   }
   return '';
 }
@@ -394,28 +395,22 @@ async function setupCamera() {
   if (initialCameraAccess) log('selected video source:', track, settings); // log('selected camera:', track.label, 'id:', settings.deviceId);
   ui.camera = { name: track.label.toLowerCase(), width: video.videoWidth, height: video.videoHeight, facing: settings.facingMode === 'user' ? 'front' : 'back' };
   initialCameraAccess = false;
-  const promise = !stream || new Promise((resolve) => {
-    video.onloadeddata = () => {
-      if (settings.width > settings.height) canvas.style.width = '100vw';
-      else canvas.style.height = '100vh';
-      canvas.width = video.videoWidth;
-      canvas.height = video.videoHeight;
-      ui.menuWidth.input.setAttribute('value', video.videoWidth);
-      ui.menuHeight.input.setAttribute('value', video.videoHeight);
-      if (live || ui.autoPlay) videoPlay();
-      // eslint-disable-next-line no-use-before-define
-      if ((live || ui.autoPlay) && !ui.detectThread) runHumanDetect(video, canvas);
-      ui.busy = false;
-      resolve();
-    };
-  });
-  // attach input to video element
-  if (stream) {
-    video.srcObject = stream;
-    return promise;
-  }
-  ui.busy = false;
-  return 'camera stream empty';
+  if (!stream) return 'camera stream empty';
+
+  const ready = new Promise((resolve) => (video.onloadeddata = () => resolve(true)));
+  video.srcObject = stream;
+  await ready;
+  if (settings.width > settings.height) canvas.style.width = '100vw';
+  else canvas.style.height = '100vh';
+  canvas.width = video.videoWidth;
+  canvas.height = video.videoHeight;
+  ui.menuWidth.input.setAttribute('value', video.videoWidth);
+  ui.menuHeight.input.setAttribute('value', video.videoHeight);
+  if (live || ui.autoPlay) await videoPlay();
+  // eslint-disable-next-line no-use-before-define
+  if ((live || ui.autoPlay) && !ui.detectThread) runHumanDetect(video, canvas);
+  return 'camera stream ready';
 }

 function initPerfMonitor() {
@@ -500,9 +495,8 @@ function runHumanDetect(input, canvas, timestamp) {
     // perform detection in worker
     webWorker(input, data, canvas, timestamp);
   } else {
     if (human.env.initial) status('starting detection');
     else status();
     human.detect(input, userConfig).then((result) => {
       status();
       /*
       setTimeout(async () => { // simulate gl context lost 2sec after initial detection
         const ext = human.gl && human.gl.gl ? human.gl.gl.getExtension('WEBGL_lose_context') : {};
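The commented-out simulation above is cut off by the hunk boundary. For illustration only, not part of the commit, a minimal stand-alone version of the same idea could look like this (the timing and the null guard are assumptions):

setTimeout(() => { // simulate gl context lost 2 sec after initial detection
  const ext = human.gl && human.gl.gl ? human.gl.gl.getExtension('WEBGL_lose_context') : null;
  if (ext && ext.loseContext) ext.loseContext(); // triggers the humangl 'webglcontextlost' handler
}, 2000);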
@@ -926,15 +920,16 @@ async function pwaRegister() {
 }

 async function main() {
   /*
   window.addEventListener('unhandledrejection', (evt) => {
     if (ui.detectThread) cancelAnimationFrame(ui.detectThread);
     if (ui.drawThread) cancelAnimationFrame(ui.drawThread);
+    const msg = evt.reason.message || evt.reason || evt;
     // eslint-disable-next-line no-console
-    console.error(evt.reason || evt);
-    document.getElementById('log').innerHTML = evt.reason.message || evt.reason || evt;
-    status('exception error');
+    console.error(msg);
+    document.getElementById('log').innerHTML = msg;
+    status(`exception: ${msg}`);
     evt.preventDefault();
   });
   */

   log('demo starting ...');
@@ -945,7 +940,7 @@ async function main() {
   // sanity check for webworker compatibility
   if (typeof Worker === 'undefined' || typeof OffscreenCanvas === 'undefined') {
     ui.useWorker = false;
-    log('workers are disabled due to missing browser functionality');
+    log('webworker functionality is disabled due to missing browser functionality');
   }

   // register PWA ServiceWorker
@@ -1010,6 +1005,8 @@ async function main() {
     await human.load(userConfig); // this is not required, just pre-loads all models
     const loaded = Object.keys(human.models).filter((a) => human.models[a]);
     log('demo loaded models:', loaded);
+  } else {
+    await human.init();
   }

   // warmup models
@@ -68,7 +68,7 @@ export const options: DrawOptions = {

 const getCanvasContext = (input) => {
   if (input && input.getContext) return input.getContext('2d');
-  throw new Error('Human: Invalid Canvas');
+  throw new Error('invalid canvas');
 };

 const rad2deg = (theta) => Math.round((theta * 180) / Math.PI);
@@ -93,18 +93,19 @@ export async function backendInfo() {
   env.backends = Object.keys(tf.engine().registryFactory);
   env.wasm.supported = typeof WebAssembly !== 'undefined';
   env.wasm.backend = env.backends.includes('wasm');
-  if (env.wasm.supported && env.wasm.backend) {
+  if (env.wasm.supported && env.wasm.backend && tf.getBackend() === 'wasm') {
     env.wasm.simd = await tf.env().getAsync('WASM_HAS_SIMD_SUPPORT');
     env.wasm.multithread = await tf.env().getAsync('WASM_HAS_MULTITHREAD_SUPPORT');
   }

   const c = image.canvas(100, 100);
-  const ctx = c ? c.getContext('webgl2') : undefined;
+  const ctx = c ? c.getContext('webgl2') : undefined; // causes too many gl contexts
   // const ctx = typeof tf.backend().getGPGPUContext !== undefined ? tf.backend().getGPGPUContext : null;
   env.webgl.supported = typeof ctx !== 'undefined';
   env.webgl.backend = env.backends.includes('webgl');
-  if (env.webgl.supported && env.webgl.backend) {
+  if (env.webgl.supported && env.webgl.backend && (tf.getBackend() === 'webgl' || tf.getBackend() === 'humangl')) {
     // @ts-ignore getGPGPUContext only exists on WebGL backend
-    const gl = (tf.backend().gpgpu !== 'undefined') && (tf.backend().getGPGPUContext) ? await tf.backend().getGPGPUContext().gl : null;
+    const gl = tf.backend().gpgpu !== 'undefined' ? await tf.backend().getGPGPUContext().gl : null;
     if (gl) {
       env.webgl.version = gl.getParameter(gl.VERSION);
       env.webgl.renderer = gl.getParameter(gl.RENDERER);
@@ -7,7 +7,7 @@ export function join(folder: string, file: string): string {
   const separator = folder.endsWith('/') ? '' : '/';
   const skipJoin = file.startsWith('.') || file.startsWith('/') || file.startsWith('http:') || file.startsWith('https:') || file.startsWith('file:');
   const path = skipJoin ? `${file}` : `${folder}${separator}${file}`;
-  if (!path.toLocaleLowerCase().includes('.json')) throw new Error(`Human: ModelPath Error: ${path} Expecting JSON file`);
+  if (!path.toLocaleLowerCase().includes('.json')) throw new Error(`modelpath error: ${path} expecting json file`);
   return path;
 }
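For illustration only, not part of the commit: how the join() rules above behave for a few hypothetical inputs (folder names, model names, and urls are made up).

// relative file joined onto the folder, with the separator added as needed
join('https://example.com/models', 'blazeface.json'); // -> 'https://example.com/models/blazeface.json'
// absolute or explicitly-rooted paths skip joining entirely
join('models/', 'https://cdn.example.com/custom.json'); // -> 'https://cdn.example.com/custom.json'
// anything that does not point at a .json manifest throws the (now lowercase) modelpath error
join('models', 'labels.txt'); // throws: modelpath error: models/labels.txt expecting json file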
src/human.ts
@@ -43,13 +43,14 @@ export { env } from './env';
 export type Input = Tensor | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;

 /** Events dispatched by `human.events`
  *
  * - `create`: triggered when Human object is instantiated
  * - `load`: triggered when models are loaded (explicitly or on-demand)
  * - `image`: triggered when input image is this.processed
  * - `result`: triggered when detection is complete
  * - `warmup`: triggered when warmup is complete
  */
- export type Events = 'create' | 'load' | 'image' | 'result' | 'warmup';
+ export type Events = 'create' | 'load' | 'image' | 'result' | 'warmup' | 'error';

 /** Error message
  * @typedef Error Type
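Illustration only, not part of the diff: one way application code could consume the new 'error' event, assuming a constructed instance named `human`.

const human = new Human();
// human.events is a standard EventTarget, so the new 'error' event can be observed directly
human.events?.addEventListener('error', () => {
  // raised for failures such as a lost webgl context, in addition to any thrown error
  console.log('human reported an error');
});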
@@ -61,8 +62,7 @@ export type Error = { error: string };
  */
 export type TensorFlow = typeof tf;

- /**
-  * **Human** library main class
+ /** **Human** library main class
  *
  * All methods and properties are available only as members of Human class
  *
@@ -71,6 +71,7 @@ export type TensorFlow = typeof tf;
  * - Possible inputs: {@link Input}
  *
  * @param userConfig: {@link Config}
  * @return instance
  */
 export class Human {
   /** Current version of Human library in *semver* format */
@@ -95,14 +96,13 @@ export class Human {
   /** currenty processed image tensor and canvas */
   process: { tensor: Tensor | null, canvas: OffscreenCanvas | HTMLCanvasElement | null };

-  /** @internal: Instance of TensorFlow/JS used by Human
-   * - Can be embedded or externally provided
+  /** Instance of TensorFlow/JS used by Human
+   * - Can be embedded or externally provided
+   * @internal
    */
   tf: TensorFlow;

-  /**
-   * Object containing environment information used for diagnostics
-   */
+  /** Object containing environment information used for diagnostics */
   env: env.Env;

   /** Draw helper classes that can draw detected objects on canvas using specified draw
@@ -113,10 +113,11 @@ export class Human {
    * - canvas: draw this.processed canvas which is a this.processed copy of the input
    * - all: meta-function that performs: canvas, face, body, hand
    */
-  // draw: typeof draw;
   draw: { canvas, face, body, hand, gesture, object, person, all, options: DrawOptions };

-  /** @internal: Currently loaded models */
+  /** Currently loaded models
+   * @internal
+   */
   models: {
     face: [unknown, GraphModel | null, GraphModel | null] | null,
     posenet: GraphModel | null,
@@ -142,6 +143,7 @@ export class Human {
    * - `image`: triggered when input image is this.processed
    * - `result`: triggered when detection is complete
    * - `warmup`: triggered when warmup is complete
+   * - `error`: triggered on some errors
    */
   events: EventTarget;
   /** Reference face triangualtion array of 468 points, used for triangle references between points */
@@ -157,9 +159,11 @@ export class Human {
   gl: Record<string, unknown>;
   // definition end

-  /**
-   * Creates instance of Human library that is futher used for all operations
+  /** Constructor for **Human** library that is futher used for all operations
    *
    * @param userConfig: {@link Config}
    *
    * @return instance
    */
   constructor(userConfig?: Partial<Config>) {
     env.get();
@@ -252,7 +256,8 @@ export class Human {
   image = (input: Input) => image.process(input, this.config);

   /** Simmilarity method calculates simmilarity between two provided face descriptors (face embeddings)
-   * - Calculation is based on normalized Minkowski distance between
+   * - Calculation is based on normalized Minkowski distance between two descriptors
    * - Default is Euclidean distance which is Minkowski distance of 2nd order
    *
    * @param embedding1: face descriptor as array of numbers
    * @param embedding2: face descriptor as array of numbers
@@ -263,10 +268,9 @@ export class Human {
     return faceres.similarity(embedding1, embedding2);
   }

-  /**
-   * Segmentation method takes any input and returns this.processed canvas with body segmentation
-   * Optional parameter background is used to fill the background with specific input
-   * Segmentation is not triggered as part of detect this.process
+  /** Segmentation method takes any input and returns this.processed canvas with body segmentation
+   * - Optional parameter background is used to fill the background with specific input
+   * - Segmentation is not triggered as part of detect this.process
    *
    * @param input: {@link Input}
    * @param background?: {@link Input}
@@ -276,7 +280,8 @@ export class Human {
     return input ? segmentation.process(input, background, this.config) : null;
   }

-  /** Enhance method performs additional enhacements to face image previously detected for futher this.processing
+  /** Enhance method performs additional enhacements to face image previously detected for futher processing
    *
    * @param input: Tensor as provided in human.result.face[n].tensor
    * @returns Tensor
    */
@@ -286,6 +291,7 @@ export class Human {
   }

   /** Math method find best match between provided face descriptor and predefined database of known descriptors
    *
    * @param faceEmbedding: face descriptor previsouly calculated on any face
    * @param db: array of mapping of face descriptors to known values
    * @param threshold: minimum score for matching to be considered in the result
@@ -296,9 +302,22 @@ export class Human {
     return faceres.match(faceEmbedding, db, threshold);
   }

+  /** Explicit backend initialization
+   * - Normally done implicitly during initial load phase
+   * - Call to explictly register and initialize TFJS backend without any other operations
+   * - Used in webworker environments where there can be multiple instances of Human and not all initialized
+   *
+   * @return Promise<void>
+   */
+  init() {
+    backend.check(this);
+  }
+
   /** Load method preloads all configured models on-demand
    * - Not explicitly required as any required model is load implicitly on it's first run
    *
    * @param userConfig?: {@link Config}
    * @return Promise<void>
    */
   async load(userConfig?: Partial<Config>) {
     this.state = 'load';
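Illustration only, not part of the diff: how the new explicit init() might be called from a web worker before the first detection (the config value and call order are assumptions).

const human = new Human({ backend: 'humangl' });
await human.init(); // register and initialize the TFJS backend only, no model loads or detection
await human.load(); // optional: pre-load configured models once the backend is ready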
@@ -309,7 +328,7 @@ export class Human {
     if (env.env.initial) { // print version info on first run and check for correct backend setup
       if (this.config.debug) log(`version: ${this.version}`);
       if (this.config.debug) log(`tfjs version: ${this.tf.version_core}`);
-      await backend.check(this);
+      if (!await backend.check(this)) log('error: backend check failed');
       await tf.ready();
       if (this.env.browser) {
         if (this.config.debug) log('configuration:', this.config);
@@ -335,8 +354,7 @@ export class Human {
   /** @hidden */
   emit = (event: string) => this.events?.dispatchEvent(new Event(event));

-  /**
-   * Runs interpolation using last known result and returns smoothened result
+  /** Runs interpolation using last known result and returns smoothened result
    * Interpolation is based on time since last known result so can be called independently
    *
    * @param result?: {@link Result} optional use specific result set to run interpolation on
@@ -536,7 +554,5 @@ export class Human {
   }
 }

- /**
-  * Class Human is also available as default export
-  */
+ /** Class Human as default export */
 export { Human as default };
@@ -32,7 +32,7 @@ export function canvas(width, height): HTMLCanvasElement | OffscreenCanvas {
     // @ts-ignore // env.canvas is an external monkey-patch
     c = (typeof env.Canvas !== 'undefined') ? new env.Canvas(width, height) : null;
   }
-  // if (!c) throw new Error('Human: Cannot create canvas');
+  // if (!c) throw new Error('cannot create canvas');
   return c;
 }
@@ -41,7 +41,7 @@ export function canvas(width, height): HTMLCanvasElement | OffscreenCanvas {
 // input is resized and run through imagefx filter
 export function process(input: Input, config: Config): { tensor: Tensor | null, canvas: OffscreenCanvas | HTMLCanvasElement } {
   let tensor;
-  if (!input) throw new Error('Human: Input is missing');
+  if (!input) throw new Error('input is missing');
   // sanity checks since different browsers do not implement all dom elements
   if (
     !(input instanceof tf.Tensor)
@@ -55,12 +55,12 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
     && !(typeof HTMLCanvasElement !== 'undefined' && input instanceof HTMLCanvasElement)
     && !(typeof OffscreenCanvas !== 'undefined' && input instanceof OffscreenCanvas)
   ) {
-    throw new Error('Human: Input type is not recognized');
+    throw new Error('input type is not recognized');
   }
   if (input instanceof tf.Tensor) {
     // if input is tensor, use as-is
     if ((input as unknown as Tensor).shape && (input as unknown as Tensor).shape.length === 4 && (input as unknown as Tensor).shape[0] === 1 && (input as unknown as Tensor).shape[3] === 3) tensor = tf.clone(input);
-    else throw new Error(`Human: Input tensor shape must be [1, height, width, 3] and instead was ${(input as unknown as Tensor).shape}`);
+    else throw new Error(`input tensor shape must be [1, height, width, 3] and instead was ${(input as unknown as Tensor).shape}`);
   } else {
     // check if resizing will be needed
     if (typeof input['readyState'] !== 'undefined' && input['readyState'] <= 2) {
@@ -89,7 +89,7 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
     else if ((config.filter.height || 0) > 0) targetWidth = originalWidth * ((config.filter.height || 0) / originalHeight);
     if ((config.filter.height || 0) > 0) targetHeight = config.filter.height;
     else if ((config.filter.width || 0) > 0) targetHeight = originalHeight * ((config.filter.width || 0) / originalWidth);
-    if (!targetWidth || !targetHeight) throw new Error('Human: Input cannot determine dimension');
+    if (!targetWidth || !targetHeight) throw new Error('input cannot determine dimension');
     if (!inCanvas || (inCanvas?.width !== targetWidth) || (inCanvas?.height !== targetHeight)) inCanvas = canvas(targetWidth, targetHeight);

     // draw input to our canvas
@@ -176,7 +176,11 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
       tempCanvas.height = targetHeight;
       const tempCtx = tempCanvas.getContext('2d');
       tempCtx?.drawImage(outCanvas, 0, 0);
-      pixels = (tf.browser && env.browser) ? tf.browser.fromPixels(tempCanvas) : null;
+      try {
+        pixels = (tf.browser && env.browser) ? tf.browser.fromPixels(tempCanvas) : null;
+      } catch (err) {
+        throw new Error('browser webgl error');
+      }
     } else { // cpu and wasm kernel does not implement efficient fromPixels method
       // we cant use canvas as-is as it already has a context, so we do a silly one more canvas and do fromPixels on ImageData instead
       const tempCanvas = canvas(targetWidth, targetHeight);
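Not part of the commit, just a sketch of a related pattern: if the webgl-backed fromPixels path throws (for example after a lost context), the same pixels can still be read through the canvas 2d context and ImageData, similar to the cpu/wasm branch above. The fallback choice is an assumption, not what the library does.

let pixels;
try {
  pixels = tf.browser.fromPixels(tempCanvas);
} catch (err) {
  // read the same pixels through the 2d context instead of the gpu path
  const ctx = tempCanvas.getContext('2d');
  const data = ctx.getImageData(0, 0, tempCanvas.width, tempCanvas.height);
  pixels = tf.browser.fromPixels(data);
}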
@@ -206,7 +210,7 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
       tf.dispose(casted);
     } else {
       tensor = tf.zeros([1, targetWidth, targetHeight, 3]);
-      throw new Error('Human: Cannot create tensor from input');
+      throw new Error('cannot create tensor from input');
     }
   }
 }
@@ -17,7 +17,7 @@ function GLProgram(gl, vertexSource, fragmentSource) {
     const shader = gl.createShader(type);
     gl.shaderSource(shader, source);
     gl.compileShader(shader);
-    if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) throw new Error('Filter: GL compile failed', gl.getShaderInfoLog(shader));
+    if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) throw new Error('filter: gl compile failed', gl.getShaderInfoLog(shader));
     return shader;
   };
@@ -30,7 +30,7 @@ function GLProgram(gl, vertexSource, fragmentSource) {
   gl.attachShader(this.id, _fsh);
   gl.linkProgram(this.id);

-  if (!gl.getProgramParameter(this.id, gl.LINK_STATUS)) throw new Error('Filter: GL link failed', gl.getProgramInfoLog(this.id));
+  if (!gl.getProgramParameter(this.id, gl.LINK_STATUS)) throw new Error('filter: gl link failed', gl.getProgramInfoLog(this.id));

   gl.useProgram(this.id);
   // Collect attributes
@@ -61,7 +61,7 @@ export function GLImageFilter(params) {
   const _shaderProgramCache = { };
   const DRAW = { INTERMEDIATE: 1 };
   const gl = _canvas.getContext('webgl');
-  if (!gl) throw new Error('Filter: getContext() failed');
+  if (!gl) throw new Error('filter: context failed');

   this.addFilter = function (name) {
     // eslint-disable-next-line prefer-rest-params
@@ -21,7 +21,7 @@ export async function load(config: Config): Promise<GraphModel> {
     model = await tf.loadGraphModel(join(config.modelBasePath, config.object.modelPath || ''));
     const inputs = Object.values(model.modelSignature['inputs']);
     model.inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : null;
-    if (!model.inputSize) throw new Error(`Human: Cannot determine model inputSize: ${config.object.modelPath}`);
+    if (!model.inputSize) throw new Error(`cannot determine model inputSize: ${config.object.modelPath}`);
     if (!model || !model.modelUrl) log('load model failed:', config.object.modelPath);
     else if (config.debug) log('load model:', model.modelUrl);
   } else if (config.debug) log('cached model:', model.modelUrl);
@@ -53,25 +53,22 @@ export async function check(instance) {
   if (instance.config.backend === 'wasm') {
     if (instance.config.debug) log('wasm path:', instance.config.wasmPath);
     if (typeof tf?.setWasmPaths !== 'undefined') await tf.setWasmPaths(instance.config.wasmPath);
-    else throw new Error('Human: WASM backend is not loaded');
+    else throw new Error('wasm backend is not loaded');
     const simd = await tf.env().getAsync('WASM_HAS_SIMD_SUPPORT');
     const mt = await tf.env().getAsync('WASM_HAS_MULTITHREAD_SUPPORT');
     if (instance.config.debug) log(`wasm execution: ${simd ? 'SIMD' : 'no SIMD'} ${mt ? 'multithreaded' : 'singlethreaded'}`);
     if (instance.config.debug && !simd) log('warning: wasm simd support is not enabled');
   }

-  await tf.setBackend(instance.config.backend);

+  try {
+    await tf.setBackend(instance.config.backend);
+    await tf.ready();
+  } catch (err) {
+    log('error: cannot set backend:', instance.config.backend, err);
+    return false;
+  }
 }

 tf.ENV.set('WEBGL_DELETE_TEXTURE_THRESHOLD', 0);

 // handle webgl & humangl
 if (tf.getBackend() === 'humangl') {
   tf.ENV.set('CHECK_COMPUTATION_FOR_ERRORS', false);
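For illustration only, not part of the diff: the same guard pattern expressed against plain tfjs, so the boolean result can drive a fallback. The helper name and the fallback backend are assumptions.

import * as tf from '@tensorflow/tfjs';

async function trySetBackend(name) {
  try {
    await tf.setBackend(name); // may reject if the backend cannot be registered
    await tf.ready();          // resolves once kernels for the active backend are initialized
    return true;
  } catch (err) {
    console.log('error: cannot set backend:', name, err);
    return false;
  }
}

// usage: prefer webgl, but keep running on cpu if webgl initialization fails
if (!await trySetBackend('webgl')) await trySetBackend('cpu');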
@@ -97,4 +94,5 @@ export async function check(instance) {
     env.get(); // update env on backend init
     instance.env = env.env;
   }
+  return true;
 }
@@ -4,10 +4,10 @@
  */

 import { log } from '../helpers';
- import { env } from '../env';
- import * as models from '../models';
 import * as tf from '../../dist/tfjs.esm.js';
 import * as image from '../image/image';
+ import * as models from '../models';
+ // import { env } from '../env';

 export const config = {
   name: 'humangl',
@@ -47,10 +47,12 @@ export async function register(instance): Promise<void> {
   // force backend reload if gl context is not valid
   if ((config.name in tf.engine().registry) && (!config.gl || !config.gl.getParameter(config.gl.VERSION))) {
     log('error: humangl backend invalid context');
     log('resetting humangl backend');
     models.reset(instance);
     /*
     log('resetting humangl backend');
     await tf.removeBackend(config.name);
     await register(instance); // re-register
     */
   }
   if (!tf.findBackend(config.name)) {
     try {
@@ -63,14 +65,18 @@ export async function register(instance): Promise<void> {
     config.gl = config.canvas?.getContext('webgl2', config.webGLattr) as WebGL2RenderingContext;
     if (config.canvas) {
       config.canvas.addEventListener('webglcontextlost', async (e) => {
         const err = config.gl?.getError();
         log('error: humangl context lost:', err, e);
-        log('gpu memory usage:', instance.tf.engine().backendInstance.numBytesInGPU);
+        log('error: humangl:', e.type);
+        // log('gpu memory usage:', instance.tf.engine().backendInstance.numBytesInGPU);
         log('possible browser memory leak using webgl');
+        instance.emit('error');
+        throw new Error('browser webgl error');
         /*
         log('resetting humangl backend');
         env.initial = true;
         models.reset(instance);
         await tf.removeBackend(config.name);
-        // await register(instance); // re-register
+        await register(instance); // re-register
         */
       });
       config.canvas.addEventListener('webglcontextrestored', (e) => {
         log('error: humangl context restored:', e);
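Illustration only, not part of the diff: the standard browser contract for these two events, shown against a stand-alone canvas. Calling preventDefault() in the lost handler is what allows webglcontextrestored to fire later; the demo's commented-out simulation relies on the same WEBGL_lose_context extension.

const testCanvas = document.createElement('canvas');
const testGl = testCanvas.getContext('webgl2');
testCanvas.addEventListener('webglcontextlost', (e) => {
  e.preventDefault(); // without this the context is never restored
  console.log('context lost:', e.type);
});
testCanvas.addEventListener('webglcontextrestored', (e) => console.log('context restored:', e.type));
// force loss and recovery for testing
const ext = testGl ? testGl.getExtension('WEBGL_lose_context') : null;
if (ext) {
  ext.loseContext();
  setTimeout(() => ext.restoreContext(), 2000);
}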