fix backend conflict in webworker

pull/356/head
Vladimir Mandic 2021-10-04 17:03:36 -04:00
parent e0ef7c5b1e
commit 0e9195dca3
8 changed files with 16 additions and 22 deletions

View File

@@ -4,11 +4,11 @@
"browser": true,
"commonjs": true,
"node": true,
"es2020": true
"es2021": true
},
"parser": "@typescript-eslint/parser",
"parserOptions": {
"ecmaVersion": 2020
"ecmaVersion": 2021
},
"plugins": [
"@typescript-eslint"

View File

@@ -9,7 +9,10 @@
## Changelog
- ### **HEAD -> main** 2021/10/03 mandic00@live.com
+ ### **HEAD -> main** 2021/10/04 mandic00@live.com
+ ### **origin/main** 2021/10/03 mandic00@live.com
- added docker notes
- breaking change: new similarity and match methods

View File

@@ -31,13 +31,6 @@ import jsonView from './helpers/jsonview.js';
let human;
let userConfig = {
- face: { enabled: false },
- object: { enabled: false },
- gesture: { enabled: true },
- hand: { enabled: false },
- body: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/blazepose-lite.json' },
- segmentation: { enabled: false },
/*
warmup: 'none',
backend: 'humangl',
@@ -98,6 +91,7 @@
autoPlay: false, // start webcam & detection on load
// internal variables
+ exceptionHandler: false, // should capture all unhandled exceptions
busy: false, // internal camera busy flag
menuWidth: 0, // internal
menuHeight: 0, // internal
@@ -115,7 +109,6 @@
lastFrame: 0, // time of last frame processing
viewportSet: false, // internal, has custom viewport been set
background: null, // holds instance of segmentation background image
- exceptionHandler: false, // should capture all unhandled exceptions
// webrtc
useWebRTC: false, // use webrtc as camera source instead of local webcam
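The demo hunks above drop the hardcoded userConfig module overrides and move the exceptionHandler flag up into the ui option block. A minimal sketch, assuming the published @vladmandic/human browser API, of how such a partial userConfig is applied (values are illustrative, not the demo's exact config):

// minimal sketch, assuming the @vladmandic/human API; values are illustrative
import Human from '@vladmandic/human';

const userConfig = {
  backend: 'humangl',       // same backend name the commented-out demo options reference
  body: { enabled: true },  // enable only what the page needs; everything else falls back to defaults
};

async function run(video: HTMLVideoElement) {
  const human = new Human(userConfig);  // partial config is merged over library defaults
  const result = await human.detect(video);
  console.log(result.body);
}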

View File

@@ -67,8 +67,8 @@
"@tensorflow/tfjs-node": "^3.9.0",
"@tensorflow/tfjs-node-gpu": "^3.9.0",
"@types/node": "^16.10.2",
"@typescript-eslint/eslint-plugin": "^4.32.0",
"@typescript-eslint/parser": "^4.32.0",
"@typescript-eslint/eslint-plugin": "^4.33.0",
"@typescript-eslint/parser": "^4.33.0",
"@vladmandic/build": "^0.5.3",
"@vladmandic/pilogger": "^0.3.3",
"canvas": "^2.8.0",
@@ -84,7 +84,7 @@
"rimraf": "^3.0.2",
"seedrandom": "^3.0.5",
"tslib": "^2.3.1",
"typedoc": "0.22.4",
"typedoc": "0.22.5",
"typescript": "4.4.3"
}
}

View File

@@ -2,7 +2,7 @@
* BlazePose model implementation
*/
- import * as tf from '@tensorflow/tfjs';
+ import * as tf from '../../dist/tfjs.esm.js';
import { log, join } from '../util/util';
import type { BodyKeypoint, BodyResult, Box, Point } from '../result';
import type { GraphModel, Tensor } from '../tfjs/types';
@@ -87,8 +87,8 @@ async function prepareImage(input: Tensor): Promise<Tensor> {
[input.shape[1] > input.shape[2] ? Math.trunc((input.shape[1] - input.shape[2]) / 2) : 0, input.shape[1] > input.shape[2] ? Math.trunc((input.shape[1] - input.shape[2]) / 2) : 0], // width before&after
[0, 0], // dont touch rbg
];
- t.pad = tf.pad(input as tf.Tensor4D, padding);
- t.resize = tf.image.resizeBilinear(t.pad as tf.Tensor4D, [inputSize[1][0], inputSize[1][1]]);
+ t.pad = tf.pad(input, padding);
+ t.resize = tf.image.resizeBilinear(t.pad, [inputSize[1][0], inputSize[1][1]]);
const final = tf.div(t.resize, 255);
Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
return final;
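The prepareImage change above simply drops the explicit Tensor4D casts now that tf is imported from the bundled tfjs.esm.js. For clarity, a standalone sketch of the pad-to-square, resize, and normalize steps this function performs (hypothetical helper using plain @tensorflow/tfjs, not the library's exact code):

// hypothetical helper sketching the pad-to-square + resize + normalize done by prepareImage
import * as tf from '@tensorflow/tfjs';

function padToSquareAndResize(input: tf.Tensor4D, size: number): tf.Tensor {
  const [, height, width] = input.shape;
  const diff = Math.abs(height - width);
  const padH = height < width ? Math.trunc(diff / 2) : 0; // pad top & bottom when width is larger
  const padW = width < height ? Math.trunc(diff / 2) : 0; // pad left & right when height is larger
  const padding: [number, number][] = [[0, 0], [padH, padH], [padW, padW], [0, 0]]; // leave batch & rgb untouched
  const padded = tf.pad(input, padding);
  const resized = tf.image.resizeBilinear(padded, [size, size]);
  const normalized = tf.div(resized, 255); // scale pixel values to 0..1
  tf.dispose([padded, resized]);
  return normalized;
}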

View File

@@ -411,7 +411,7 @@ const config: Config = {
// only valid for posenet and movenet-multipose as other models detects single pose
// set to -1 to autodetect based on number of detected faces
minConfidence: 0.2, // threshold for discarding a prediction
- skipFrames: 5, // how many max frames to go without re-running the detector
+ skipFrames: 1, // how many max frames to go without re-running the detector
// only used when cacheSensitivity is not zero
},
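The default above drops skipFrames from 5 to 1, so the detector is re-run far more often whenever cacheSensitivity is non-zero. A minimal sketch of overriding it per instance (assuming this hunk is the body section of the default config, as the posenet/movenet-multipose comment suggests):

// minimal sketch, assuming @vladmandic/human; overrides only the fields shown above
import Human from '@vladmandic/human';

const human = new Human({
  body: {
    enabled: true,
    minConfidence: 0.2, // threshold for discarding a prediction
    skipFrames: 1,      // max frames to go without re-running the detector
  },
});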

View File

@@ -183,8 +183,6 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
tempCanvas.height = targetHeight;
const tempCtx = tempCanvas.getContext('2d');
tempCtx?.drawImage(outCanvas, 0, 0);
- console.log('PIXELS', tempCanvas);
- pixels = (tf.browser && env.browser) ? tf.browser.fromPixels(tempCanvas) : null;
try {
pixels = (tf.browser && env.browser) ? tf.browser.fromPixels(tempCanvas) : null;
} catch (err) {
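The hunk above removes a leftover console.log and the unguarded fromPixels call, leaving only the try/catch-protected one. A standalone sketch of that guarded pattern (hypothetical helper using plain @tensorflow/tfjs, not the library's code):

// hypothetical helper showing the guarded tf.browser.fromPixels pattern used above
import * as tf from '@tensorflow/tfjs';

function canvasToTensor(canvas: HTMLCanvasElement): tf.Tensor3D | null {
  let pixels: tf.Tensor3D | null = null;
  try {
    pixels = tf.browser.fromPixels(canvas); // may throw if the canvas or backend is unavailable
  } catch (err) {
    console.warn('fromPixels failed:', err); // fall through and return null instead of crashing
  }
  return pixels;
}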

View File

@@ -65,9 +65,9 @@ export async function register(instance): Promise<void> {
config.canvas.addEventListener('webglcontextlost', async (e) => {
log('error: humangl:', e.type);
// log('gpu memory usage:', instance.tf.engine().backendInstance.numBytesInGPU);
- log('possible browser memory leak using webgl');
+ log('possible browser memory leak using webgl or conflict with multiple backend registrations');
instance.emit('error');
- // throw new Error('browser webgl error');
+ throw new Error('browser webgl error');
/*
log('resetting humangl backend');
env.initial = true;
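The handler above now names the possible conflict with multiple backend registrations and throws instead of silently continuing when the WebGL context is lost. For background, a generic sketch of the browser's context-loss events (standard WebGL API, not humangl's actual recovery logic):

// generic WebGL context-loss handling sketch; not the library's implementation
const canvas = document.createElement('canvas');
const gl = canvas.getContext('webgl2'); // a live context must exist for loss events to fire

canvas.addEventListener('webglcontextlost', (e) => {
  e.preventDefault();                    // tells the browser the context may be restored
  console.error('webgl context lost', (e as WebGLContextEvent).statusMessage);
});
canvas.addEventListener('webglcontextrestored', () => {
  console.log('webgl context restored'); // recreate textures, buffers, and programs here
});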