diff --git a/.eslintrc.json b/.eslintrc.json
index 77718bc2..b1558e10 100644
--- a/.eslintrc.json
+++ b/.eslintrc.json
@@ -4,11 +4,11 @@
     "browser": true,
     "commonjs": true,
     "node": true,
-    "es2020": true
+    "es2021": true
   },
   "parser": "@typescript-eslint/parser",
   "parserOptions": {
-    "ecmaVersion": 2020
+    "ecmaVersion": 2021
   },
   "plugins": [
     "@typescript-eslint"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 369751a8..f56d9e68 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,7 +9,10 @@
 
 ## Changelog
 
-### **HEAD -> main** 2021/10/03 mandic00@live.com
+### **HEAD -> main** 2021/10/04 mandic00@live.com
+
+
+### **origin/main** 2021/10/03 mandic00@live.com
 
 - added docker notes
 - breaking change: new similarity and match methods
diff --git a/demo/index.js b/demo/index.js
index 7586a74d..0f5790b5 100644
--- a/demo/index.js
+++ b/demo/index.js
@@ -31,13 +31,6 @@ import jsonView from './helpers/jsonview.js';
 let human;
 
 let userConfig = {
-  face: { enabled: false },
-  object: { enabled: false },
-  gesture: { enabled: true },
-  hand: { enabled: false },
-  body: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/blazepose-lite.json' },
-  segmentation: { enabled: false },
-
   /*
   warmup: 'none',
   backend: 'humangl',
@@ -98,6 +91,7 @@ const ui = {
   autoPlay: false, // start webcam & detection on load
 
   // internal variables
+  exceptionHandler: false, // should capture all unhandled exceptions
   busy: false, // internal camera busy flag
   menuWidth: 0, // internal
   menuHeight: 0, // internal
@@ -115,7 +109,6 @@ const ui = {
   lastFrame: 0, // time of last frame processing
   viewportSet: false, // internal, has custom viewport been set
   background: null, // holds instance of segmentation background image
-  exceptionHandler: false, // should capture all unhandled exceptions
 
   // webrtc
   useWebRTC: false, // use webrtc as camera source instead of local webcam
diff --git a/package.json b/package.json
index f93af31a..e0c5663f 100644
--- a/package.json
+++ b/package.json
@@ -67,8 +67,8 @@
     "@tensorflow/tfjs-node": "^3.9.0",
     "@tensorflow/tfjs-node-gpu": "^3.9.0",
     "@types/node": "^16.10.2",
-    "@typescript-eslint/eslint-plugin": "^4.32.0",
-    "@typescript-eslint/parser": "^4.32.0",
+    "@typescript-eslint/eslint-plugin": "^4.33.0",
+    "@typescript-eslint/parser": "^4.33.0",
     "@vladmandic/build": "^0.5.3",
     "@vladmandic/pilogger": "^0.3.3",
     "canvas": "^2.8.0",
@@ -84,7 +84,7 @@
     "rimraf": "^3.0.2",
     "seedrandom": "^3.0.5",
     "tslib": "^2.3.1",
-    "typedoc": "0.22.4",
+    "typedoc": "0.22.5",
     "typescript": "4.4.3"
   }
 }
diff --git a/src/body/blazepose.ts b/src/body/blazepose.ts
index 5ffa0226..1d1a6a89 100644
--- a/src/body/blazepose.ts
+++ b/src/body/blazepose.ts
@@ -2,7 +2,7 @@
  * BlazePose model implementation
  */
 
-import * as tf from '@tensorflow/tfjs';
+import * as tf from '../../dist/tfjs.esm.js';
 import { log, join } from '../util/util';
 import type { BodyKeypoint, BodyResult, Box, Point } from '../result';
 import type { GraphModel, Tensor } from '../tfjs/types';
@@ -87,8 +87,8 @@ async function prepareImage(input: Tensor): Promise<Tensor> {
     [input.shape[1] > input.shape[2] ? Math.trunc((input.shape[1] - input.shape[2]) / 2) : 0, input.shape[1] > input.shape[2] ? Math.trunc((input.shape[1] - input.shape[2]) / 2) : 0], // width before&after
     [0, 0], // dont touch rbg
   ];
-  t.pad = tf.pad(input as tf.Tensor4D, padding);
-  t.resize = tf.image.resizeBilinear(t.pad as tf.Tensor4D, [inputSize[1][0], inputSize[1][1]]);
+  t.pad = tf.pad(input, padding);
+  t.resize = tf.image.resizeBilinear(t.pad, [inputSize[1][0], inputSize[1][1]]);
   const final = tf.div(t.resize, 255);
   Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
   return final;
diff --git a/src/config.ts b/src/config.ts
index 1f498bba..ae66be17 100644
--- a/src/config.ts
+++ b/src/config.ts
@@ -411,7 +411,7 @@ const config: Config = {
     // only valid for posenet and movenet-multipose as other models detects single pose
     // set to -1 to autodetect based on number of detected faces
     minConfidence: 0.2, // threshold for discarding a prediction
-    skipFrames: 5, // how many max frames to go without re-running the detector
+    skipFrames: 1, // how many max frames to go without re-running the detector
     // only used when cacheSensitivity is not zero
   },
 
diff --git a/src/image/image.ts b/src/image/image.ts
index 356a1d88..8a58db0a 100644
--- a/src/image/image.ts
+++ b/src/image/image.ts
@@ -183,8 +183,6 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
       tempCanvas.height = targetHeight;
       const tempCtx = tempCanvas.getContext('2d');
       tempCtx?.drawImage(outCanvas, 0, 0);
-      console.log('PIXELS', tempCanvas);
-      pixels = (tf.browser && env.browser) ? tf.browser.fromPixels(tempCanvas) : null;
       try {
         pixels = (tf.browser && env.browser) ? tf.browser.fromPixels(tempCanvas) : null;
       } catch (err) {
diff --git a/src/tfjs/humangl.ts b/src/tfjs/humangl.ts
index 7d5033f9..5e24db73 100644
--- a/src/tfjs/humangl.ts
+++ b/src/tfjs/humangl.ts
@@ -65,9 +65,9 @@ export async function register(instance): Promise<void> {
     config.canvas.addEventListener('webglcontextlost', async (e) => {
       log('error: humangl:', e.type);
       // log('gpu memory usage:', instance.tf.engine().backendInstance.numBytesInGPU);
-      log('possible browser memory leak using webgl');
+      log('possible browser memory leak using webgl or conflict with multiple backend registrations');
      instance.emit('error');
-      // throw new Error('browser webgl error');
+      throw new Error('browser webgl error');
      /*
       log('resetting humangl backend');
       env.initial = true;