mirror of https://github.com/vladmandic/human

commit 0e20bfe665: exception handling
parent e1f285f314
@@ -1,6 +1,6 @@
 # @vladmandic/human

-Version: **1.4.0**
+Version: **1.4.1**
 Description: **Human: AI-powered 3D Face Detection, Face Description & Recognition, Body Pose Tracking, Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction & Gesture Recognition**

 Author: **Vladimir Mandic <mandic00@live.com>**

@@ -9,8 +9,9 @@ Repository: **<git+https://github.com/vladmandic/human.git>**

 ## Changelog

-### **HEAD -> main** 2021/04/08 mandic00@live.com
+### **1.4.1** 2021/04/09 mandic00@live.com
+
 - add modelbasepath option

 ### **1.3.5** 2021/04/06 mandic00@live.com
@@ -30,9 +30,10 @@ Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) fo

 ## Demos

-- [**Demo Application**](https://vladmandic.github.io/human/demo/index.html)
+- [**Main Application**](https://vladmandic.github.io/human/demo/index.html)
 - [**Face Extraction, Description, Identification and Matching**](https://vladmandic.github.io/human/demo/facematch.html)
+- [**Face Extraction and 3D Rendering**](https://vladmandic.github.io/human/demo/face3d.html)
 - [**Details on Demo Applications**](https://github.com/vladmandic/human/wiki/Demos)

 ## Project pages

@@ -47,7 +48,6 @@ Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) fo
 ## Wiki pages
-
 - [**Home**](https://github.com/vladmandic/human/wiki)
 - [**Demos**](https://github.com/vladmandic/human/wiki/Demos)
 - [**Installation**](https://github.com/vladmandic/human/wiki/Install)
 - [**Usage & Functions**](https://github.com/vladmandic/human/wiki/Usage)
 - [**Configuration Details**](https://github.com/vladmandic/human/wiki/Configuration)
@@ -185,7 +185,7 @@ For more info, see [**Configuration Details**](https://github.com/vladmandic/hum

 <br><hr><br>

-`Human` library is written in `TypeScript` [4.3](https://www.typescriptlang.org/docs/handbook/intro.html)
+`Human` library is written in `TypeScript` [4.2](https://www.typescriptlang.org/docs/handbook/intro.html)
 Conforming to `JavaScript` [ECMAScript version 2020](https://www.ecma-international.org/ecma-262/11.0/index.html) standard
 Build target is `JavaScript` **ECMAScript version 2018**
@@ -1,5 +1,3 @@
-// @ts-nocheck
-
 let instance = 0;
 let CSScreated = false;
@@ -5,6 +5,7 @@ import Menu from './helpers/menu.js';
 import GLBench from './helpers/gl-bench.js';

 const userConfig = { backend: 'webgl' }; // add any user configuration overrides
+let human;

 /*
 const userConfig = {
@@ -42,6 +43,7 @@ const ui = {
   console: true, // log messages to browser console
   maxFPSframes: 10, // keep fps history for how many frames
   modelsPreload: true, // preload human models on startup
+  modelsWarmup: true, // warmup human models on startup
   busy: false, // internal camera busy flag
   menuWidth: 0, // internal
   menuHeight: 0, // internal
@@ -89,12 +91,6 @@ function status(msg) {
   if (div) div.innerText = msg;
 }

-const human = new Human(userConfig);
-if (typeof tf !== 'undefined') {
-  log('TensorFlow external version:', tf.version);
-  human.tf = tf; // use externally loaded version of tfjs
-}
-
 const compare = { enabled: false, original: null };
 async function calcSimmilariry(result) {
   document.getElementById('compare-container').style.display = compare.enabled ? 'block' : 'none';
@@ -564,20 +560,57 @@ async function drawWarmup(res) {

 async function main() {
   log('demo starting ...');

+  // parse url search params
+  const params = new URLSearchParams(location.search);
+  log('url options:', params.toString());
+  if (params.has('worker')) {
+    ui.useWorker = JSON.parse(params.get('worker'));
+    log('overriding worker:', ui.useWorker);
+  }
+  if (params.has('backend')) {
+    userConfig.backend = JSON.parse(params.get('backend'));
+    log('overriding backend:', userConfig.backend);
+  }
+  if (params.has('preload')) {
+    ui.modelsPreload = JSON.parse(params.get('preload'));
+    log('overriding preload:', ui.modelsPreload);
+  }
+  if (params.has('warmup')) {
+    ui.modelsWarmup = JSON.parse(params.get('warmup'));
+    log('overriding warmup:', ui.modelsWarmup);
+  }
+
+  // create instance of human
+  human = new Human(userConfig);
+  if (typeof tf !== 'undefined') {
+    log('TensorFlow external version:', tf.version);
+    human.tf = tf; // use externally loaded version of tfjs
+  }
+
+  // setup main menu
   setupMenu();
   document.getElementById('log').innerText = `Human: version ${human.version}`;

+  // preload models
   if (ui.modelsPreload && !ui.useWorker) {
     status('loading');
     await human.load(userConfig); // this is not required, just pre-loads all models
     const loaded = Object.keys(human.models).filter((a) => human.models[a]);
     log('demo loaded models:', loaded);
   }
-  if (!ui.useWorker) {
+
+  // warmup models
+  if (ui.modelsWarmup && !ui.useWorker) {
     status('initializing');
     const res = await human.warmup(userConfig); // this is not required, just pre-warms all models for faster initial inference
     if (res && res.canvas && ui.drawWarmup) await drawWarmup(res);
   }

+  // setup camera
   await setupCamera();

+  // ready
   status('human: ready');
   document.getElementById('loader').style.display = 'none';
   document.getElementById('play').style.display = 'block';
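Note on the override pattern above: each recognized query parameter is run through `JSON.parse`, so `?worker=false` arrives as a real boolean rather than the string `"false"`. The flip side is that `JSON.parse` throws on bare words (`JSON.parse('webgl')` is a `SyntaxError`), so string values need JSON quoting in the URL unless a fallback is added. A minimal sketch of the same idea with such a fallback (`applyOverrides`, `target`, and `keys` are illustrative names, not part of the demo):

```ts
// sketch only: typed query-string overrides with a fallback for bare strings
function applyOverrides(target: Record<string, unknown>, keys: string[]): void {
  const params = new URLSearchParams(location.search);
  for (const key of keys) {
    const raw = params.get(key);
    if (raw === null) continue;
    try {
      target[key] = JSON.parse(raw); // 'false' -> false, '10' -> 10, '"webgl"' -> 'webgl'
    } catch {
      target[key] = raw; // bare words like ?backend=webgl stay as plain strings
    }
  }
}

// usage: applyOverrides(ui, ['worker', 'preload', 'warmup']);
```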
@@ -64,8 +64,7 @@ export const hand = (res) => {
   for (let i = 0; i < res.length; i++) {
     const fingers: Array<{ name: string, position: number }> = [];
     for (const [finger, pos] of Object.entries(res[i]['annotations'])) {
-      // @ts-ignore
-      if (finger !== 'palmBase') fingers.push({ name: finger.toLowerCase(), position: pos[0] }); // get tip of each finger
+      if (finger !== 'palmBase' && Array.isArray(pos)) fingers.push({ name: finger.toLowerCase(), position: pos[0] }); // get tip of each finger
     }
     if (fingers && fingers.length > 0) {
       const closest = fingers.reduce((best, a) => (best.position[2] < a.position[2] ? best : a));
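The `Array.isArray(pos)` guard does double duty: it is a runtime check that an annotation entry really is a keypoint array before it gets indexed, and it narrows the type of `pos` for the compiler, which is what lets the `// @ts-ignore` go away. A small self-contained illustration of the same narrowing (the sample data here is made up):

```ts
// a runtime guard that also narrows the type, replacing @ts-ignore
const annotations: Record<string, unknown> = {
  thumb: [[0.1, 0.2, 0.3]], // hypothetical keypoints: [x, y, z]
  palmBase: null,           // not a keypoint array
};
for (const [finger, pos] of Object.entries(annotations)) {
  if (finger !== 'palmBase' && Array.isArray(pos)) {
    console.log(finger, pos[0]); // safe: pos is narrowed to any[]
  }
}
```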
src/human.ts
@@ -124,7 +124,7 @@ export class Human {
     faceres: null,
   };
   // export access to image processing
-  // @ts-ignore
+  // @ts-ignore // typescript cannot infer type
   this.image = (input: Input) => image.process(input, this.config);
   // export raw access to underlying models
   this.classes = {
@@ -214,9 +214,9 @@ export class Human {
       this.models.gender,
       this.models.emotion,
       this.models.embedding,
-      // @ts-ignore
+      // @ts-ignore // typescript cannot infer type
       this.models.handpose,
-      // @ts-ignore false warning with latest @typescript-eslint
+      // @ts-ignore // typescript cannot infer type
       this.models.posenet,
       this.models.blazepose,
       this.models.efficientpose,
@@ -422,15 +422,14 @@ export class Human {
     if (this.config.async) {
       [faceRes, bodyRes, handRes, objectRes] = await Promise.all([faceRes, bodyRes, handRes, objectRes]);
     }
-    process.tensor.dispose();
+    tf.dispose(process.tensor);

     if (this.config.scoped) this.tf.engine().endScope();
     this.analyze('End Scope:');

-    let gestureRes = [];
+    let gestureRes: any[] = [];
     if (this.config.gesture.enabled) {
       timeStamp = now();
-      // @ts-ignore
       gestureRes = [...gesture.face(faceRes), ...gesture.body(bodyRes), ...gesture.hand(handRes), ...gesture.iris(faceRes)];
       if (!this.config.async) this.perf.gesture = Math.trunc(now() - timeStamp);
       else if (this.perf.gesture) delete this.perf.gesture;
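The `process.tensor.dispose()` to `tf.dispose(process.tensor)` swap is the exception-handling point of this hunk: calling `.dispose()` on a missing tensor throws a `TypeError`, while the static `tf.dispose()` accepts a missing value (and containers of tensors) and simply skips what it cannot dispose. A quick sketch, assuming the standard `@tensorflow/tfjs` API:

```ts
import * as tf from '@tensorflow/tfjs';

let maybeTensor: tf.Tensor | undefined;
// maybeTensor.dispose();                 // would throw when the tensor is missing
tf.dispose(maybeTensor);                  // static helper: safe no-op for undefined
tf.dispose([tf.scalar(1), tf.scalar(2)]); // also walks containers of tensors
```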
@@ -507,8 +506,8 @@ export class Human {
   #warmupNode = async () => {
     const atob = (str) => Buffer.from(str, 'base64');
     const img = this.config.warmup === 'face' ? atob(sample.face) : atob(sample.body);
-    // @ts-ignore
-    const data = tf.node.decodeJpeg(img); // tf.node is only defined when compiling for nodejs
+    // @ts-ignore // tf.node is only defined when compiling for nodejs
+    const data = tf.node?.decodeJpeg(img);
     const expanded = data.expandDims(0);
     this.tf.dispose(data);
     // log('Input:', expanded);
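Adding `?.` here changes the failure mode: in a browser bundle `tf.node` is `undefined`, so `tf.node.decodeJpeg(img)` would throw a `TypeError` at call time, while `tf.node?.decodeJpeg(img)` short-circuits to `undefined`. The follow-on cost is that `data` can now be `undefined`, so the next line's `data.expandDims(0)` would still throw in that environment. A hedged sketch of a fuller guard (not part of this commit; the error message is illustrative):

```ts
// sketch: fail with an explicit error instead of relying on the next line
declare const tf: any;          // stand-in for the bundled tfjs module
declare const img: Uint8Array;  // jpeg bytes, as in the warmup above
const data = tf.node?.decodeJpeg(img); // undefined outside nodejs builds
if (!data) throw new Error('Human: warmup requires tfjs-node for decodeJpeg');
const expanded = data.expandDims(0);
```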
@@ -1,19 +1,17 @@
-// @ts-nocheck
-
 import * as tf from '../../dist/tfjs.esm.js';
 import * as fxImage from './imagefx';

 const maxSize = 2048;
 // internal temp canvases
-let inCanvas = null;
-let outCanvas = null;
+let inCanvas;
+let outCanvas;
 // instance of fximage
-let fx = null;
+let fx;

 // process input image and return tensor
 // input can be tensor, imagedata, htmlimageelement, htmlvideoelement
 // input is resized and run through imagefx filter
-export function process(input, config): { tensor: tf.Tensor, canvas: OffscreenCanvas | HTMLCanvasElement } {
+export function process(input, config): { tensor: typeof tf.Tensor | null, canvas: OffscreenCanvas | HTMLCanvasElement } {
   let tensor;
   if (!input) throw new Error('Human: Input is missing');
   if (
@@ -32,8 +30,8 @@ export function process(input, config): { tensor: tf.Tensor, canvas: OffscreenCa
   if (input instanceof tf.Tensor) {
     tensor = tf.clone(input);
   } else {
-    const originalWidth = input.naturalWidth || input.videoWidth || input.width || (input.shape && (input.shape[1] > 0));
-    const originalHeight = input.naturalHeight || input.videoHeight || input.height || (input.shape && (input.shape[2] > 0));
+    const originalWidth = input['naturalWidth'] || input['videoWidth'] || input['width'] || (input['shape'] && (input['shape'][1] > 0));
+    const originalHeight = input['naturalHeight'] || input['videoHeight'] || input['height'] || (input['shape'] && (input['shape'][2] > 0));
     let targetWidth = originalWidth;
     let targetHeight = originalHeight;
     if (targetWidth > maxSize) {
@@ -49,19 +47,19 @@ export function process(input, config): { tensor: tf.Tensor, canvas: OffscreenCa
     if (config.filter.height > 0) targetHeight = config.filter.height;
     else if (config.filter.width > 0) targetHeight = originalHeight * (config.filter.width / originalWidth);
     if (!targetWidth || !targetHeight) throw new Error('Human: Input cannot determine dimension');
-    if (!inCanvas || (inCanvas.width !== targetWidth) || (inCanvas.height !== targetHeight)) {
+    if (!inCanvas || (inCanvas?.width !== targetWidth) || (inCanvas?.height !== targetHeight)) {
       inCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(targetWidth, targetHeight) : document.createElement('canvas');
-      if (inCanvas.width !== targetWidth) inCanvas.width = targetWidth;
-      if (inCanvas.height !== targetHeight) inCanvas.height = targetHeight;
+      if (inCanvas?.width !== targetWidth) inCanvas.width = targetWidth;
+      if (inCanvas?.height !== targetHeight) inCanvas.height = targetHeight;
     }
     const ctx = inCanvas.getContext('2d');
     if (input instanceof ImageData) ctx.putImageData(input, 0, 0);
-    else ctx.drawImage(input, 0, 0, originalWidth, originalHeight, 0, 0, inCanvas.width, inCanvas.height);
+    else ctx.drawImage(input, 0, 0, originalWidth, originalHeight, 0, 0, inCanvas?.width, inCanvas?.height);
     if (config.filter.enabled) {
-      if (!fx || !outCanvas || (inCanvas.width !== outCanvas.width) || (inCanvas.height !== outCanvas.height)) {
-        outCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(inCanvas.width, inCanvas.height) : document.createElement('canvas');
-        if (outCanvas.width !== inCanvas.width) outCanvas.width = inCanvas.width;
-        if (outCanvas.height !== inCanvas.height) outCanvas.height = inCanvas.height;
+      if (!fx || !outCanvas || (inCanvas.width !== outCanvas.width) || (inCanvas?.height !== outCanvas?.height)) {
+        outCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(inCanvas?.width, inCanvas?.height) : document.createElement('canvas');
+        if (outCanvas?.width !== inCanvas?.width) outCanvas.width = inCanvas?.width;
+        if (outCanvas?.height !== inCanvas?.height) outCanvas.height = inCanvas?.height;
         // log('created FX filter');
         fx = tf.ENV.flags.IS_BROWSER ? new fxImage.GLImageFilter({ canvas: outCanvas }) : null; // && (typeof document !== 'undefined')
       }
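Why `?.` is enough in these guards: while the canvas is still uninitialized, `inCanvas?.width` evaluates to `undefined`, the `!== targetWidth` comparison is then true, and the branch that (re)creates the canvas runs instead of a `TypeError` being thrown. The guards deliberately rely on that comparison succeeding for the undefined case:

```ts
// illustration of the short-circuit the guards above rely on
let inCanvas: HTMLCanvasElement | undefined;
const targetWidth = 512;
console.log(inCanvas?.width !== targetWidth); // true -> create the canvas
inCanvas = document.createElement('canvas');
inCanvas.width = targetWidth;
console.log(inCanvas?.width !== targetWidth); // false -> reuse it
```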
@@ -17,10 +17,7 @@ function GLProgram(gl, vertexSource, fragmentSource) {
     const shader = gl.createShader(type);
     gl.shaderSource(shader, source);
     gl.compileShader(shader);
-    if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
-      // @ts-ignore
-      throw new Error('Filter: GL compile failed', gl.getShaderInfoLog(shader));
-    }
+    if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) throw new Error('Filter: GL compile failed', gl.getShaderInfoLog(shader));
     return shader;
   };
@@ -33,10 +30,7 @@ function GLProgram(gl, vertexSource, fragmentSource) {
   gl.attachShader(this.id, _fsh);
   gl.linkProgram(this.id);

-  if (!gl.getProgramParameter(this.id, gl.LINK_STATUS)) {
-    // @ts-ignore
-    throw new Error('Filter: GL link failed', gl.getProgramInfoLog(this.id));
-  }
+  if (!gl.getProgramParameter(this.id, gl.LINK_STATUS)) throw new Error('Filter: GL link failed', gl.getProgramInfoLog(this.id));

   gl.useProgram(this.id);
   // Collect attributes
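One reason these lines were flagged in the first place: `Error()` takes a single message argument under the ES2018 build target (the `{ cause }` option only arrived in ES2022), so the `getShaderInfoLog`/`getProgramInfoLog` result passed as a second argument is silently dropped at runtime. Collapsing the statements removes the `// @ts-ignore` lines but keeps that behavior; a hedged alternative that actually preserves the log (not what the commit does):

```ts
// sketch: interpolate the GL log into the message so it survives the throw
declare const gl: WebGLRenderingContext;
declare const shader: WebGLShader;
if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
  throw new Error(`Filter: GL compile failed: ${gl.getShaderInfoLog(shader) || 'unknown'}`);
}
```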
@@ -123,7 +117,6 @@ export function GLImageFilter(params) {
   };

   const _getTempFramebuffer = function (index) {
-    // @ts-ignore
     _tempFramebuffers[index] = _tempFramebuffers[index] || _createFramebufferTexture(_width, _height);
     return _tempFramebuffers[index];
   };
@@ -138,7 +131,6 @@
       source = _sourceTexture;
     } else {
       // All following draw calls use the temp buffer last drawn to
-      // @ts-ignore
       source = _getTempFramebuffer(_currentFramebufferIndex)?.texture;
     }
     _drawCount++;
@@ -151,7 +143,6 @@
     } else {
       // Intermediate draw call - get a temp buffer to draw to
       _currentFramebufferIndex = (_currentFramebufferIndex + 1) % 2;
-      // @ts-ignore
       target = _getTempFramebuffer(_currentFramebufferIndex)?.fbo;
     }
     // Bind the source and target and draw the two triangles
@@ -12,8 +12,9 @@ const scaleBox = 2.5; // increase box size
 export async function load(config) {
   if (!model) {
     model = await tf.loadGraphModel(join(config.modelBasePath, config.object.modelPath));
-    // @ts-ignore
-    model.inputSize = parseInt(Object.values(model.modelSignature['inputs'])[0].tensorShape.dim[2].size);
+    const inputs = Object.values(model.modelSignature['inputs']);
+    model.inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : null;
+    if (!model.inputSize) throw new Error(`Human: Cannot determine model inputSize: ${config.object.modelPath}`);
     if (!model || !model.modelUrl) log('load model failed:', config.object.modelPath);
     else if (config.debug) log('load model:', model.modelUrl);
   }
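This turns a blind `// @ts-ignore` cast into a checked read: the model's expected input resolution is pulled from the graph model's signature, and a failure to find it now throws a descriptive error instead of leaving `inputSize` as `NaN` for later tensor ops to trip over. A standalone sketch of the same pattern (the helper name and `any` casts are illustrative; `modelSignature` is the tfjs graph-model property used above):

```ts
import * as tf from '@tensorflow/tfjs';

// sketch: derive the expected input size from a loaded graph model, failing loudly
function getInputSize(model: tf.GraphModel, modelPath: string): number {
  const inputs = Object.values((model.modelSignature as any)?.['inputs'] || {});
  const size = parseInt((inputs[0] as any)?.tensorShape?.dim?.[2]?.size, 10);
  if (!size) throw new Error(`Human: Cannot determine model inputSize: ${modelPath}`);
  return size;
}
```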
@@ -4,8 +4,8 @@ import * as tf from '../../dist/tfjs.esm.js';
 export const config = {
   name: 'humangl',
   priority: 99,
-  canvas: null,
-  gl: null,
+  canvas: <null | OffscreenCanvas | HTMLCanvasElement>null,
+  gl: <any>null,
   width: 1024,
   height: 1024,
   webGLattr: { // https://www.khronos.org/registry/webgl/specs/latest/1.0/#5.2
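The `<null | OffscreenCanvas | HTMLCanvasElement>null` initializer is a TypeScript angle-bracket cast: without it the literal is inferred as type `null`, and the later assignment to `config.canvas` in `register()` would not typecheck. For comparison, the equivalent `as` form (shown here with a stricter type for `gl` than the `<any>null` above, purely as an illustration):

```ts
// equivalent widening with the `as` syntax
const config = {
  canvas: null as null | OffscreenCanvas | HTMLCanvasElement,
  gl: null as WebGL2RenderingContext | null, // stricter stand-in for <any>null
};
config.canvas = document.createElement('canvas'); // ok: type allows reassignment
```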
@@ -24,14 +24,12 @@ export function register(): void {
   if (!tf.findBackend(config.name)) {
     log('backend registration:', config.name);
     try {
-      // @ts-ignore
       config.canvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(config.width, config.height) : document.createElement('canvas');
     } catch (err) {
       log('error: cannot create canvas:', err);
       return;
     }
     try {
-      // @ts-ignore
       config.gl = config.canvas.getContext('webgl2', config.webGLattr);
     } catch (err) {
       log('error: cannot get WebGL2 context:', err);
@@ -62,7 +60,6 @@
     }
     try {
       tf.ENV.set('WEBGL_VERSION', 2);
-      // @ts-ignore
       // tf.ENV.set('WEBGL_MAX_TEXTURE_SIZE', config.gl.getParameter(config.gl.MAX_TEXTURE_SIZE));
       // tf.ENV.set('WEBGL_FORCE_F16_TEXTURES', true);
       // tf.ENV.set('WEBGL_PACK_DEPTHWISECONV', true);
wiki
@@ -1 +1 @@
-Subproject commit d09d36cf9de309728504402bf301ac3ab018de65
+Subproject commit 652dee1013ae99789199cc229d6652b3323ae7de