mirror of https://github.com/vladmandic/human
improve error handling
parent 8e0aa270f0
commit 798d842c4b
CHANGELOG.md
@@ -9,11 +9,13 @@
 ## Changelog
 
-### **HEAD -> main** 2021/11/13 mandic00@live.com
+### **2.5.2** 2021/11/14 mandic00@live.com
 
-### **origin/main** 2021/11/12 mandic00@live.com
+### **origin/main** 2021/11/13 mandic00@live.com
 
 - fix gear and ssrnet modules
 - fix for face crop when mesh is disabled
 - implement optional face masking
 - add similarity score range normalization
 - add faceid demo
TODO.md
@@ -56,3 +56,5 @@ Other:
 - Improved `similarity` and `match` score range normalization
 - Documentation overhaul
 - Fixed optional `gear`, `ssrnet`, `mobilefacenet` modules
+- Improved error handling
+- Fix Firefox WebGPU compatibility issue
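The normalized `similarity` score mentioned in the TODO above is exposed through the public API. A minimal consumption sketch, assuming two face descriptors obtained from earlier `detect()` calls (the descriptor variables are placeholders):

```ts
import { Human } from '@vladmandic/human';

const human = new Human();

// with range normalization the score is bounded to 0..1
// `desc1` and `desc2` are hypothetical descriptors from result.face[n].embedding
function compareFaces(desc1: number[], desc2: number[]): void {
  const score = human.similarity(desc1, desc2); // 0 = no similarity, 1 = identical
  console.log(`similarity: ${(100 * score).toFixed(1)}%`);
}
```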
src/config.ts
@@ -190,6 +190,9 @@ export interface GestureConfig {
   enabled: boolean,
 }
 
+export type BackendType = ['cpu', 'wasm', 'webgl', 'humangl', 'tensorflow', 'webgpu'];
+export type WarmupType = ['' | 'none' | 'face' | 'full' | 'body'];
+
 /**
  * Configuration interface definition for **Human** library
  *
@@ -231,7 +234,7 @@ export interface Config {
   *
   * default: `full`
   */
-  warmup: 'none' | 'face' | 'full' | 'body',
+  warmup: '' | 'none' | 'face' | 'full' | 'body',
   // warmup: string;
 
   /** Base model path (typically starting with file://, http:// or https://) for all models
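With `warmup` now accepting an empty string, disabling warmup can be expressed either way; a short sketch of the config surface changed above:

```ts
import { Human } from '@vladmandic/human';

// both instances skip warmup after this change;
// previously '' was not part of the allowed union type
const humanNoWarmup = new Human({ warmup: 'none' });
const humanEmptyWarmup = new Human({ warmup: '' });

// default behavior remains a full warmup pass
const humanDefault = new Human({ warmup: 'full' });
```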
src/face/face.ts
@@ -21,7 +21,7 @@ import type { Tensor } from '../tfjs/types';
 import type { Human } from '../human';
 import { calculateFaceAngle } from './angles';
 
-export const detectFace = async (parent: Human /* instance of human */, input: Tensor): Promise<FaceResult[]> => {
+export const detectFace = async (instance: Human /* instance of human */, input: Tensor): Promise<FaceResult[]> => {
   // run facemesh, includes blazeface and iris
   // eslint-disable-next-line no-async-promise-executor
   let timeStamp;
@@ -34,16 +34,16 @@ export const detectFace = async (parent: Human /* instance of human */, input: T
   let livenessRes;
   let descRes;
   const faceRes: Array<FaceResult> = [];
-  parent.state = 'run:face';
+  instance.state = 'run:face';
   timeStamp = now();
 
-  const faces = await facemesh.predict(input, parent.config);
-  parent.performance.face = env.perfadd ? (parent.performance.face || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
+  const faces = await facemesh.predict(input, instance.config);
+  instance.performance.face = env.perfadd ? (instance.performance.face || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
   if (!input.shape || input.shape.length !== 4) return [];
   if (!faces) return [];
   // for (const face of faces) {
   for (let i = 0; i < faces.length; i++) {
-    parent.analyze('Get Face');
+    instance.analyze('Get Face');
 
     // if something went wrong, skip the face
     // @ts-ignore possibly undefined
@@ -53,7 +53,7 @@ export const detectFace = async (parent: Human /* instance of human */, input: T
     }
 
     // optional face mask
-    if (parent.config.face.detector?.mask) {
+    if (instance.config.face.detector?.mask) {
       const masked = await mask.mask(faces[i]);
       tf.dispose(faces[i].tensor);
       faces[i].tensor = masked as Tensor;
@@ -63,106 +63,106 @@ export const detectFace = async (parent: Human /* instance of human */, input: T
     const rotation = faces[i].mesh && (faces[i].mesh.length > 200) ? calculateFaceAngle(faces[i], [input.shape[2], input.shape[1]]) : null;
 
     // run emotion, inherits face from blazeface
-    parent.analyze('Start Emotion:');
-    if (parent.config.async) {
-      emotionRes = parent.config.face.emotion?.enabled ? emotion.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
+    instance.analyze('Start Emotion:');
+    if (instance.config.async) {
+      emotionRes = instance.config.face.emotion?.enabled ? emotion.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
     } else {
-      parent.state = 'run:emotion';
+      instance.state = 'run:emotion';
       timeStamp = now();
-      emotionRes = parent.config.face.emotion?.enabled ? await emotion.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
-      parent.performance.emotion = env.perfadd ? (parent.performance.emotion || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
+      emotionRes = instance.config.face.emotion?.enabled ? await emotion.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
+      instance.performance.emotion = env.perfadd ? (instance.performance.emotion || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
     }
-    parent.analyze('End Emotion:');
+    instance.analyze('End Emotion:');
 
     // run antispoof, inherits face from blazeface
-    parent.analyze('Start AntiSpoof:');
-    if (parent.config.async) {
-      antispoofRes = parent.config.face.antispoof?.enabled ? antispoof.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
+    instance.analyze('Start AntiSpoof:');
+    if (instance.config.async) {
+      antispoofRes = instance.config.face.antispoof?.enabled ? antispoof.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
     } else {
-      parent.state = 'run:antispoof';
+      instance.state = 'run:antispoof';
       timeStamp = now();
-      antispoofRes = parent.config.face.antispoof?.enabled ? await antispoof.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
-      parent.performance.antispoof = env.perfadd ? (parent.performance.antispoof || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
+      antispoofRes = instance.config.face.antispoof?.enabled ? await antispoof.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
+      instance.performance.antispoof = env.perfadd ? (instance.performance.antispoof || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
     }
-    parent.analyze('End AntiSpoof:');
+    instance.analyze('End AntiSpoof:');
 
     // run liveness, inherits face from blazeface
-    parent.analyze('Start Liveness:');
-    if (parent.config.async) {
-      livenessRes = parent.config.face.liveness?.enabled ? liveness.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
+    instance.analyze('Start Liveness:');
+    if (instance.config.async) {
+      livenessRes = instance.config.face.liveness?.enabled ? liveness.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
     } else {
-      parent.state = 'run:liveness';
+      instance.state = 'run:liveness';
       timeStamp = now();
-      livenessRes = parent.config.face.liveness?.enabled ? await liveness.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
-      parent.performance.liveness = env.perfadd ? (parent.performance.antispoof || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
+      livenessRes = instance.config.face.liveness?.enabled ? await liveness.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
+      instance.performance.liveness = env.perfadd ? (instance.performance.liveness || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
     }
-    parent.analyze('End Liveness:');
+    instance.analyze('End Liveness:');
 
     // run gear, inherits face from blazeface
-    parent.analyze('Start GEAR:');
-    if (parent.config.async) {
-      gearRes = parent.config.face['gear']?.enabled ? gear.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : {};
+    instance.analyze('Start GEAR:');
+    if (instance.config.async) {
+      gearRes = instance.config.face['gear']?.enabled ? gear.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : {};
     } else {
-      parent.state = 'run:gear';
+      instance.state = 'run:gear';
       timeStamp = now();
-      gearRes = parent.config.face['gear']?.enabled ? await gear.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : {};
-      parent.performance.gear = Math.trunc(now() - timeStamp);
+      gearRes = instance.config.face['gear']?.enabled ? await gear.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : {};
+      instance.performance.gear = Math.trunc(now() - timeStamp);
     }
-    parent.analyze('End GEAR:');
+    instance.analyze('End GEAR:');
 
     // run ssrnet, inherits face from blazeface
-    parent.analyze('Start SSRNet:');
-    if (parent.config.async) {
-      ageRes = parent.config.face['ssrnet']?.enabled ? ssrnetAge.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : {};
-      genderRes = parent.config.face['ssrnet']?.enabled ? ssrnetGender.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : {};
+    instance.analyze('Start SSRNet:');
+    if (instance.config.async) {
+      ageRes = instance.config.face['ssrnet']?.enabled ? ssrnetAge.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : {};
+      genderRes = instance.config.face['ssrnet']?.enabled ? ssrnetGender.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : {};
     } else {
-      parent.state = 'run:ssrnet';
+      instance.state = 'run:ssrnet';
       timeStamp = now();
-      ageRes = parent.config.face['ssrnet']?.enabled ? await ssrnetAge.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : {};
-      genderRes = parent.config.face['ssrnet']?.enabled ? await ssrnetGender.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : {};
-      parent.performance.ssrnet = Math.trunc(now() - timeStamp);
+      ageRes = instance.config.face['ssrnet']?.enabled ? await ssrnetAge.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : {};
+      genderRes = instance.config.face['ssrnet']?.enabled ? await ssrnetGender.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : {};
+      instance.performance.ssrnet = Math.trunc(now() - timeStamp);
     }
-    parent.analyze('End SSRNet:');
+    instance.analyze('End SSRNet:');
 
     // run mobilefacenet, inherits face from blazeface
-    parent.analyze('Start MobileFaceNet:');
-    if (parent.config.async) {
-      mobilefacenetRes = parent.config.face['mobilefacenet']?.enabled ? mobilefacenet.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : {};
+    instance.analyze('Start MobileFaceNet:');
+    if (instance.config.async) {
+      mobilefacenetRes = instance.config.face['mobilefacenet']?.enabled ? mobilefacenet.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : {};
     } else {
-      parent.state = 'run:mobilefacenet';
+      instance.state = 'run:mobilefacenet';
       timeStamp = now();
-      mobilefacenetRes = parent.config.face['mobilefacenet']?.enabled ? await mobilefacenet.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : {};
-      parent.performance.mobilefacenet = Math.trunc(now() - timeStamp);
+      mobilefacenetRes = instance.config.face['mobilefacenet']?.enabled ? await mobilefacenet.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : {};
+      instance.performance.mobilefacenet = Math.trunc(now() - timeStamp);
     }
-    parent.analyze('End MobileFaceNet:');
+    instance.analyze('End MobileFaceNet:');
 
     // run description, inherits face from blazeface
-    parent.analyze('Start Description:');
-    if (parent.config.async) {
-      descRes = parent.config.face.description?.enabled ? faceres.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
+    instance.analyze('Start Description:');
+    if (instance.config.async) {
+      descRes = instance.config.face.description?.enabled ? faceres.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
     } else {
-      parent.state = 'run:description';
+      instance.state = 'run:description';
       timeStamp = now();
-      descRes = parent.config.face.description?.enabled ? await faceres.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
-      parent.performance.description = env.perfadd ? (parent.performance.description || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
+      descRes = instance.config.face.description?.enabled ? await faceres.predict(faces[i].tensor || tf.tensor([]), instance.config, i, faces.length) : null;
+      instance.performance.description = env.perfadd ? (instance.performance.description || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
     }
-    parent.analyze('End Description:');
+    instance.analyze('End Description:');
 
     // if async wait for results
-    if (parent.config.async) {
+    if (instance.config.async) {
       [ageRes, genderRes, emotionRes, mobilefacenetRes, descRes, gearRes, antispoofRes, livenessRes] = await Promise.all([ageRes, genderRes, emotionRes, mobilefacenetRes, descRes, gearRes, antispoofRes, livenessRes]);
     }
-    parent.analyze('Finish Face:');
+    instance.analyze('Finish Face:');
 
     // override age/gender if alternative models are used
-    if (parent.config.face['ssrnet']?.enabled && ageRes && genderRes) descRes = { age: ageRes.age, gender: genderRes.gender, genderScore: genderRes.genderScore };
-    if (parent.config.face['gear']?.enabled && gearRes) descRes = { age: gearRes.age, gender: gearRes.gender, genderScore: gearRes.genderScore, race: gearRes.race };
+    if (instance.config.face['ssrnet']?.enabled && ageRes && genderRes) descRes = { age: ageRes.age, gender: genderRes.gender, genderScore: genderRes.genderScore };
+    if (instance.config.face['gear']?.enabled && gearRes) descRes = { age: gearRes.age, gender: gearRes.gender, genderScore: gearRes.genderScore, race: gearRes.race };
     // override descriptor if embedding model is used
-    if (parent.config.face['mobilefacenet']?.enabled && mobilefacenetRes) descRes.descriptor = mobilefacenetRes;
+    if (instance.config.face['mobilefacenet']?.enabled && mobilefacenetRes) descRes.descriptor = mobilefacenetRes;
 
     // calculate iris distance
     // iris: array[ center, left, top, right, bottom]
-    if (!parent.config.face.iris?.enabled && faces[i]?.annotations?.leftEyeIris && faces[i]?.annotations?.rightEyeIris) {
+    if (!instance.config.face.iris?.enabled && faces[i]?.annotations?.leftEyeIris && faces[i]?.annotations?.rightEyeIris) {
       delete faces[i].annotations.leftEyeIris;
       delete faces[i].annotations.rightEyeIris;
     }
@@ -173,7 +173,7 @@ export const detectFace = async (parent: Human /* instance of human */, input: T
       : 0; // note: average human iris size is 11.7mm
 
     // optionally return tensor
-    const tensor = parent.config.face.detector?.return ? tf.squeeze(faces[i].tensor) : null;
+    const tensor = instance.config.face.detector?.return ? tf.squeeze(faces[i].tensor) : null;
     // dispose original face tensor
     tf.dispose(faces[i].tensor);
    // delete temp face image
@@ -195,14 +195,14 @@ export const detectFace = async (parent: Human /* instance of human */, input: T
     if (rotation) res.rotation = rotation;
     if (tensor) res.tensor = tensor;
     faceRes.push(res);
-    parent.analyze('End Face');
+    instance.analyze('End Face');
   }
-  parent.analyze('End FaceMesh:');
-  if (parent.config.async) {
-    if (parent.performance.face) delete parent.performance.face;
-    if (parent.performance.age) delete parent.performance.age;
-    if (parent.performance.gender) delete parent.performance.gender;
-    if (parent.performance.emotion) delete parent.performance.emotion;
+  instance.analyze('End FaceMesh:');
+  if (instance.config.async) {
+    if (instance.performance.face) delete instance.performance.face;
+    if (instance.performance.age) delete instance.performance.age;
+    if (instance.performance.gender) delete instance.performance.gender;
+    if (instance.performance.emotion) delete instance.performance.emotion;
   }
   return faceRes;
 };
src/face/mask.ts
@@ -18,6 +18,7 @@ function insidePoly(x: number, y: number, polygon: Array<{ x: number, y: number
 
 export async function mask(face: FaceResult): Promise<Tensor | undefined> {
   if (!face.tensor) return face.tensor;
+  if (!face.mesh || face.mesh.length < 100) return face.tensor;
   const width = face.tensor.shape[2] || 0;
   const height = face.tensor.shape[1] || 0;
   const buffer = await face.tensor.buffer();
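The new guard above returns the face tensor unchanged when no usable mesh is present. Enabling the optional masking itself is done through the detector config used by `detectFace`; a minimal sketch, assuming a browser image element as input:

```ts
import { Human } from '@vladmandic/human';

// enable optional face masking: pixels outside the mesh-derived face outline
// are blanked before the crop is passed to downstream models
const human = new Human({
  face: {
    enabled: true,
    detector: { mask: true }, // new optional flag checked in detectFace above
    mesh: { enabled: true },  // masking needs mesh data, as the new guard shows
  },
});

async function run(image: HTMLImageElement) {
  const result = await human.detect(image);
  console.log('faces:', result.face.length);
}
```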
src/human.ts
@@ -36,7 +36,7 @@ import * as posenet from './body/posenet';
 import * as segmentation from './segmentation/segmentation';
 import * as warmups from './warmup';
 // type definitions
-import type { Input, Tensor, DrawOptions, Config, Result, FaceResult, HandResult, BodyResult, ObjectResult, GestureResult, PersonResult } from './exports';
+import type { Input, Tensor, DrawOptions, Config, Result, FaceResult, HandResult, BodyResult, ObjectResult, GestureResult, PersonResult, AnyCanvas } from './exports';
 // type exports
 export * from './exports';
 
@@ -46,12 +46,6 @@ export * from './exports';
  */
 export type TensorFlow = typeof tf;
 
-/** Error message */
-export type Error = {
-  /** @property error message */
-  error: string,
-};
-
 /** **Human** library main class
  *
  * All methods and properties are available only as members of Human class
@@ -84,7 +78,7 @@ export class Human {
   state: string;
 
   /** currently processed image tensor and canvas */
-  process: { tensor: Tensor | null, canvas: OffscreenCanvas | HTMLCanvasElement | null };
+  process: { tensor: Tensor | null, canvas: AnyCanvas | null };
 
   /** Instance of TensorFlow/JS used by Human
    * - Can be embedded or externally provided
@@ -161,16 +155,16 @@ export class Human {
     // reexport draw methods
     this.draw = {
       options: draw.options as DrawOptions,
-      canvas: (input: HTMLCanvasElement | OffscreenCanvas | HTMLImageElement | HTMLMediaElement | HTMLVideoElement, output: HTMLCanvasElement) => draw.canvas(input, output),
-      face: (output: HTMLCanvasElement | OffscreenCanvas, result: FaceResult[], options?: Partial<DrawOptions>) => draw.face(output, result, options),
-      body: (output: HTMLCanvasElement | OffscreenCanvas, result: BodyResult[], options?: Partial<DrawOptions>) => draw.body(output, result, options),
-      hand: (output: HTMLCanvasElement | OffscreenCanvas, result: HandResult[], options?: Partial<DrawOptions>) => draw.hand(output, result, options),
-      gesture: (output: HTMLCanvasElement | OffscreenCanvas, result: GestureResult[], options?: Partial<DrawOptions>) => draw.gesture(output, result, options),
-      object: (output: HTMLCanvasElement | OffscreenCanvas, result: ObjectResult[], options?: Partial<DrawOptions>) => draw.object(output, result, options),
-      person: (output: HTMLCanvasElement | OffscreenCanvas, result: PersonResult[], options?: Partial<DrawOptions>) => draw.person(output, result, options),
-      all: (output: HTMLCanvasElement | OffscreenCanvas, result: Result, options?: Partial<DrawOptions>) => draw.all(output, result, options),
+      canvas: (input: AnyCanvas | HTMLImageElement | HTMLMediaElement | HTMLVideoElement, output: AnyCanvas) => draw.canvas(input, output),
+      face: (output: AnyCanvas, result: FaceResult[], options?: Partial<DrawOptions>) => draw.face(output, result, options),
+      body: (output: AnyCanvas, result: BodyResult[], options?: Partial<DrawOptions>) => draw.body(output, result, options),
+      hand: (output: AnyCanvas, result: HandResult[], options?: Partial<DrawOptions>) => draw.hand(output, result, options),
+      gesture: (output: AnyCanvas, result: GestureResult[], options?: Partial<DrawOptions>) => draw.gesture(output, result, options),
+      object: (output: AnyCanvas, result: ObjectResult[], options?: Partial<DrawOptions>) => draw.object(output, result, options),
+      person: (output: AnyCanvas, result: PersonResult[], options?: Partial<DrawOptions>) => draw.person(output, result, options),
+      all: (output: AnyCanvas, result: Result, options?: Partial<DrawOptions>) => draw.all(output, result, options),
     };
-    this.result = { face: [], body: [], hand: [], gesture: [], object: [], performance: {}, timestamp: 0, persons: [] };
+    this.result = { face: [], body: [], hand: [], gesture: [], object: [], performance: {}, timestamp: 0, persons: [], error: null };
     // export access to image processing
     // @ts-ignore eslint-typescript cannot correctly infer type in anonymous function
     this.process = { tensor: null, canvas: null };
@@ -253,7 +247,7 @@ export class Human {
    * - `canvas` as canvas which is input image filtered with segmentation data and optionally merged with background image. canvas alpha values are set to segmentation values for easy merging
    * - `alpha` as grayscale canvas that represents segmentation alpha values
    */
-  async segmentation(input: Input, background?: Input): Promise<{ data: number[] | Tensor, canvas: HTMLCanvasElement | OffscreenCanvas | null, alpha: HTMLCanvasElement | OffscreenCanvas | null }> {
+  async segmentation(input: Input, background?: Input): Promise<{ data: number[] | Tensor, canvas: AnyCanvas | null, alpha: AnyCanvas | null }> {
     return segmentation.process(input, background, this.config);
   }
 
@@ -389,7 +383,7 @@ export class Human {
    * @param userConfig?: {@link Config}
    * @returns result: {@link Result}
    */
-  async detect(input: Input, userConfig?: Partial<Config>): Promise<Result | Error> {
+  async detect(input: Input, userConfig?: Partial<Config>): Promise<Result> {
     // detection happens inside a promise
     this.state = 'detect';
     return new Promise(async (resolve) => {
@@ -404,7 +398,8 @@ export class Human {
       const error = this.#sanity(input);
       if (error) {
         log(error, input);
-        resolve({ error });
+        this.emit('error');
+        resolve({ face: [], body: [], hand: [], gesture: [], object: [], performance: this.performance, timestamp: now(), persons: [], error });
       }
 
       const timeStart = now();
@@ -417,14 +412,15 @@ export class Human {
 
       timeStamp = now();
       this.state = 'image';
-      const img = await image.process(input, this.config) as { canvas: HTMLCanvasElement | OffscreenCanvas, tensor: Tensor };
+      const img = await image.process(input, this.config) as { canvas: AnyCanvas, tensor: Tensor };
       this.process = img;
       this.performance.inputProcess = this.env.perfadd ? (this.performance.inputProcess || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
       this.analyze('Get Image:');
 
       if (!img.tensor) {
         if (this.config.debug) log('could not convert input to tensor');
-        resolve({ error: 'could not convert input to tensor' });
+        this.emit('error');
+        resolve({ face: [], body: [], hand: [], gesture: [], object: [], performance: this.performance, timestamp: now(), persons: [], error: 'could not convert input to tensor' });
         return;
       }
       this.emit('image');
@@ -534,6 +530,7 @@ export class Human {
         performance: this.performance,
         canvas: this.process.canvas,
         timestamp: Date.now(),
+        error: null,
         get persons() { return persons.join(faceRes as FaceResult[], bodyRes as BodyResult[], handRes as HandResult[], gestureRes, shape); },
       };
 
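Taken together, the `human.ts` changes replace the old `Result | Error` union with a single `Result` shape whose `error` field is populated on failure, so callers no longer need type narrowing. A sketch of the consuming side (the input source is a placeholder):

```ts
import { Human } from '@vladmandic/human';

const human = new Human();

async function safeDetect(input: HTMLVideoElement) {
  const result = await human.detect(input); // always resolves to a full Result
  if (result.error) {
    // on failure the result carries empty arrays plus a populated error string,
    // instead of the previous bare { error } object
    console.warn('detection failed:', result.error);
    return;
  }
  console.log('faces:', result.face.length, 'bodies:', result.body.length);
}
```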
src/image/image.ts
@@ -28,9 +28,10 @@ export function canvas(width, height): AnyCanvas {
   let c;
   if (env.browser) { // browser defines canvas object
     if (env.worker) { // if running in web worker use OffscreenCanvas
+      if (typeof OffscreenCanvas === 'undefined') throw new Error('canvas error: attempted to run in web worker but OffscreenCanvas is not supported');
       c = new OffscreenCanvas(width, height);
     } else { // otherwise use DOM canvas
-      if (typeof document === 'undefined') throw new Error('attempted to run in web worker but offscreenCanvas is not supported');
+      if (typeof document === 'undefined') throw new Error('canvas error: attempted to run in browser but DOM is not defined');
       c = document.createElement('canvas');
       c.width = width;
       c.height = height;
@@ -39,8 +40,8 @@ export function canvas(width, height): AnyCanvas {
     // @ts-ignore // env.canvas is an external monkey-patch
     if (typeof env.Canvas !== 'undefined') c = new env.Canvas(width, height);
     else if (typeof globalThis.Canvas !== 'undefined') c = new globalThis.Canvas(width, height);
+    // else throw new Error('canvas error: attempted to use canvas in nodejs without canvas support installed');
   }
-  // if (!c) throw new Error('cannot create canvas');
   return c;
 }
 
@@ -58,7 +59,7 @@ export function copy(input: AnyCanvas, output?: AnyCanvas) {
 export async function process(input: Input, config: Config, getTensor: boolean = true): Promise<{ tensor: Tensor | null, canvas: AnyCanvas | null }> {
   if (!input) {
     // throw new Error('input is missing');
-    if (config.debug) log('input is missing');
+    if (config.debug) log('input error: input is missing');
     return { tensor: null, canvas: null }; // video may become temporarily unavailable due to onresize
   }
   // sanity checks since different browsers do not implement all dom elements
@@ -75,12 +76,12 @@ export async function process(input: Input, config: Config, getTensor: boolean =
     && !(typeof HTMLCanvasElement !== 'undefined' && input instanceof HTMLCanvasElement)
     && !(typeof OffscreenCanvas !== 'undefined' && input instanceof OffscreenCanvas)
   ) {
-    throw new Error('input type is not recognized');
+    throw new Error('input error: type is not recognized');
   }
   if (input instanceof tf.Tensor) { // if input is tensor use as-is without filters but correct shape as needed
     let tensor: Tensor | null = null;
-    if ((input as Tensor)['isDisposedInternal']) throw new Error('input tensor is disposed');
-    if (!(input as Tensor)['shape']) throw new Error('input tensor has no shape');
+    if ((input as Tensor)['isDisposedInternal']) throw new Error('input error: attempted to use tensor but it is disposed');
+    if (!(input as Tensor)['shape']) throw new Error('input error: attempted to use tensor without a shape');
     if ((input as Tensor).shape.length === 3) { // [height, width, 3 || 4]
       if ((input as Tensor).shape[2] === 3) { // [height, width, 3] so add batch
         tensor = tf.expandDims(input, 0);
@@ -97,7 +98,7 @@ export async function process(input: Input, config: Config, getTensor: boolean =
       }
     }
     // at the end shape must be [1, height, width, 3]
-    if (tensor == null || tensor.shape.length !== 4 || tensor.shape[0] !== 1 || tensor.shape[3] !== 3) throw new Error(`could not process input tensor with shape: ${input['shape']}`);
+    if (tensor == null || tensor.shape.length !== 4 || tensor.shape[0] !== 1 || tensor.shape[3] !== 3) throw new Error(`input error: attempted to use tensor with unrecognized shape: ${input['shape']}`);
     if ((tensor as Tensor).dtype === 'int32') {
       const cast = tf.cast(tensor, 'float32');
       tf.dispose(tensor);
@@ -132,7 +133,7 @@ export async function process(input: Input, config: Config, getTensor: boolean =
     else if ((config.filter.height || 0) > 0) targetWidth = originalWidth * ((config.filter.height || 0) / originalHeight);
     if ((config.filter.height || 0) > 0) targetHeight = config.filter.height;
     else if ((config.filter.width || 0) > 0) targetHeight = originalHeight * ((config.filter.width || 0) / originalWidth);
-    if (!targetWidth || !targetHeight) throw new Error('input cannot determine dimension');
+    if (!targetWidth || !targetHeight) throw new Error('input error: cannot determine dimension');
     if (!inCanvas || (inCanvas?.width !== targetWidth) || (inCanvas?.height !== targetHeight)) inCanvas = canvas(targetWidth, targetHeight);
 
     // draw input to our canvas
@@ -156,7 +157,10 @@ export async function process(input: Input, config: Config, getTensor: boolean =
   if (config.filter.enabled && env.webgl.supported) {
     if (!fx) fx = env.browser ? new fxImage.GLImageFilter() : null; // && (typeof document !== 'undefined')
     env.filter = !!fx;
-    if (!fx) return { tensor: null, canvas: inCanvas };
+    if (!fx || !fx.add) {
+      if (config.debug) log('input process error: cannot initialize filters');
+      return { tensor: null, canvas: inCanvas };
+    }
     fx.reset();
     if (config.filter.brightness !== 0) fx.add('brightness', config.filter.brightness);
     if (config.filter.contrast !== 0) fx.add('contrast', config.filter.contrast);
@@ -181,7 +185,7 @@ export async function process(input: Input, config: Config, getTensor: boolean =
   }
 
   if (!getTensor) return { tensor: null, canvas: outCanvas }; // just canvas was requested
-  if (!outCanvas) throw new Error('cannot create output canvas');
+  if (!outCanvas) throw new Error('canvas error: cannot create output');
 
   // create tensor from image unless input was a tensor already
   let pixels;
@@ -218,7 +222,7 @@ export async function process(input: Input, config: Config, getTensor: boolean =
     tf.dispose(pixels);
     pixels = rgb;
   }
-  if (!pixels) throw new Error('cannot create tensor from input');
+  if (!pixels) throw new Error('input error: cannot create tensor');
   const casted = tf.cast(pixels, 'float32');
   const tensor = config.filter.equalization ? await enhance.histogramEqualization(casted) : tf.expandDims(casted, 0);
   tf.dispose([pixels, casted]);
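The `canvas()` helper above falls back to `env.Canvas` or `globalThis.Canvas` when running outside the browser. A hedged sketch of wiring that up under nodejs with the community `canvas` package (the package choice is an assumption, not mandated by this commit):

```ts
// nodejs only: provide a Canvas implementation for the fallback branch above;
// the 'canvas' npm package is one common choice, any compatible implementation works
import { Canvas } from 'canvas';

// monkey-patch before creating the Human instance so canvas() can find it
(globalThis as unknown as { Canvas: typeof Canvas }).Canvas = Canvas;
```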
src/image/imagefx.ts
@@ -5,6 +5,7 @@
 
 import * as shaders from './imagefxshaders';
 import { canvas } from './image';
+import { log } from '../util/util';
 
 const collect = (source, prefix, collection) => {
   const r = new RegExp('\\b' + prefix + ' \\w+ (\\w+)', 'ig');
@@ -19,15 +20,24 @@ class GLProgram {
   attribute = {};
   gl: WebGLRenderingContext;
   id: WebGLProgram;
 
   constructor(gl, vertexSource, fragmentSource) {
     this.gl = gl;
     const vertexShader = this.compile(vertexSource, this.gl.VERTEX_SHADER);
     const fragmentShader = this.compile(fragmentSource, this.gl.FRAGMENT_SHADER);
     this.id = this.gl.createProgram() as WebGLProgram;
+    if (!vertexShader || !fragmentShader) return;
+    if (!this.id) {
+      log('filter: could not create webgl program');
+      return;
+    }
     this.gl.attachShader(this.id, vertexShader);
     this.gl.attachShader(this.id, fragmentShader);
     this.gl.linkProgram(this.id);
-    if (!this.gl.getProgramParameter(this.id, this.gl.LINK_STATUS)) throw new Error(`filter: gl link failed: ${this.gl.getProgramInfoLog(this.id)}`);
+    if (!this.gl.getProgramParameter(this.id, this.gl.LINK_STATUS)) {
+      log(`filter: gl link failed: ${this.gl.getProgramInfoLog(this.id)}`);
+      return;
+    }
     this.gl.useProgram(this.id);
     collect(vertexSource, 'attribute', this.attribute); // Collect attributes
     for (const a in this.attribute) this.attribute[a] = this.gl.getAttribLocation(this.id, a);
@@ -36,11 +46,18 @@ class GLProgram {
     for (const u in this.uniform) this.uniform[u] = this.gl.getUniformLocation(this.id, u);
   }
 
-  compile = (source, type): WebGLShader => {
+  compile = (source, type): WebGLShader | null => {
     const shader = this.gl.createShader(type) as WebGLShader;
+    if (!shader) {
+      log('filter: could not create shader');
+      return null;
+    }
     this.gl.shaderSource(shader, source);
     this.gl.compileShader(shader);
-    if (!this.gl.getShaderParameter(shader, this.gl.COMPILE_STATUS)) throw new Error(`filter: gl compile failed: ${this.gl.getShaderInfoLog(shader)}`);
+    if (!this.gl.getShaderParameter(shader, this.gl.COMPILE_STATUS)) {
+      log(`filter: gl compile failed: ${this.gl.getShaderInfoLog(shader)}`);
+      return null;
+    }
     return shader;
   };
 }
@@ -67,7 +84,12 @@ export function GLImageFilter() {
   const shaderProgramCache = { }; // key is the shader program source, value is the compiled program
   const DRAW = { INTERMEDIATE: 1 };
   const gl = fxcanvas.getContext('webgl') as WebGLRenderingContext;
-  if (!gl) throw new Error('filter: cannot get webgl context');
   // @ts-ignore used for sanity checks outside of imagefx
   this.gl = gl;
+  if (!gl) {
+    log('filter: cannot get webgl context');
+    return;
+  }
 
   function resize(width, height) {
     if (width === fxcanvas.width && height === fxcanvas.height) return; // Same width/height? Nothing to do here
@@ -102,7 +124,7 @@ export function GLImageFilter() {
     return { fbo, texture };
   }
 
-  function getTempFramebuffer(index) {
+  function getTempFramebuffer(index): { fbo: WebGLFramebuffer | null, texture: WebGLTexture | null } {
     tempFramebuffers[index] = tempFramebuffers[index] || createFramebufferTexture(fxcanvas.width, fxcanvas.height);
     return tempFramebuffers[index] as { fbo: WebGLFramebuffer, texture: WebGLTexture };
   }
@@ -128,13 +150,17 @@ export function GLImageFilter() {
     gl.drawArrays(gl.TRIANGLES, 0, 6);
   }
 
-  function compileShader(fragmentSource) {
+  function compileShader(fragmentSource): GLProgram | null {
     if (shaderProgramCache[fragmentSource]) {
       currentProgram = shaderProgramCache[fragmentSource];
       gl.useProgram((currentProgram ? currentProgram.id : null) || null);
       return currentProgram as GLProgram;
     }
     currentProgram = new GLProgram(gl, shaders.vertexIdentity, fragmentSource);
+    if (!currentProgram) {
+      log('filter: could not get webgl program');
+      return null;
+    }
     const floatSize = Float32Array.BYTES_PER_ELEMENT;
     const vertSize = 4 * floatSize;
     gl.enableVertexAttribArray(currentProgram.attribute['pos']);
@@ -156,6 +182,7 @@ export function GLImageFilter() {
         ? shaders.colorMatrixWithoutAlpha
         : shaders.colorMatrixWithAlpha;
       const program = compileShader(shader);
+      if (!program) return;
       gl.uniform1fv(program.uniform['m'], m);
       draw();
     },
@@ -292,6 +319,7 @@ export function GLImageFilter() {
       const pixelSizeX = 1 / fxcanvas.width;
       const pixelSizeY = 1 / fxcanvas.height;
       const program = compileShader(shaders.convolution);
+      if (!program) return;
       gl.uniform1fv(program.uniform['m'], m);
       gl.uniform2f(program.uniform['px'], pixelSizeX, pixelSizeY);
       draw();
@@ -348,6 +376,7 @@ export function GLImageFilter() {
       const blurSizeX = (size / 7) / fxcanvas.width;
       const blurSizeY = (size / 7) / fxcanvas.height;
       const program = compileShader(shaders.blur);
+      if (!program) return;
       // Vertical
       gl.uniform2f(program.uniform['px'], 0, blurSizeY);
       draw(DRAW.INTERMEDIATE);
@@ -360,6 +389,7 @@ export function GLImageFilter() {
       const blurSizeX = (size) / fxcanvas.width;
       const blurSizeY = (size) / fxcanvas.height;
       const program = compileShader(shaders.pixelate);
+      if (!program) return;
       gl.uniform2f(program.uniform['size'], blurSizeX, blurSizeY);
       draw();
     },
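The pattern repeated across the filter methods above, return `null` instead of throwing and then guard at each call site, keeps a failed WebGL setup from taking down the whole pipeline. Reduced to its core (an abstraction for illustration, not code from the repo):

```ts
// generic shape of the guard pattern used by compileShader() and its callers:
// log the failure, hand back null, and let each caller bail out gracefully
function tryCreate<T>(factory: () => T | null, label: string): T | null {
  const resource = factory();
  if (!resource) console.log(`filter: could not create ${label}`); // log and continue
  return resource;
}
```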
@@ -24,7 +24,6 @@ export async function load(config: Config): Promise<GraphModel> {
     model = await tf.loadGraphModel(join(config.modelBasePath, config.object.modelPath || ''));
     const inputs = Object.values(model.modelSignature['inputs']);
     model.inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : null;
-    if (!model.inputSize) throw new Error(`cannot determine model inputSize: ${config.object.modelPath}`);
     if (!model || !model.modelUrl) log('load model failed:', config.object.modelPath);
     else if (config.debug) log('load model:', model.modelUrl);
   } else if (config.debug) log('cached model:', model.modelUrl);
src/result.ts
@@ -4,6 +4,7 @@
 
 import type { Tensor } from './tfjs/types';
 import type { FaceGesture, BodyGesture, HandGesture, IrisGesture } from './gesture/gesture';
+import type { AnyCanvas } from './exports';
 
 /** generic box as [x, y, width, height] */
 export type Box = [number, number, number, number];
 
@@ -185,9 +186,11 @@ export interface Result {
   /** global performance object with timing values for each operation */
   performance: Record<string, number>,
   /** optional processed canvas that can be used to draw input on screen */
-  canvas?: OffscreenCanvas | HTMLCanvasElement | null | undefined,
+  canvas?: AnyCanvas | null,
   /** timestamp of detection representing the milliseconds elapsed since the UNIX epoch */
   readonly timestamp: number,
   /** getter property that returns unified persons object */
   persons: Array<PersonResult>,
+  /** @property Last known error message */
+  error: string | null;
 }
src/tfjs/backend.ts
@@ -77,7 +77,7 @@ export async function check(instance: Human, force = false) {
   if (instance.config.backend === 'wasm') {
     if (instance.config.debug) log('wasm path:', instance.config.wasmPath);
     if (typeof tf?.setWasmPaths !== 'undefined') await tf.setWasmPaths(instance.config.wasmPath);
-    else throw new Error('wasm backend is not loaded');
+    else throw new Error('backend error: attempting to use wasm backend but wasm path is not set');
     const simd = await tf.env().getAsync('WASM_HAS_SIMD_SUPPORT');
     const mt = await tf.env().getAsync('WASM_HAS_MULTITHREAD_SUPPORT');
     if (instance.config.debug) log(`wasm execution: ${simd ? 'SIMD' : 'no SIMD'} ${mt ? 'multithreaded' : 'singlethreaded'}`);
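For reference, the code path above only triggers when the `wasm` backend is selected; a hedged configuration sketch (the CDN path is an example, not taken from this commit):

```ts
import { Human } from '@vladmandic/human';

// select the wasm backend and point wasmPath at the tfjs-backend-wasm binaries;
// without a usable setWasmPaths, check() now throws the clearer error shown above
const human = new Human({
  backend: 'wasm',
  wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm/dist/', // example path
});
```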
|
@ -5,12 +5,13 @@ import { log } from '../util/util';
|
|||
import * as tf from '../../dist/tfjs.esm.js';
|
||||
import * as image from '../image/image';
|
||||
import * as models from '../models';
|
||||
import type { AnyCanvas } from '../exports';
|
||||
// import { env } from '../env';
|
||||
|
||||
export const config = {
|
||||
name: 'humangl',
|
||||
priority: 999,
|
||||
canvas: <null | OffscreenCanvas | HTMLCanvasElement>null,
|
||||
canvas: <null | AnyCanvas>null,
|
||||
gl: <null | WebGL2RenderingContext>null,
|
||||
extensions: <string[]> [],
|
||||
webGLattr: { // https://www.khronos.org/registry/webgl/specs/latest/1.0/#5.2
|
||||
|
@ -71,10 +72,9 @@ export async function register(instance: Human): Promise<void> {
|
|||
if (config.canvas) {
|
||||
config.canvas.addEventListener('webglcontextlost', async (e) => {
|
||||
log('error: humangl:', e.type);
|
||||
// log('gpu memory usage:', instance.tf.engine().backendInstance.numBytesInGPU);
|
||||
log('possible browser memory leak using webgl or conflict with multiple backend registrations');
|
||||
instance.emit('error');
|
||||
throw new Error('browser webgl error');
|
||||
throw new Error('backend error: webgl context lost');
|
||||
// log('resetting humangl backend');
|
||||
// env.initial = true;
|
||||
// models.reset(instance);
|
||||
|
|
|
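Since the handler above calls `instance.emit('error')` before throwing, callers can observe backend failures without wrapping every call in try/catch. A sketch assuming the instance exposes its event target as `human.events`, as in published releases:

```ts
import { Human } from '@vladmandic/human';

const human = new Human();

// subscribe once; fires for webgl context loss and other emitted errors
human.events?.addEventListener('error', () => {
  console.warn('human error event: last known error is', human.result?.error);
});
```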
src/util/env.ts
@@ -134,9 +134,13 @@ export class Env {
     }
     this.webgpu.supported = this.browser && typeof navigator['gpu'] !== 'undefined';
     this.webgpu.backend = this.backends.includes('webgpu');
-    if (this.webgpu.supported) this.webgpu.adapter = (await navigator['gpu'].requestAdapter()).name;
-    // enumerate kernels
-    this.kernels = tf.getKernelsForBackend(tf.getBackend()).map((kernel) => kernel.kernelName.toLowerCase());
+    try {
+      if (this.webgpu.supported) this.webgpu.adapter = (await navigator['gpu'].requestAdapter()).name;
+      // enumerate kernels
+      this.kernels = tf.getKernelsForBackend(tf.getBackend()).map((kernel) => kernel.kernelName.toLowerCase());
+    } catch {
+      this.webgpu.supported = false;
+    }
   }
 
   async updateCPU() {
src/util/interpolate.ts
@@ -11,12 +11,12 @@ import * as efficientPoseCoords from '../body/efficientposecoords';
 import { now } from './util';
 import { env } from './env';
 
-const bufferedResult: Result = { face: [], body: [], hand: [], gesture: [], object: [], persons: [], performance: {}, timestamp: 0 };
+const bufferedResult: Result = { face: [], body: [], hand: [], gesture: [], object: [], persons: [], performance: {}, timestamp: 0, error: null };
 let interpolateTime = 0;
 
 export function calc(newResult: Result, config: Config): Result {
   const t0 = now();
-  if (!newResult) return { face: [], body: [], hand: [], gesture: [], object: [], persons: [], performance: {}, timestamp: 0 };
+  if (!newResult) return { face: [], body: [], hand: [], gesture: [], object: [], persons: [], performance: {}, timestamp: 0, error: null };
   // each record is only updated using deep clone when number of detected record changes, otherwise it will converge by itself
   // otherwise bufferedResult is a shallow clone of result plus updated local calculated values
   // thus mixing by-reference and by-value assignments to minimize memory operations
 
@@ -31,7 +31,8 @@ export function calc(newResult: Result, config: Config): Result {
   // - at 1sec delay buffer = 1 which means live data is used
   const bufferedFactor = elapsed < 1000 ? 8 - Math.log(elapsed + 1) : 1;
 
-  bufferedResult.canvas = newResult.canvas;
+  if (newResult.canvas) bufferedResult.canvas = newResult.canvas;
+  if (newResult.error) bufferedResult.error = newResult.error;
 
   // interpolate body results
   if (!bufferedResult.body || (newResult.body.length !== bufferedResult.body.length)) {
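`calc()` is the engine behind result smoothing; with the change above the buffered copy now carries the last error forward and no longer clobbers the canvas with an undefined value. A usage sketch via the public wrapper, assuming `human.next` delegates to `calc` as in the published API:

```ts
import { Human } from '@vladmandic/human';

const human = new Human();

function renderLoop(canvas: HTMLCanvasElement) {
  // interpolate from the last detect() result for smooth drawing between detections
  const interpolated = human.next(human.result);
  if (!interpolated.error) human.draw.all(canvas, interpolated);
  requestAnimationFrame(() => renderLoop(canvas));
}
```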
src/util/util.ts
@@ -2,15 +2,6 @@
  * Simple helper functions used across codebase
  */
 
-// helper function: join two paths
-export function join(folder: string, file: string): string {
-  const separator = folder.endsWith('/') ? '' : '/';
-  const skipJoin = file.startsWith('.') || file.startsWith('/') || file.startsWith('http:') || file.startsWith('https:') || file.startsWith('file:');
-  const path = skipJoin ? `${file}` : `${folder}${separator}${file}`;
-  if (!path.toLocaleLowerCase().includes('.json')) throw new Error(`modelpath error: ${path} expecting json file`);
-  return path;
-}
-
 // helper function: wrapper around console output
 export function log(...msg): void {
   const dt = new Date();
 
@@ -19,6 +10,15 @@ export function log(...msg): void {
   if (msg) console.log(ts, 'Human:', ...msg);
 }
 
+// helper function: join two paths
+export function join(folder: string, file: string): string {
+  const separator = folder.endsWith('/') ? '' : '/';
+  const skipJoin = file.startsWith('.') || file.startsWith('/') || file.startsWith('http:') || file.startsWith('https:') || file.startsWith('file:');
+  const path = skipJoin ? `${file}` : `${folder}${separator}${file}`;
+  if (!path.toLocaleLowerCase().includes('.json')) throw new Error(`modelpath error: expecting json file: ${path}`);
+  return path;
+}
+
 // helper function: gets elapsed time on both browser and nodejs
 export const now = () => {
   if (typeof performance !== 'undefined') return performance.now();
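The relocated `join` helper behaves the same; worked examples of its path logic (values illustrative, the import path assumes the repo layout):

```ts
import { join } from './src/util/util'; // path is an assumption

console.log(join('https://host/models', 'facemesh.json')); // 'https://host/models/facemesh.json'
console.log(join('models/', 'facemesh.json'));             // 'models/facemesh.json' (no double slash)
console.log(join('models', 'https://cdn/m.json'));         // absolute URL passes through unchanged
// join('models', 'weights.bin') throws: 'modelpath error: expecting json file: models/weights.bin'
```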
src/warmup.ts
@@ -107,7 +107,7 @@ export async function warmup(instance: Human, userConfig?: Partial<Config>): Pro
   const t0 = now();
   instance.state = 'warmup';
   if (userConfig) instance.config = mergeDeep(instance.config, userConfig) as Config;
-  if (!instance.config.warmup || instance.config.warmup === 'none') return { error: 'null' };
+  if (!instance.config.warmup || instance.config.warmup.length === 0 || instance.config.warmup === 'none') return { error: 'null' };
   let res;
   return new Promise(async (resolve) => {
     if (typeof createImageBitmap === 'function') res = await warmupBitmap(instance);
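The relaxed guard above also treats a zero-length `warmup` string as disabled. Typical explicit usage stays the same; a short sketch:

```ts
import { Human } from '@vladmandic/human';

const human = new Human({ warmup: 'face' });

async function init() {
  await human.load();   // preload configured models
  await human.warmup(); // run a dummy inference pass; resolves early when warmup is '' or 'none'
}
```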
@@ -104,8 +104,8 @@ async function testAll() {
   log.info('demos:', demos);
   // for (const demo of demos) await runDemo(demo);
   for (const test of tests) await runTest(test);
-  log.info();
-  log.info('failed', failedMessages);
+  log.info('all tests complete');
+  log.info('failed:', { count: failedMessages.length, messages: failedMessages });
   log.info('status:', status);
 }
 
@@ -314,7 +314,7 @@ async function test(Human, inputConfig) {
   log('info', 'test: image null');
   res = await human.detect(null);
   if (!res || !res.error) log('error', 'failed: invalid input', res);
-  else log('state', 'passed: invalid input', res);
+  else log('state', 'passed: invalid input', res.error || res);
 
   // test face similarity
   log('info', 'test face similarity');
tsconfig.json
@@ -33,7 +33,7 @@
     "noUncheckedIndexedAccess": false,
     "noUnusedLocals": false,
     "noUnusedParameters": true,
-    "preserveConstEnums": false,
+    "preserveConstEnums": true,
     "pretty": true,
     "removeComments": false,
     "resolveJsonModule": true,
wiki
@@ -1 +1 @@
-Subproject commit e26b155506e7981fa8187be228b5651de77ee8c6
+Subproject commit e0e2b9a2ac15a4569abc1e8281e7636de2c45aef