mirror of https://github.com/vladmandic/human

new frame change detection algorithm

parent 51f4b1fa20
commit cb461be486

@@ -126,7 +126,7 @@
   "format": "iife",
   "input": "src/human.ts",
   "output": "dist/human.js",
-  "minify": true,
+  "minify": false,
   "globalName": "Human",
   "external": ["fs", "os", "buffer", "util"]
 },

@@ -11,11 +11,10 @@

 ### **HEAD -> main** 2021/11/05 mandic00@live.com

+- add histogram equalization
 - implement wasm missing ops
 - performance and memory optimizations
-
-### **origin/main** 2021/11/04 mandic00@live.com
+- fix react compatibility issues

 - improve box rescaling for all modules
 - improve precision using wasm backend
 - refactor predict with execute

TODO.md

@@ -41,17 +41,20 @@ MoveNet MultiPose model does not work with WASM backend due to missing F32 broad

 ### Pending release

 - Supports all modules on all backends
   via custom implementation of missing kernel ops
-- Performance and precision improvements
-  **face** and **hand** modules
-- Use custom built TFJS for bundled version
-  reduced bundle size and built-in support for all backends
-  `nobundle` and `node` versions link to standard `@tensorflow` packages
-- Add optional input histogram equalization
-  auto-level input for optimal brightness/contrast via `config.filter.equalization`
+- New frame change detection algorithm used for cache determination
+  based on temporal input difference
+- New optional input histogram equalization
+  auto-level input for optimal brightness/contrast via `config.filter.equalization`
+- Performance and precision improvements
+  **face**, **hand** and **gestures** modules
+- Use custom built TFJS for bundled version
+  reduced bundle size and built-in support for all backends
+  `nobundle` and `node` versions link to standard `@tensorflow` packages
 - Fix **ReactJS** compatibility
 - Better precision using **WASM**
   Previous issues due to math low-precision in WASM implementation
 - Full **TS** type definitions for all modules and imports
 - Focus on simplified demo
+  <https://vladmandic.github.io/human/demo/typescript/>

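Both new features listed above are driven by plain user configuration. A minimal usage sketch (assuming the published Human ESM bundle; option names and defaults match src/config.ts further below):

  import Human from '@vladmandic/human';

  const human = new Human({
    // relative temporal difference above which a frame counts as changed and caches are reset;
    // 0 disables frame caching entirely (library default is 0.70, see src/config.ts)
    cacheSensitivity: 0.70,
    // auto-level input for optimal brightness/contrast before detection
    filter: { enabled: true, equalization: true },
  });
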
@@ -9,25 +9,28 @@
 import Human from '../../dist/human.esm.js';

 const userConfig = {
-  backend: 'wasm',
-  async: false,
+  backend: 'humangl',
+  async: true,
   warmup: 'none',
   cacheSensitivity: 0,
   debug: true,
   modelBasePath: '../../models/',
+  deallocate: true,
+  filter: {
+    enabled: true,
+    equalization: true,
+  },
   face: {
     enabled: true,
     detector: { rotation: true, return: true, maxDetected: 50 },
     mesh: { enabled: true },
-    embedding: { enabled: false },
-    iris: { enabled: true },
+    iris: { enabled: false },
     emotion: { enabled: true },
     description: { enabled: true },
   },
   hand: { enabled: false },
-  gesture: { enabled: true },
+  gesture: { enabled: false },
   body: { enabled: false },
-  filter: { enabled: true },
   segmentation: { enabled: false },
 };

@@ -73,9 +76,7 @@ async function SelectFaceCanvas(face) {
     const squeeze = human.tf.squeeze(enhanced);
     const normalize = human.tf.div(squeeze, 255);
     await human.tf.browser.toPixels(normalize, c);
-    human.tf.dispose(enhanced);
-    human.tf.dispose(squeeze);
-    human.tf.dispose(normalize);
+    human.tf.dispose([enhanced, squeeze, normalize]);
     const ctx = c.getContext('2d');
     ctx.font = 'small-caps 0.4rem "Lato"';
     ctx.fillStyle = 'rgba(255, 255, 255, 1)';

@@ -134,7 +135,7 @@ async function SelectFaceCanvas(face) {
   title('Selected Face');
 }

-async function AddFaceCanvas(index, res, fileName) {
+function AddFaceCanvas(index, res, fileName) {
   all[index] = res.face;
   let ok = false;
   for (const i in res.face) {

@@ -161,7 +162,7 @@ async function AddFaceCanvas(index, res, fileName) {
     });
     // if we actually got face image tensor, draw canvas with that face
     if (res.face[i].tensor) {
-      await human.tf.browser.toPixels(res.face[i].tensor, canvas);
+      human.tf.browser.toPixels(res.face[i].tensor, canvas);
       document.getElementById('faces').appendChild(canvas);
       const ctx = canvas.getContext('2d');
       if (!ctx) return false;

@@ -169,7 +170,7 @@ async function AddFaceCanvas(index, res, fileName) {
       ctx.fillStyle = 'rgba(255, 255, 255, 1)';
      ctx.fillText(`${res.face[i].age}y ${(100 * (res.face[i].genderScore || 0)).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6);
       const arr = db.map((rec) => rec.embedding);
-      const result = await human.match(res.face[i].embedding, arr);
+      const result = human.match(res.face[i].embedding, arr);
       ctx.font = 'small-caps 1rem "Lato"';
       if (result.similarity && res.similarity > minScore) ctx.fillText(`${(100 * result.similarity).toFixed(1)}% ${db[result.index].name}`, 4, canvas.height - 30);
     }

@@ -184,7 +185,7 @@ async function AddImageElement(index, image, length) {
     const img = new Image(128, 128);
     img.onload = () => { // must wait until image is loaded
       human.detect(img, userConfig).then(async (res) => {
-        const ok = await AddFaceCanvas(index, res, image); // then wait until image is analyzed
+        const ok = AddFaceCanvas(index, res, image); // then wait until image is analyzed
         // log('Add image:', index + 1, image, 'faces:', res.face.length);
         if (ok) document.getElementById('images').appendChild(img); // and finally we can add it
         resolve(true);

@@ -199,7 +200,7 @@ async function AddImageElement(index, image, length) {
   });
 }

-async function createFaceMatchDB() {
+function createFaceMatchDB() {
   log('Creating Faces DB...');
   for (const image of all) {
     for (const face of image) db.push({ name: 'unknown', source: face.fileName, embedding: face.embedding });

@@ -246,6 +247,9 @@ async function main() {
   // images = ['/samples/in/solvay1927.jpg'];

   // download and analyze all images
+  // const promises = [];
+  // for (let i = 0; i < images.length; i++) promises.push(AddImageElement(i, images[i], images.length));
+  // await Promise.all(promises);
   for (let i = 0; i < images.length; i++) await AddImageElement(i, images[i], images.length);

   // print stats

@@ -254,7 +258,7 @@ async function main() {
   log(human.tf.engine().memory());

   // if we didn't download db, generate it from current faces
-  if (!db || db.length === 0) await createFaceMatchDB();
+  if (!db || db.length === 0) createFaceMatchDB();

   title('');
   log('Ready');

@@ -712,6 +712,7 @@ function setupMenu() {

   menu.image = new Menu(document.body, '', { top, left: x[1] });
   menu.image.addBool('enabled', userConfig.filter, 'enabled', (val) => userConfig.filter.enabled = val);
+  menu.image.addBool('histogram equalization', userConfig.filter, 'equalization', (val) => userConfig.filter.equalization = val);
   ui.menuWidth = menu.image.addRange('image width', userConfig.filter, 'width', 0, 3840, 10, (val) => userConfig.filter.width = parseInt(val));
   ui.menuHeight = menu.image.addRange('image height', userConfig.filter, 'height', 0, 2160, 10, (val) => userConfig.filter.height = parseInt(val));
   menu.image.addHTML('<hr style="border-style: inset; border-color: dimgray">');

@@ -7,7 +7,8 @@
 // demo/typescript/index.ts
 import Human from "../../dist/human.esm.js";
 var humanConfig = {
-  modelBasePath: "../../models"
+  modelBasePath: "../../models",
+  filter: { equalization: false }
 };
 var human = new Human(humanConfig);
 human.env["perfadd"] = false;

@@ -79,8 +80,8 @@ async function drawLoop() {
   setTimeout(drawLoop, 30);
 }
 async function main() {
-  log("human version:", human.version, "tfjs version:", human.tf.version_core);
-  log("platform:", human.env.platform, "agent:", human.env.agent);
+  log("human version:", human.version, "| tfjs version:", human.tf.version_core);
+  log("platform:", human.env.platform, "| agent:", human.env.agent);
   status("loading...");
   await human.load();
   log("backend:", human.tf.getBackend(), "| available:", human.env.backends);

@@ -13,7 +13,7 @@ import Human from '../../dist/human.esm.js'; // equivalent of @vladmandic/human

 const humanConfig = { // user configuration for human, used to fine-tune behavior
   modelBasePath: '../../models',
-  filter: { equalization: true },
+  filter: { equalization: false },
   // backend: 'webgpu',
   // async: true,
   // face: { enabled: false, detector: { rotation: true }, iris: { enabled: false }, description: { enabled: false }, emotion: { enabled: false } },

@@ -99,8 +99,8 @@ async function drawLoop() { // main screen refresh loop
 }

 async function main() { // main entry point
-  log('human version:', human.version, 'tfjs version:', human.tf.version_core);
-  log('platform:', human.env.platform, 'agent:', human.env.agent);
+  log('human version:', human.version, '| tfjs version:', human.tf.version_core);
+  log('platform:', human.env.platform, '| agent:', human.env.agent);
   status('loading...');
   await human.load(); // preload all models
   log('backend:', human.tf.getBackend(), '| available:', human.env.backends);

@@ -74,7 +74,7 @@
     "canvas": "^2.8.0",
     "dayjs": "^1.10.7",
     "esbuild": "^0.13.12",
-    "eslint": "8.1.0",
+    "eslint": "8.2.0",
     "eslint-config-airbnb-base": "^14.2.1",
     "eslint-plugin-html": "^6.2.0",
     "eslint-plugin-import": "^2.25.2",

@@ -122,7 +122,9 @@ export interface SegmentationConfig extends GenericConfig {
 export interface FilterConfig {
   /** @property are image filters enabled? */
   enabled: boolean,
-  /** @property perform image histogram equalization */
+  /** @property perform image histogram equalization
+   * - equalization is performed on input as a whole and detected face before it's passed for further analysis
+  */
   equalization: boolean,
   /** resize input width
   * - if both width and height are set to 0, there is no resizing

@@ -229,6 +231,9 @@ export interface Config {
   */
   cacheSensitivity: number;

+  /** Perform immediate garbage collection on deallocated tensors instead of caching them */
+  deallocate: boolean;
+
   /** Internal Variable */
   skipAllowed: boolean;

@@ -264,6 +269,7 @@ const config: Config = {
   warmup: 'full',
   cacheSensitivity: 0.70,
   skipAllowed: false,
+  deallocate: false,
   filter: {
     enabled: true,
     equalization: false,

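The new deallocate flag documented above is a memory/throughput trade-off; a minimal usage sketch (assuming the standard constructor, since this diff only shows the type declaration and default value):

  import Human from '@vladmandic/human';

  // garbage-collect deallocated tensors immediately instead of caching them for reuse;
  // lowers memory pressure on constrained devices, typically at some performance cost
  const human = new Human({ deallocate: true });
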
@@ -13,10 +13,11 @@ import * as blazeface from './blazeface';
 import * as util from './facemeshutil';
 import * as coords from './facemeshcoords';
 import * as iris from './iris';
+import { histogramEqualization } from '../image/enhance';
+import { env } from '../util/env';
 import type { GraphModel, Tensor } from '../tfjs/types';
 import type { FaceResult, Point } from '../result';
 import type { Config } from '../config';
-import { env } from '../util/env';

 type BoxCache = { startPoint: Point, endPoint: Point, landmarks: Array<Point>, confidence: number };
 let boxCache: Array<BoxCache> = [];

@@ -73,6 +74,11 @@ export async function predict(input: Tensor, config: Config): Promise<FaceResult
       rotationMatrix = util.fixedRotationMatrix;
       face.tensor = util.cutBoxFromImageAndResize(box, input, config.face.mesh?.enabled ? [inputSize, inputSize] : [blazeface.size(), blazeface.size()]);
     }
+    if (config?.filter?.equalization) {
+      const equilized = await histogramEqualization(face.tensor as Tensor);
+      tf.dispose(face.tensor);
+      face.tensor = equilized;
+    }
     face.boxScore = Math.round(100 * box.confidence) / 100;
     if (!config.face.mesh?.enabled) { // mesh not enabled, return results from detector only
       face.box = util.getClampedBox(box, input);

@@ -41,6 +41,9 @@ export function enhance(input): Tensor {
   if (!model?.inputs[0].shape) return tensor; // model has no shape so no point continuing
   // do a tight crop of image and resize it to fit the model
   const crop = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
+  const norm = tf.mul(crop, 255);
+  tf.dispose(crop);
+  return norm;
   /*
   const box = [[0.05, 0.15, 0.85, 0.85]]; // empirical values for top, left, bottom, right
   const crop = (tensor.shape.length === 3)

@@ -78,9 +81,6 @@ export function enhance(input): Tensor {
   const darken = crop.sub(crop.min());
   const lighten = darken.div(darken.max());
   */
-  const norm = tf.mul(crop, 255);
-  tf.dispose(crop);
-  return norm;
 }

 export async function predict(image: Tensor, config: Config, idx, count) {

@@ -54,7 +54,9 @@ export const body = (res): GestureResult[] => {
     // leaning
     const leftShoulder = res[i].keypoints.find((a) => (a.part === 'leftShoulder'));
     const rightShoulder = res[i].keypoints.find((a) => (a.part === 'rightShoulder'));
-    if (leftShoulder && rightShoulder) gestures.push({ body: i, gesture: `leaning ${(leftShoulder.position[1] > rightShoulder.position[1]) ? 'left' : 'right'}` });
+    if (leftShoulder && rightShoulder && Math.abs(leftShoulder.positionRaw[1] - rightShoulder.positionRaw[1]) > 0.1) {
+      gestures.push({ body: i, gesture: `leaning ${(leftShoulder.position[1] > rightShoulder.position[1]) ? 'left' : 'right'}` });
+    }
   }
   return gestures;
 };

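For scale: positionRaw coordinates are normalized to the input size (0..1), so the new guard requires the shoulders to differ vertically by more than roughly 10% of frame height before a leaning gesture is emitted. A worked sketch with hypothetical keypoint values (not taken from the diff):

  // hypothetical shoulder keypoints for a 640x480 input
  const leftShoulder = { position: [210, 340], positionRaw: [0.33, 0.71] };
  const rightShoulder = { position: [430, 280], positionRaw: [0.67, 0.58] };
  const delta = Math.abs(leftShoulder.positionRaw[1] - rightShoulder.positionRaw[1]); // 0.13
  // 0.13 > 0.1, so a gesture is reported; the left shoulder is lower on screen, hence 'leaning left'
  const gesture = (delta > 0.1) ? `leaning ${(leftShoulder.position[1] > rightShoulder.position[1]) ? 'left' : 'right'}` : null;
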
@@ -405,7 +405,7 @@ export class Human {

     timeStamp = now();
     this.state = 'image';
-    const img = image.process(input, this.config) as { canvas: HTMLCanvasElement | OffscreenCanvas, tensor: Tensor };
+    const img = await image.process(input, this.config) as { canvas: HTMLCanvasElement | OffscreenCanvas, tensor: Tensor };
     this.process = img;
     this.performance.inputProcess = this.env.perfadd ? (this.performance.inputProcess || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
     this.analyze('Get Image:');

@@ -423,7 +423,7 @@ export class Human {
     if (!this.performance.cachedFrames) this.performance.cachedFrames = 0;
     (this.performance.totalFrames as number)++;
     if (this.config.skipAllowed) this.performance.cachedFrames++;
-    this.performance.inputCheck = this.env.perfadd ? (this.performance.inputCheck || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
+    this.performance.cacheCheck = this.env.perfadd ? (this.performance.cacheCheck || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
     this.analyze('Check Changed:');

     // prepare where to store model results

@@ -5,16 +5,20 @@
 import * as tf from '../../dist/tfjs.esm.js';
 import type { Tensor } from '../exports';

-export function histogramEqualization(input: Tensor): Tensor {
-  const channels = tf.split(input, 3, 2);
+export async function histogramEqualization(inputImage: Tensor): Promise<Tensor> {
+  // const maxValue = 254; // using 255 results in values slightly larger than 1 due to math rounding errors
+  const squeeze = inputImage.shape.length === 4 ? tf.squeeze(inputImage) : inputImage;
+  const channels = tf.split(squeeze, 3, 2);
   const min: Tensor[] = [tf.min(channels[0]), tf.min(channels[1]), tf.min(channels[2])];
   const max: Tensor[] = [tf.max(channels[0]), tf.max(channels[1]), tf.max(channels[2])];
+  const absMax = await Promise.all(max.map((channel) => channel.data()));
+  const maxValue = 0.99 * Math.max(absMax[0][0], absMax[1][0], absMax[2][0]);
   const sub = [tf.sub(channels[0], min[0]), tf.sub(channels[1], min[1]), tf.sub(channels[2], min[2])];
   const range = [tf.sub(max[0], min[0]), tf.sub(max[1], min[1]), tf.sub(max[2], min[2])];
-  const fact = [tf.div(255, range[0]), tf.div(255, range[1]), tf.div(255, range[2])];
+  const fact = [tf.div(maxValue, range[0]), tf.div(maxValue, range[1]), tf.div(maxValue, range[2])];
   const enh = [tf.mul(sub[0], fact[0]), tf.mul(sub[1], fact[1]), tf.mul(sub[2], fact[2])];
   const rgb = tf.stack([enh[0], enh[1], enh[2]], 2);
-  const reshape = tf.reshape(rgb, [1, input.shape[0], input.shape[1], 3]);
-  tf.dispose([...channels, ...min, ...max, ...sub, ...range, ...fact, ...enh, rgb]);
-  return reshape;
+  const reshape = tf.reshape(rgb, [1, squeeze.shape[0], squeeze.shape[1], 3]);
+  tf.dispose([...channels, ...min, ...max, ...sub, ...range, ...fact, ...enh, rgb, squeeze]);
+  return reshape; // output shape is [1, height, width, 3]
 }

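For intuition, the equalization above is a per-channel contrast stretch: each channel is shifted to a zero minimum, then rescaled to a common upper bound of 99% of the brightest channel's maximum (the 0.99 factor avoids rounding past the valid range). A worked numeric sketch with hypothetical pixel values:

  // out = (x - min) * maxValue / (max - min), with maxValue = 0.99 * max of all channel maxima
  const pixel = [40, 120, 200];              // one RGB pixel
  const min = [20, 100, 60];                 // per-channel minima of the image
  const max = [220, 180, 240];               // per-channel maxima of the image
  const maxValue = 0.99 * Math.max(...max);  // 0.99 * 240 = 237.6
  const out = pixel.map((x, c) => (x - min[c]) * maxValue / (max[c] - min[c]));
  // out[0] = (40 - 20) * 237.6 / 200 = 23.76, etc.; every channel is stretched to ~[0, 237.6]
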
@@ -6,7 +6,7 @@ import * as tf from '../../dist/tfjs.esm.js';
 import * as fxImage from './imagefx';
 import type { Input, AnyCanvas, Tensor, Config } from '../exports';
 import { env } from '../util/env';
-import { log, now } from '../util/util';
+import { log } from '../util/util';
 import * as enhance from './enhance';

 const maxSize = 2048;

@@ -17,6 +17,13 @@ let tmpCanvas: AnyCanvas | null = null; // use global variable to avoid recreati
 // @ts-ignore // imagefx is js module that should be converted to a class
 let fx: fxImage.GLImageFilter | null; // instance of imagefx

+const last: { inputSum: number, cacheDiff: number, sumMethod: number, inputTensor: undefined | Tensor } = {
+  inputSum: 0,
+  cacheDiff: 1,
+  sumMethod: 0,
+  inputTensor: undefined,
+};
+
 export function canvas(width, height): AnyCanvas {
   let c;
   if (env.browser) { // browser defines canvas object

@@ -48,7 +55,7 @@ export function copy(input: AnyCanvas, output?: AnyCanvas) {
 // process input image and return tensor
 // input can be tensor, imagedata, htmlimageelement, htmlvideoelement
 // input is resized and run through imagefx filter
-export function process(input: Input, config: Config, getTensor: boolean = true): { tensor: Tensor | null, canvas: AnyCanvas | null } {
+export async function process(input: Input, config: Config, getTensor: boolean = true): Promise<{ tensor: Tensor | null, canvas: AnyCanvas | null }> {
   if (!input) {
     // throw new Error('input is missing');
     if (config.debug) log('input is missing');

@@ -108,7 +115,7 @@ export function process(input: Input, config: Config, getTensor: boolean = true)
   if ((config.filter.height || 0) > 0) targetHeight = config.filter.height;
   else if ((config.filter.width || 0) > 0) targetHeight = originalHeight * ((config.filter.width || 0) / originalWidth);
   if (!targetWidth || !targetHeight) throw new Error('input cannot determine dimension');
-  if (!inCanvas || (inCanvas.width !== targetWidth) || (inCanvas.height !== targetHeight)) inCanvas = canvas(targetWidth, targetHeight);
+  if (!inCanvas || (inCanvas?.width !== targetWidth) || (inCanvas?.height !== targetHeight)) inCanvas = canvas(targetWidth, targetHeight);

   // draw input to our canvas
   const inCtx = inCanvas.getContext('2d') as CanvasRenderingContext2D;

@@ -118,14 +125,14 @@ export function process(input: Input, config: Config, getTensor: boolean = true)
     if (config.filter.flip && typeof inCtx.translate !== 'undefined') {
       inCtx.translate(originalWidth, 0);
       inCtx.scale(-1, 1);
-      inCtx.drawImage(input as AnyCanvas, 0, 0, originalWidth, originalHeight, 0, 0, inCanvas.width, inCanvas.height);
+      inCtx.drawImage(input as AnyCanvas, 0, 0, originalWidth, originalHeight, 0, 0, inCanvas?.width, inCanvas?.height);
       inCtx.setTransform(1, 0, 0, 1, 0, 0); // resets transforms to defaults
     } else {
-      inCtx.drawImage(input as AnyCanvas, 0, 0, originalWidth, originalHeight, 0, 0, inCanvas.width, inCanvas.height);
+      inCtx.drawImage(input as AnyCanvas, 0, 0, originalWidth, originalHeight, 0, 0, inCanvas?.width, inCanvas?.height);
     }
   }

-  if (!outCanvas || (inCanvas.width !== outCanvas.width) || (inCanvas.height !== outCanvas.height)) outCanvas = canvas(inCanvas.width, inCanvas.height); // init output canvas
+  if (!outCanvas || (inCanvas.width !== outCanvas.width) || (inCanvas?.height !== outCanvas?.height)) outCanvas = canvas(inCanvas.width, inCanvas.height); // init output canvas

   // imagefx transforms using gl from input canvas to output canvas
   if (config.filter.enabled && env.webgl.supported) {

@@ -192,26 +199,16 @@ export function process(input: Input, config: Config, getTensor: boolean = true)
       const rgb = tf.slice3d(pixels, [0, 0, 0], [-1, -1, 3]); // strip alpha channel
       tf.dispose(pixels);
       pixels = rgb;
-      /*
-      const channels = tf.split(pixels, 4, 2); // split rgba to channels
-      tf.dispose(pixels);
-      const rgb = tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb and ignore alpha
-      pixels = tf.reshape(rgb, [rgb.shape[0], rgb.shape[1], 3]); // move extra dim from the end of tensor and use it as batch number instead
-      tf.dispose([rgb, ...channels]);
-      */
     }
     if (!pixels) throw new Error('cannot create tensor from input');
     const casted = tf.cast(pixels, 'float32');
-    const tensor = config.filter.equalization ? enhance.histogramEqualization(casted) : tf.expandDims(casted, 0);
+    const tensor = config.filter.equalization ? await enhance.histogramEqualization(casted) : tf.expandDims(casted, 0);
     tf.dispose([pixels, casted]);
     return { tensor, canvas: (config.filter.return ? outCanvas : null) };
   }
 }

-let lastInputSum = 0;
-let lastCacheDiff = 1;
-let benchmarked = 0;
+/*

 const checksum = async (input: Tensor): Promise<number> => { // use tf sum or js based sum loop depending on which is faster
   const resizeFact = 48;
   const reduced: Tensor = tf.image.resizeBilinear(input, [Math.trunc((input.shape[1] || 1) / resizeFact), Math.trunc((input.shape[2] || 1) / resizeFact)]);

@@ -227,29 +224,51 @@ const checksum = async (input: Tensor): Promise<number> => { // use tf sum or js
     for (let i = 0; i < reducedData.length / 3; i++) sum0 += reducedData[3 * i + 2]; // look only at green value of each pixel
     return sum0;
   };
-  if (benchmarked === 0) {
+  if (last.sumMethod === 0) {
     const t0 = now();
     await jsSum();
     const t1 = now();
     await tfSum();
     const t2 = now();
-    benchmarked = t1 - t0 < t2 - t1 ? 1 : 2;
+    last.sumMethod = t1 - t0 < t2 - t1 ? 1 : 2;
   }
-  const res = benchmarked === 1 ? await jsSum() : await tfSum();
+  const res = last.sumMethod === 1 ? await jsSum() : await tfSum();
   tf.dispose(reduced);
   return res;
 };
+*/

 export async function skip(config, input: Tensor) {
-  if (config.cacheSensitivity === 0) return false;
-  const sum = await checksum(input);
-  const diff = 100 * (Math.max(sum, lastInputSum) / Math.min(sum, lastInputSum) - 1);
-  lastInputSum = sum;
-  // if previous frame was skipped, skip this frame if changed more than cacheSensitivity
-  // if previous frame was not skipped, then look for cacheSensitivity or difference larger than one in previous frame to avoid resetting cache in subsequent frames unnecessarily
-  let skipFrame = diff < Math.max(config.cacheSensitivity, lastCacheDiff);
-  // if difference is above 10x threshold, don't use last value to force reset cache for significant change of scenes or images
-  lastCacheDiff = diff > 10 * config.cacheSensitivity ? 0 : diff;
-  skipFrame = skipFrame && (lastCacheDiff > 0); // if no cached diff value then force no skip
+  let skipFrame = false;
+  if (config.cacheSensitivity === 0) return skipFrame;
+
+  /*
+  const checkSum = await checksum(input);
+  const diff = 100 * (Math.max(checkSum, last.inputSum) / Math.min(checkSum, last.inputSum) - 1);
+  last.inputSum = checkSum;
+  // if previous frame was skipped, skip this frame if changed more than cacheSensitivity
+  // if previous frame was not skipped, then look for cacheSensitivity or difference larger than one in previous frame to avoid resetting cache in subsequent frames unnecessarily
+  let skipFrame = diff < Math.max(config.cacheSensitivity, last.cacheDiff);
+  // if difference is above 10x threshold, don't use last value to force reset cache for significant change of scenes or images
+  last.cacheDiff = diff > 10 * config.cacheSensitivity ? 0 : diff;
+  skipFrame = skipFrame && (last.cacheDiff > 0); // if no cached diff value then force no skip
+  */
+
+  if (!last.inputTensor) {
+    last.inputTensor = tf.clone(input);
+  } else if (last.inputTensor.shape[1] !== input.shape[1] || last.inputTensor.shape[2] !== input.shape[2]) { // input resolution changed
+    tf.dispose(last.inputTensor);
+    last.inputTensor = tf.clone(input);
+  } else {
+    const t: Record<string, Tensor> = {};
+    t.diff = tf.sub(input, last.inputTensor);
+    t.squared = tf.mul(t.diff, t.diff);
+    t.sum = tf.sum(t.squared);
+    const diffSum = await t.sum.data();
+    const diffRelative = diffSum[0] / (input.shape[1] || 1) / (input.shape[2] || 1) / 255 / 3; // squared difference relative to input resolution and averaged per channel
+    tf.dispose([last.inputTensor, t.diff, t.squared, t.sum]);
+    last.inputTensor = tf.clone(input);
+    skipFrame = diffRelative <= config.cacheSensitivity;
+  }
   return skipFrame;
 }

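This is the new frame change detection algorithm from the commit title: instead of comparing checksums, it keeps a clone of the previous input tensor and computes the mean squared per-pixel difference scaled by 255 and the channel count, i.e. diffRelative = sum((a - b)^2) / height / width / 255 / 3. A worked numeric sketch with hypothetical values:

  // assume a 640x480 RGB frame where each channel value drifts by ~8 between frames
  const [height, width] = [480, 640];
  const meanSquaredDiff = 64;                               // 8^2 per channel
  const diffSum = meanSquaredDiff * height * width * 3;     // what tf.sum over the squared diff would return
  const diffRelative = diffSum / height / width / 255 / 3;  // 64 / 255 ≈ 0.251
  const skipFrame = diffRelative <= 0.70;                   // true with default cacheSensitivity, so cached results are reused
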
@@ -31,7 +31,7 @@ export async function process(input: Input, background: Input | undefined, confi
   if (busy) return { data: [], canvas: null, alpha: null };
   busy = true;
   if (!model) await load(config);
-  const inputImage = image.process(input, config);
+  const inputImage = await image.process(input, config);
   const width = inputImage.canvas?.width || 0;
   const height = inputImage.canvas?.height || 0;
   if (!inputImage.tensor) return { data: [], canvas: null, alpha: null };

@@ -85,7 +85,7 @@ export async function process(input: Input, background: Input | undefined, confi
   let mergedCanvas: HTMLCanvasElement | OffscreenCanvas | null = null;
   if (background && compositeCanvas) { // draw background with segmentation as overlay if background is present
     mergedCanvas = image.canvas(width, height);
-    const bgImage = image.process(background, config);
+    const bgImage = await image.process(background, config);
     tf.dispose(bgImage.tensor);
     const ctxMerge = mergedCanvas.getContext('2d') as CanvasRenderingContext2D;
     ctxMerge.drawImage(bgImage.canvas as HTMLCanvasElement, 0, 0, mergedCanvas.width, mergedCanvas.height);

@@ -13,6 +13,7 @@ function registerCustomOps() {
       kernelFunc: (op) => tf.tidy(() => tf.sub(op.inputs.a, tf.mul(tf.div(op.inputs.a, op.inputs.b), op.inputs.b))),
     };
     tf.registerKernel(kernelMod);
+    env.kernels.push('mod');
   }
   if (!env.kernels.includes('floormod')) {
     const kernelMod = {

@@ -21,8 +22,8 @@ function registerCustomOps() {
       kernelFunc: (op) => tf.tidy(() => tf.floorDiv(op.inputs.a / op.inputs.b) * op.inputs.b + tf.mod(op.inputs.a, op.inputs.b)),
     };
     tf.registerKernel(kernelMod);
+    env.kernels.push('floormod');
   }
-  env.updateBackend();
 }

 export async function check(instance, force = false) {

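The new env.kernels.push(...) calls record each polyfilled kernel in the environment inventory, which is what the includes guards above test, so re-running registerCustomOps after a backend switch does not register the same kernel twice. A minimal sketch of the guard-then-track pattern, using a hypothetical kernel name and assuming the tf and env imports already present in this module:

  // hypothetical 'myop' kernel; not part of this commit
  if (!env.kernels.includes('myop')) {
    tf.registerKernel({
      kernelName: 'MyOp',
      backendName: tf.getBackend(),
      kernelFunc: (op) => tf.tidy(() => tf.add(op.inputs.a, op.inputs.b)), // placeholder implementation
    });
    env.kernels.push('myop'); // remember the registration so the guard skips it next time
  }
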
@@ -123,8 +124,9 @@ export async function check(instance, force = false) {
     instance.performance.initBackend = Math.trunc(now() - timeStamp);
     instance.config.backend = tf.getBackend();

-    env.updateBackend(); // update env on backend init
+    await env.updateBackend(); // update env on backend init
     registerCustomOps();
+    // await env.updateBackend(); // update env on backend init
   }
   return true;
 }

@@ -68,13 +68,11 @@ export async function register(instance): Promise<void> {
       log('possible browser memory leak using webgl or conflict with multiple backend registrations');
       instance.emit('error');
       throw new Error('browser webgl error');
-      /*
-      log('resetting humangl backend');
-      env.initial = true;
-      models.reset(instance);
-      await tf.removeBackend(config.name);
-      await register(instance); // re-register
-      */
+      // log('resetting humangl backend');
+      // env.initial = true;
+      // models.reset(instance);
+      // await tf.removeBackend(config.name);
+      // await register(instance); // re-register
     });
     config.canvas.addEventListener('webglcontextrestored', (e) => {
       log('error: humangl context restored:', e);

@@ -192,7 +192,7 @@ async function test(Human, inputConfig) {
   else log('state', 'passed: warmup face result match');
   config.warmup = 'body';
   res = await testWarmup(human, 'default');
-  if (!res || res?.face?.length !== 1 || res?.body?.length !== 1 || res?.hand?.length !== 1 || res?.gesture?.length !== 6) log('error', 'failed: warmup body result mismatch', res?.face?.length, res?.body?.length, res?.hand?.length, res?.gesture?.length);
+  if (!res || res?.face?.length !== 1 || res?.body?.length !== 1 || res?.hand?.length !== 1 || res?.gesture?.length !== 5) log('error', 'failed: warmup body result mismatch', res?.face?.length, res?.body?.length, res?.hand?.length, res?.gesture?.length);
   else log('state', 'passed: warmup body result match');
   log('state', 'details:', {
     face: { boxScore: res.face[0].boxScore, faceScore: res.face[0].faceScore, age: res.face[0].age, gender: res.face[0].gender, genderScore: res.face[0].genderScore },

@@ -278,7 +278,7 @@ async function test(Human, inputConfig) {
   config.body = { minConfidence: 0.0001 };
   config.hand = { minConfidence: 0.0001 };
   res = await testDetect(human, 'samples/in/ai-body.jpg', 'default');
-  if (!res || res?.face?.length !== 1 || res?.body?.length !== 1 || res?.hand?.length !== 2 || res?.gesture?.length !== 8) log('error', 'failed: sensitive result mismatch', res?.face?.length, res?.body?.length, res?.hand?.length, res?.gesture?.length);
+  if (!res || res?.face?.length !== 1 || res?.body?.length !== 1 || res?.hand?.length !== 2 || res?.gesture?.length !== 7) log('error', 'failed: sensitive result mismatch', res?.face?.length, res?.body?.length, res?.hand?.length, res?.gesture?.length);
   else log('state', 'passed: sensitive result match');

   // test sensitive details face

@@ -5,7 +5,7 @@

 // export all from build bundle
 export * from '@tensorflow/tfjs/dist/index.js';
-// export * from '@tensorflow/tfjs-backend-webgl/dist/index.js';
+export * from '@tensorflow/tfjs-backend-webgl/dist/index.js';
 // export * from '@tensorflow/tfjs-backend-wasm/dist/index.js';

 // add webgpu to bundle, experimental

wiki (submodule)

@@ -1 +1 @@
-Subproject commit 0deb501cf47e1783e8ca4426b7bf4697196f09e2
+Subproject commit e5a6342e4e2dd5d79b73cafada222ef4b1d1621a