mirror of https://github.com/vladmandic/human
fix mobilefacenet module
parent: 1cb7b51924
commit: 48dc679b2e
CHANGELOG.md
@@ -9,7 +9,10 @@
 ## Changelog

-### **HEAD -> main** 2021/11/12 mandic00@live.com
+### **HEAD -> main** 2021/11/13 mandic00@live.com
+
+### **origin/main** 2021/11/12 mandic00@live.com

 - implement optional face masking
 - add similarity score range normalization
TODO.md: 4 lines changed
@@ -47,8 +47,12 @@ New:
   checks if input appears to be a real-world live image or a recording
   best used together with `antispoof` that checks if input appears to have a realistic face
+- new face masking option in `config.face.detector.mask`
+  result is shading of the face image outside of the detected face area, which is useful for increased sensitivity of other modules that rely on the detected face as input
+- new face crop option in `config.face.detector.cropFactor`
+  result is user-definable fine-tuning for other modules that rely on the detected face as input

 Other:
 - Improved **Safari** compatibility
 - Improved `similarity` and `match` score range normalization
 - Documentation overhaul
 - Fixed optional `gear`, `ssrnet`, `mobilefacenet` modules
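The new detector options land in `config.face.detector` and can be set directly in a user configuration. A minimal sketch using the same values as the faceid demo below; the package import path is an assumption:

// minimal sketch of the new detector options; cropFactor and mask are the
// options introduced in this release, values match the faceid demo
import { Human } from '@vladmandic/human';

const human = new Human({
  face: {
    enabled: true,
    detector: {
      rotation: true,  // straighten detected face before further analysis
      return: true,    // include cropped face tensor in results
      cropFactor: 1.6, // user-definable enlargement of the detected face crop
      mask: false,     // when true, shades the image outside of the face area
    },
  },
});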
demo/faceid/index.js (compiled demo output)
@@ -78,6 +78,7 @@ var humanConfig = {
     enabled: true,
     detector: { rotation: true, return: true, cropFactor: 1.6, mask: false },
     description: { enabled: true },
+    mobilefacenet: { enabled: false, modelPath: "https://vladmandic.github.io/human-models/models/mobilefacenet.json" },
     iris: { enabled: true },
     emotion: { enabled: false },
     antispoof: { enabled: true },
@@ -88,6 +89,7 @@ var humanConfig = {
   object: { enabled: false },
   gesture: { enabled: true }
 };
+var matchOptions = { order: 2, multiplier: 25, min: 0.2, max: 0.8 };
 var options = {
   minConfidence: 0.6,
   minSize: 224,
@@ -97,7 +99,8 @@ var options = {
   threshold: 0.5,
   mask: humanConfig.face.detector.mask,
   rotation: humanConfig.face.detector.rotation,
-  cropFactor: humanConfig.face.detector.cropFactor
+  cropFactor: humanConfig.face.detector.cropFactor,
+  ...matchOptions
 };
 var ok = {
   faceCount: false,
@@ -254,6 +257,7 @@ async function detectFace() {
   (_a = dom.canvas.getContext("2d")) == null ? void 0 : _a.clearRect(0, 0, options.minSize, options.minSize);
   if (!current.face || !current.face.tensor || !current.face.embedding)
     return false;
+  console.log("face record:", current.face);
   human.tf.browser.toPixels(current.face.tensor, dom.canvas);
   if (await count() === 0) {
     log2("face database is empty");
@@ -263,7 +267,7 @@ async function detectFace() {
   }
   const db2 = await load();
   const descriptors = db2.map((rec) => rec.descriptor);
-  const res = await human.match(current.face.embedding, descriptors);
+  const res = await human.match(current.face.embedding, descriptors, matchOptions);
   current.record = db2[res.index] || null;
   if (current.record) {
     log2(`best match: ${current.record.name} | id: ${current.record.id} | similarity: ${Math.round(1e3 * res.similarity) / 10}%`);
(one file diff suppressed: lines too long)
demo/faceid/index.ts
@@ -16,7 +16,8 @@ const humanConfig = { // user configuration for human, used to fine-tune behavio
   face: {
     enabled: true,
     detector: { rotation: true, return: true, cropFactor: 1.6, mask: false }, // return tensor is used to get detected face image
-    description: { enabled: true },
+    description: { enabled: true }, // default model for face descriptor extraction is faceres
+    mobilefacenet: { enabled: false, modelPath: 'https://vladmandic.github.io/human-models/models/mobilefacenet.json' }, // alternative model
     iris: { enabled: true }, // needed to determine gaze direction
     emotion: { enabled: false }, // not needed
     antispoof: { enabled: true }, // enable optional antispoof module
@@ -28,6 +29,9 @@ const humanConfig = { // user configuration for human, used to fine-tune behavio
   gesture: { enabled: true }, // parses face and iris gestures
 };

+// const matchOptions = { order: 2, multiplier: 1000, min: 0.0, max: 1.0 }; // for embedding model
+const matchOptions = { order: 2, multiplier: 25, min: 0.2, max: 0.8 }; // for faceres model
+
 const options = {
   minConfidence: 0.6, // overall face confidence for box, face, gender, real, live
   minSize: 224, // min input to face descriptor model before degradation
@@ -38,6 +42,7 @@ const options = {
   mask: humanConfig.face.detector.mask,
   rotation: humanConfig.face.detector.rotation,
   cropFactor: humanConfig.face.detector.cropFactor,
+  ...matchOptions,
 };

 const ok = { // must meet all rules
@@ -194,6 +199,8 @@ async function deleteRecord() {
 async function detectFace() {
   dom.canvas.getContext('2d')?.clearRect(0, 0, options.minSize, options.minSize);
   if (!current.face || !current.face.tensor || !current.face.embedding) return false;
+  // eslint-disable-next-line no-console
+  console.log('face record:', current.face);
   human.tf.browser.toPixels(current.face.tensor as unknown as TensorLike, dom.canvas);
   if (await indexDb.count() === 0) {
     log('face database is empty');
@@ -203,7 +210,7 @@ async function detectFace() {
   }
   const db = await indexDb.load();
   const descriptors = db.map((rec) => rec.descriptor);
-  const res = await human.match(current.face.embedding, descriptors);
+  const res = await human.match(current.face.embedding, descriptors, matchOptions);
   current.record = db[res.index] || null;
   if (current.record) {
     log(`best match: ${current.record.name} | id: ${current.record.id} | similarity: ${Math.round(1000 * res.similarity) / 10}%`);
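The `matchOptions` passed to `human.match` control how raw descriptor distance maps to the reported similarity: roughly, `order` is the distance norm, `multiplier` scales distance when deriving similarity, and `min`/`max` define the range used for score normalization. A minimal sketch, assuming `descriptors` was loaded from the demo's indexDb and `face` is a detected face result:

// values are the faceres defaults from the demo above
const matchOptions = { order: 2, multiplier: 25, min: 0.2, max: 0.8 };
const res = await human.match(face.embedding, descriptors, matchOptions);
// res holds the best matching database index plus a normalized similarity
console.log(`index: ${res.index} | similarity: ${Math.round(1000 * res.similarity) / 10}%`);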
(multiple file diffs suppressed: too large or lines too long)
deleted file: old face embedding module (src/face/embedding.ts)
@@ -1,112 +0,0 @@
/**
 * MobileFace embedding model implementation
 *
 * Based on: [**BecauseofAI MobileFace**](https://github.com/becauseofAI/MobileFace)
 *
 * Obsolete and replaced by `faceres` that performs age/gender/descriptor analysis
 */

import { log, join } from '../util/util';
import * as tf from '../../dist/tfjs.esm.js';
import type { Tensor, GraphModel } from '../tfjs/types';
import { env } from '../util/env';

let model: GraphModel | null;

export async function load(config) {
  const modelUrl = join(config.modelBasePath, config.face.embedding.modelPath);
  if (env.initial) model = null;
  if (!model) {
    model = await tf.loadGraphModel(modelUrl) as unknown as GraphModel;
    if (!model) log('load model failed:', config.face.embedding.modelPath);
    else if (config.debug) log('load model:', modelUrl);
  } else if (config.debug) log('cached model:', modelUrl);
  return model;
}

export function enhance(input): Tensor {
  const image = tf.tidy(() => {
    // input received from detector is already normalized to 0..1
    // input is also assumed to be straightened
    // const data = tf.image.resizeBilinear(input, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false); // just resize to fit the embedding model
    // do a tight crop of image and resize it to fit the model
    const box = [[0.05, 0.15, 0.85, 0.85]]; // empirical values for top, left, bottom, right
    const tensor = input.image || input.tensor;
    if (!(tensor instanceof tf.Tensor)) return null;
    if (!model || !model.inputs || !model.inputs[0].shape) return null;
    const crop = (tensor.shape.length === 3)
      ? tf.image.cropAndResize(tf.expandDims(tensor, 0), box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) // add batch dimension if missing
      : tf.image.cropAndResize(tensor, box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);

    // convert to black&white to avoid colorization impact
    const rgb = [0.2989, 0.5870, 0.1140]; // factors for red/green/blue colors when converting to grayscale: https://www.mathworks.com/help/matlab/ref/rgb2gray.html
    const [red, green, blue] = tf.split(crop, 3, 3);
    const redNorm = tf.mul(red, rgb[0]);
    const greenNorm = tf.mul(green, rgb[1]);
    const blueNorm = tf.mul(blue, rgb[2]);
    const grayscale = tf.addN([redNorm, greenNorm, blueNorm]);
    const merge = tf.stack([grayscale, grayscale, grayscale], 3).squeeze(4);

    /*
    // optionally increase image contrast
    // or do it per-channel so mean is done on each channel
    // or do it based on histogram
    const mean = merge.mean();
    const factor = 5;
    const contrast = merge.sub(mean).mul(factor).add(mean);
    */

    // normalize brightness to 0..1
    const darken = tf.sub(merge, merge.min());
    const lighten = tf.div(darken, darken.max());

    return lighten;
  });
  return image;
}

export async function predict(input, config): Promise<number[]> {
  if (!model) return [];
  return new Promise(async (resolve) => {
    let data: Array<number> = [];
    if (config.face.embedding.enabled) {
      const image = enhance(input);
      const dataT = model?.execute(image) as Tensor;
      /*
      alternatives kept for reference:
      const dataT = tf.tidy(() => {
        // if needed convert from NHWC to NCHW
        const nchw = image.transpose([3, 0, 1, 2]);

        const res = model.execute(image);

        // optionally run twice with flipped image and average results
        const res1 = model.execute(image);
        const flipped = tf.image.flipLeftRight(image);
        const res2 = model.execute(flipped);
        const merge = tf.stack([res1, res2], 2).squeeze();
        const res = merge.logSumExp(1);

        // optionally normalize outputs with l2 normalization
        const scaled = tf.tidy(() => {
          const l2 = res.norm('euclidean');
          const scale = res.div(l2);
          return scale;
        });

        // optionally reduce feature vector complexity
        const reshape = tf.reshape(res, [128, 2]); // split 256 vectors into 128 x 2
        const reduce = reshape.logSumExp(1); // reduce 2nd dimension by calculating logSumExp on it

        return reduce;
      });
      */
      const output = await dataT.data();
      data = Array.from(output); // convert typed array to simple array
      tf.dispose(dataT);
      tf.dispose(image);
    }
    resolve(data);
  });
}
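The grayscale conversion from the deleted enhance() survives as a reference comment in the new module. A one-pixel worked example of the rgb2gray weighting it applied, with a hypothetical pixel value:

// worked example of the grayscale conversion above
const [r, g, b] = [1.0, 0.5, 0.25];                // hypothetical normalized 0..1 channel values
const gray = 0.2989 * r + 0.5870 * g + 0.1140 * b; // 0.2989 + 0.2935 + 0.0285
console.log(gray.toFixed(4));                      // "0.6209"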
src/face/face.ts
@@ -15,6 +15,7 @@ import * as liveness from './liveness';
 import * as gear from '../gear/gear';
 import * as ssrnetAge from '../gear/ssrnet-age';
 import * as ssrnetGender from '../gear/ssrnet-gender';
+import * as mobilefacenet from './mobilefacenet';
 import type { FaceResult } from '../result';
 import type { Tensor } from '../tfjs/types';
 import type { Human } from '../human';
@@ -28,7 +29,7 @@ export const detectFace = async (parent: Human /* instance of human */, input: T
   let gearRes;
   let genderRes;
   let emotionRes;
-  let embeddingRes;
+  let mobilefacenetRes;
   let antispoofRes;
   let livenessRes;
   let descRes;
@@ -93,7 +94,7 @@ export const detectFace = async (parent: Human /* instance of human */, input: T
       parent.state = 'run:liveness';
       timeStamp = now();
       livenessRes = parent.config.face.liveness?.enabled ? await liveness.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : null;
-      parent.performance.antispoof = env.perfadd ? (parent.performance.antispoof || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
+      parent.performance.liveness = env.perfadd ? (parent.performance.antispoof || 0) + Math.trunc(now() - timeStamp) : Math.trunc(now() - timeStamp);
     }
     parent.analyze('End Liveness:');

@@ -105,7 +106,7 @@ export const detectFace = async (parent: Human /* instance of human */, input: T
       parent.state = 'run:gear';
       timeStamp = now();
       gearRes = parent.config.face['gear']?.enabled ? await gear.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : {};
-      parent.performance.emotion = Math.trunc(now() - timeStamp);
+      parent.performance.gear = Math.trunc(now() - timeStamp);
     }
     parent.analyze('End GEAR:');

@@ -119,10 +120,22 @@ export const detectFace = async (parent: Human /* instance of human */, input: T
       timeStamp = now();
       ageRes = parent.config.face['ssrnet']?.enabled ? await ssrnetAge.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : {};
       genderRes = parent.config.face['ssrnet']?.enabled ? await ssrnetGender.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : {};
-      parent.performance.emotion = Math.trunc(now() - timeStamp);
+      parent.performance.ssrnet = Math.trunc(now() - timeStamp);
     }
     parent.analyze('End SSRNet:');

+    // run mobilefacenet, inherits face from blazeface
+    parent.analyze('Start MobileFaceNet:');
+    if (parent.config.async) {
+      mobilefacenetRes = parent.config.face['mobilefacenet']?.enabled ? mobilefacenet.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : {};
+    } else {
+      parent.state = 'run:mobilefacenet';
+      timeStamp = now();
+      mobilefacenetRes = parent.config.face['mobilefacenet']?.enabled ? await mobilefacenet.predict(faces[i].tensor || tf.tensor([]), parent.config, i, faces.length) : {};
+      parent.performance.mobilefacenet = Math.trunc(now() - timeStamp);
+    }
+    parent.analyze('End MobileFaceNet:');
+
     // run description, inherits face from blazeface
     parent.analyze('Start Description:');
     if (parent.config.async) {
@@ -137,13 +150,15 @@ export const detectFace = async (parent: Human /* instance of human */, input: T

     // if async wait for results
     if (parent.config.async) {
-      [ageRes, genderRes, emotionRes, embeddingRes, descRes, gearRes, antispoofRes, livenessRes] = await Promise.all([ageRes, genderRes, emotionRes, embeddingRes, descRes, gearRes, antispoofRes, livenessRes]);
+      [ageRes, genderRes, emotionRes, mobilefacenetRes, descRes, gearRes, antispoofRes, livenessRes] = await Promise.all([ageRes, genderRes, emotionRes, mobilefacenetRes, descRes, gearRes, antispoofRes, livenessRes]);
     }
     parent.analyze('Finish Face:');

     // override age/gender if alternative models are used
     if (parent.config.face['ssrnet']?.enabled && ageRes && genderRes) descRes = { age: ageRes.age, gender: genderRes.gender, genderScore: genderRes.genderScore };
     if (parent.config.face['gear']?.enabled && gearRes) descRes = { age: gearRes.age, gender: gearRes.gender, genderScore: gearRes.genderScore, race: gearRes.race };
+    // override descriptor if embedding model is used
+    if (parent.config.face['mobilefacenet']?.enabled && mobilefacenetRes) descRes.descriptor = mobilefacenetRes;

     // calculate iris distance
     // iris: array[ center, left, top, right, bottom]
src/face/mobilefacenet.ts (new file)
@@ -0,0 +1,87 @@
/**
 * MobileFaceNet model implementation
 *
 * Based on: [**BecauseofAI MobileFace**](https://github.com/becauseofAI/MobileFace)
 *
 * Obsolete and replaced by `faceres` that performs age/gender/descriptor analysis
 */

import { log, join, now } from '../util/util';
import * as tf from '../../dist/tfjs.esm.js';
import type { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';
import { env } from '../util/env';

let model: GraphModel | null;
const last: Array<number[]> = [];
let lastCount = 0;
let lastTime = 0;
let skipped = Number.MAX_SAFE_INTEGER;

export async function load(config: Config): Promise<GraphModel> {
  const modelUrl = join(config.modelBasePath, config.face['mobilefacenet'].modelPath);
  if (env.initial) model = null;
  if (!model) {
    model = await tf.loadGraphModel(modelUrl) as unknown as GraphModel;
    if (!model) log('load model failed:', config.face['mobilefacenet'].modelPath);
    else if (config.debug) log('load model:', modelUrl);
  } else if (config.debug) log('cached model:', modelUrl);
  return model;
}

/*
// convert to black&white to avoid colorization impact
const rgb = [0.2989, 0.5870, 0.1140]; // factors for red/green/blue colors when converting to grayscale: https://www.mathworks.com/help/matlab/ref/rgb2gray.html
const [red, green, blue] = tf.split(crop, 3, 3);
const redNorm = tf.mul(red, rgb[0]);
const greenNorm = tf.mul(green, rgb[1]);
const blueNorm = tf.mul(blue, rgb[2]);
const grayscale = tf.addN([redNorm, greenNorm, blueNorm]);
const merge = tf.stack([grayscale, grayscale, grayscale], 3).squeeze(4);

// optionally increase image contrast
// or do it per-channel so mean is done on each channel
// or do it based on histogram
const mean = merge.mean();
const factor = 5;
const contrast = merge.sub(mean).mul(factor).add(mean);
*/

export async function predict(input: Tensor, config: Config, idx, count): Promise<number[]> {
  if (!model) return [];
  const skipFrame = skipped < (config.face['embedding']?.skipFrames || 0);
  const skipTime = (config.face['embedding']?.skipTime || 0) > (now() - lastTime);
  if (config.skipAllowed && skipTime && skipFrame && (lastCount === count) && last[idx]) {
    skipped++;
    return last[idx];
  }
  return new Promise(async (resolve) => {
    let data: Array<number> = [];
    if (config.face['embedding']?.enabled && model?.inputs[0].shape) {
      const t: Record<string, Tensor> = {};
      t.crop = tf.image.resizeBilinear(input, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false); // just resize to fit the embedding model
      // alternative: do a tight crop of image and resize it to fit the model
      // const box = [[0.05, 0.15, 0.85, 0.85]]; // empirical values for top, left, bottom, right
      // t.crop = tf.image.cropAndResize(input, box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
      t.data = model?.execute(t.crop) as Tensor;
      /*
      // optionally normalize outputs with l2 normalization
      const scaled = tf.tidy(() => {
        const l2 = res.norm('euclidean');
        const scale = res.div(l2);
        return scale;
      });

      // optionally reduce feature vector complexity
      const reshape = tf.reshape(res, [128, 2]); // split 256 vectors into 128 x 2
      const reduce = reshape.logSumExp(1); // reduce 2nd dimension by calculating logSumExp on it
      */
      const output = await t.data.data();
      data = Array.from(output); // convert typed array to simple array
    }
    last[idx] = data;
    lastCount = count;
    lastTime = now();
    resolve(data);
  });
}
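With the module fixed, enabling it routes descriptor extraction through mobilefacenet instead of the default faceres model (see the descriptor override in face.ts above). A minimal usage sketch, assuming browser usage and the model URL from the demo:

// hypothetical usage sketch; mobilefacenet is disabled by default
import { Human } from '@vladmandic/human';

const human = new Human({
  face: {
    enabled: true,
    description: { enabled: true }, // faceres still provides age/gender
    mobilefacenet: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/mobilefacenet.json' },
  },
});

async function describe(input: HTMLVideoElement) {
  const result = await human.detect(input);
  // the face embedding now holds the mobilefacenet descriptor vector
  console.log(result.face[0]?.embedding?.length);
}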
src/gear/ssrnet-age.ts
@@ -62,6 +62,7 @@ export async function predict(image: Tensor, config: Config, idx, count): Promis
   let age = ageSorted[0][0]; // pick best starting point
   for (let i = 1; i < ageSorted.length; i++) age += ageSorted[i][1] * (ageSorted[i][0] - age); // adjust with each other choice by weight
   obj.age = Math.round(10 * age) / 10;
   Object.keys(t).forEach((tensor) => tf.dispose(t[tensor]));
   last[idx] = obj;
   lastCount = count;
   lastTime = now();
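The weighted-age loop above refines the top candidate by pulling the estimate toward each remaining candidate in proportion to that candidate's probability. A worked example with made-up values, not actual model output:

// ageSorted as [age, probability] pairs, sorted by probability
const ageSorted: Array<[number, number]> = [[30, 0.6], [25, 0.3], [40, 0.1]];
let age = ageSorted[0][0];                        // 30, best starting point
age += ageSorted[1][1] * (ageSorted[1][0] - age); // 30 + 0.3 * -5 = 28.5
age += ageSorted[2][1] * (ageSorted[2][0] - age); // 28.5 + 0.1 * 11.5 = 29.65
console.log(Math.round(10 * age) / 10);           // 29.7, same rounding as obj.age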
src/models.ts
@@ -13,6 +13,7 @@ import * as blazepose from './body/blazepose';
 import * as centernet from './object/centernet';
 import * as efficientpose from './body/efficientpose';
 import * as emotion from './gear/emotion';
+import * as mobilefacenet from './face/mobilefacenet';
 import * as facemesh from './face/facemesh';
 import * as faceres from './face/faceres';
 import * as handpose from './hand/handpose';
@@ -39,7 +40,7 @@ export class Models {
   blazepose: null | GraphModel | Promise<GraphModel> = null;
   centernet: null | GraphModel | Promise<GraphModel> = null;
   efficientpose: null | GraphModel | Promise<GraphModel> = null;
-  embedding: null | GraphModel | Promise<GraphModel> = null;
+  mobilefacenet: null | GraphModel | Promise<GraphModel> = null;
   emotion: null | GraphModel | Promise<GraphModel> = null;
   facedetect: null | GraphModel | Promise<GraphModel> = null;
   faceiris: null | GraphModel | Promise<GraphModel> = null;
@@ -84,6 +85,7 @@ export async function load(instance: Human): Promise<void> {
   if (instance.config.face.enabled && instance.config.face['gear']?.enabled && !instance.models.gear) instance.models.gear = gear.load(instance.config);
   if (instance.config.face.enabled && instance.config.face['ssrnet']?.enabled && !instance.models.ssrnetage) instance.models.ssrnetage = ssrnetAge.load(instance.config);
   if (instance.config.face.enabled && instance.config.face['ssrnet']?.enabled && !instance.models.ssrnetgender) instance.models.ssrnetgender = ssrnetGender.load(instance.config);
+  if (instance.config.face.enabled && instance.config.face['mobilefacenet']?.enabled && !instance.models.mobilefacenet) instance.models.mobilefacenet = mobilefacenet.load(instance.config);
   if (instance.config.hand.enabled && !instance.models.handtrack && instance.config.hand.detector?.modelPath?.includes('handtrack')) instance.models.handtrack = handtrack.loadDetect(instance.config);
   if (instance.config.hand.enabled && instance.config.hand.landmarks && !instance.models.handskeleton && instance.config.hand.detector?.modelPath?.includes('handtrack')) instance.models.handskeleton = handtrack.loadSkeleton(instance.config);
   if (instance.config.object.enabled && !instance.models.centernet && instance.config.object?.modelPath?.includes('centernet')) instance.models.centernet = centernet.load(instance.config);
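The load() additions keep model loading lazy: a graph model is only fetched when its module is enabled in config and not yet instantiated. A minimal sketch, assuming the public human.load() delegates to this function:

// hypothetical warm-up sketch; models for disabled modules stay null
const human = new Human({
  face: { enabled: true, mobilefacenet: { enabled: true, modelPath: 'https://vladmandic.github.io/human-models/models/mobilefacenet.json' } },
});
await human.load();        // fetches enabled models, including mobilefacenet
console.log(human.models); // inspect which models were instantiated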
test/build.log
@@ -1,26 +1,26 @@
-2021-11-13 12:09:51 INFO: @vladmandic/human version 2.5.1
-2021-11-13 12:09:51 INFO: User: vlado Platform: linux Arch: x64 Node: v17.0.1
-2021-11-13 12:09:51 INFO: Application: {"name":"@vladmandic/human","version":"2.5.1"}
-2021-11-13 12:09:51 INFO: Environment: {"profile":"production","config":".build.json","package":"package.json","tsconfig":true,"eslintrc":true,"git":true}
-2021-11-13 12:09:51 INFO: Toolchain: {"build":"0.6.3","esbuild":"0.13.13","typescript":"4.4.4","typedoc":"0.22.8","eslint":"8.2.0"}
-2021-11-13 12:09:51 INFO: Build: {"profile":"production","steps":["clean","compile","typings","typedoc","lint","changelog"]}
-2021-11-13 12:09:51 STATE: Clean: {"locations":["dist/*","types/*","typedoc/*"]}
-2021-11-13 12:09:51 STATE: Compile: {"name":"tfjs/nodejs/cpu","format":"cjs","platform":"node","input":"tfjs/tf-node.ts","output":"dist/tfjs.esm.js","files":1,"inputBytes":102,"outputBytes":1275}
-2021-11-13 12:09:51 STATE: Compile: {"name":"human/nodejs/cpu","format":"cjs","platform":"node","input":"src/human.ts","output":"dist/human.node.js","files":60,"inputBytes":538983,"outputBytes":457718}
-2021-11-13 12:09:51 STATE: Compile: {"name":"tfjs/nodejs/gpu","format":"cjs","platform":"node","input":"tfjs/tf-node-gpu.ts","output":"dist/tfjs.esm.js","files":1,"inputBytes":110,"outputBytes":1283}
-2021-11-13 12:09:51 STATE: Compile: {"name":"human/nodejs/gpu","format":"cjs","platform":"node","input":"src/human.ts","output":"dist/human.node-gpu.js","files":60,"inputBytes":538991,"outputBytes":457722}
-2021-11-13 12:09:51 STATE: Compile: {"name":"tfjs/nodejs/wasm","format":"cjs","platform":"node","input":"tfjs/tf-node-wasm.ts","output":"dist/tfjs.esm.js","files":1,"inputBytes":149,"outputBytes":1350}
-2021-11-13 12:09:52 STATE: Compile: {"name":"human/nodejs/wasm","format":"cjs","platform":"node","input":"src/human.ts","output":"dist/human.node-wasm.js","files":60,"inputBytes":539058,"outputBytes":457794}
-2021-11-13 12:09:52 STATE: Compile: {"name":"tfjs/browser/version","format":"esm","platform":"browser","input":"tfjs/tf-version.ts","output":"dist/tfjs.version.js","files":1,"inputBytes":1063,"outputBytes":1652}
-2021-11-13 12:09:52 STATE: Compile: {"name":"tfjs/browser/esm/nobundle","format":"esm","platform":"browser","input":"tfjs/tf-browser.ts","output":"dist/tfjs.esm.js","files":2,"inputBytes":2326,"outputBytes":912}
-2021-11-13 12:09:52 STATE: Compile: {"name":"human/browser/esm/nobundle","format":"esm","platform":"browser","input":"src/human.ts","output":"dist/human.esm-nobundle.js","files":60,"inputBytes":538620,"outputBytes":459966}
-2021-11-13 12:09:52 STATE: Compile: {"name":"tfjs/browser/esm/custom","format":"esm","platform":"browser","input":"tfjs/tf-custom.ts","output":"dist/tfjs.esm.js","files":2,"inputBytes":2562703,"outputBytes":2497652}
-2021-11-13 12:09:52 STATE: Compile: {"name":"human/browser/iife/bundle","format":"iife","platform":"browser","input":"src/human.ts","output":"dist/human.js","files":60,"inputBytes":3035360,"outputBytes":1621354}
-2021-11-13 12:09:53 STATE: Compile: {"name":"human/browser/esm/bundle","format":"esm","platform":"browser","input":"src/human.ts","output":"dist/human.esm.js","files":60,"inputBytes":3035360,"outputBytes":2963133}
-2021-11-13 12:10:13 STATE: Typings: {"input":"src/human.ts","output":"types","files":52}
-2021-11-13 12:10:21 STATE: TypeDoc: {"input":"src/human.ts","output":"typedoc","objects":50,"generated":true}
-2021-11-13 12:10:21 STATE: Compile: {"name":"demo/typescript","format":"esm","platform":"browser","input":"demo/typescript/index.ts","output":"demo/typescript/index.js","files":1,"inputBytes":5801,"outputBytes":3822}
-2021-11-13 12:10:21 STATE: Compile: {"name":"demo/faceid","format":"esm","platform":"browser","input":"demo/faceid/index.ts","output":"demo/faceid/index.js","files":2,"inputBytes":14654,"outputBytes":11518}
-2021-11-13 12:11:00 STATE: Lint: {"locations":["*.json","src/**/*.ts","test/**/*.js","demo/**/*.js"],"files":92,"errors":0,"warnings":0}
-2021-11-13 12:11:01 STATE: ChangeLog: {"repository":"https://github.com/vladmandic/human","branch":"main","output":"CHANGELOG.md"}
-2021-11-13 12:11:01 INFO: Done...
+2021-11-13 17:19:22 INFO: @vladmandic/human version 2.5.1
+2021-11-13 17:19:22 INFO: User: vlado Platform: linux Arch: x64 Node: v17.0.1
+2021-11-13 17:19:22 INFO: Application: {"name":"@vladmandic/human","version":"2.5.1"}
+2021-11-13 17:19:22 INFO: Environment: {"profile":"production","config":".build.json","package":"package.json","tsconfig":true,"eslintrc":true,"git":true}
+2021-11-13 17:19:22 INFO: Toolchain: {"build":"0.6.3","esbuild":"0.13.13","typescript":"4.4.4","typedoc":"0.22.8","eslint":"8.2.0"}
+2021-11-13 17:19:22 INFO: Build: {"profile":"production","steps":["clean","compile","typings","typedoc","lint","changelog"]}
+2021-11-13 17:19:22 STATE: Clean: {"locations":["dist/*","types/*","typedoc/*"]}
+2021-11-13 17:19:22 STATE: Compile: {"name":"tfjs/nodejs/cpu","format":"cjs","platform":"node","input":"tfjs/tf-node.ts","output":"dist/tfjs.esm.js","files":1,"inputBytes":102,"outputBytes":1275}
+2021-11-13 17:19:23 STATE: Compile: {"name":"human/nodejs/cpu","format":"cjs","platform":"node","input":"src/human.ts","output":"dist/human.node.js","files":61,"inputBytes":543749,"outputBytes":460526}
+2021-11-13 17:19:23 STATE: Compile: {"name":"tfjs/nodejs/gpu","format":"cjs","platform":"node","input":"tfjs/tf-node-gpu.ts","output":"dist/tfjs.esm.js","files":1,"inputBytes":110,"outputBytes":1283}
+2021-11-13 17:19:23 STATE: Compile: {"name":"human/nodejs/gpu","format":"cjs","platform":"node","input":"src/human.ts","output":"dist/human.node-gpu.js","files":61,"inputBytes":543757,"outputBytes":460530}
+2021-11-13 17:19:23 STATE: Compile: {"name":"tfjs/nodejs/wasm","format":"cjs","platform":"node","input":"tfjs/tf-node-wasm.ts","output":"dist/tfjs.esm.js","files":1,"inputBytes":149,"outputBytes":1350}
+2021-11-13 17:19:23 STATE: Compile: {"name":"human/nodejs/wasm","format":"cjs","platform":"node","input":"src/human.ts","output":"dist/human.node-wasm.js","files":61,"inputBytes":543824,"outputBytes":460602}
+2021-11-13 17:19:23 STATE: Compile: {"name":"tfjs/browser/version","format":"esm","platform":"browser","input":"tfjs/tf-version.ts","output":"dist/tfjs.version.js","files":1,"inputBytes":1063,"outputBytes":1652}
+2021-11-13 17:19:23 STATE: Compile: {"name":"tfjs/browser/esm/nobundle","format":"esm","platform":"browser","input":"tfjs/tf-browser.ts","output":"dist/tfjs.esm.js","files":2,"inputBytes":2326,"outputBytes":912}
+2021-11-13 17:19:23 STATE: Compile: {"name":"human/browser/esm/nobundle","format":"esm","platform":"browser","input":"src/human.ts","output":"dist/human.esm-nobundle.js","files":61,"inputBytes":543386,"outputBytes":462792}
+2021-11-13 17:19:23 STATE: Compile: {"name":"tfjs/browser/esm/custom","format":"esm","platform":"browser","input":"tfjs/tf-custom.ts","output":"dist/tfjs.esm.js","files":2,"inputBytes":2562703,"outputBytes":2497652}
+2021-11-13 17:19:24 STATE: Compile: {"name":"human/browser/iife/bundle","format":"iife","platform":"browser","input":"src/human.ts","output":"dist/human.js","files":61,"inputBytes":3040126,"outputBytes":1622880}
+2021-11-13 17:19:24 STATE: Compile: {"name":"human/browser/esm/bundle","format":"esm","platform":"browser","input":"src/human.ts","output":"dist/human.esm.js","files":61,"inputBytes":3040126,"outputBytes":2965877}
+2021-11-13 17:19:50 STATE: Typings: {"input":"src/human.ts","output":"types","files":53}
+2021-11-13 17:20:02 STATE: TypeDoc: {"input":"src/human.ts","output":"typedoc","objects":50,"generated":true}
+2021-11-13 17:20:02 STATE: Compile: {"name":"demo/typescript","format":"esm","platform":"browser","input":"demo/typescript/index.ts","output":"demo/typescript/index.js","files":1,"inputBytes":5801,"outputBytes":3822}
+2021-11-13 17:20:02 STATE: Compile: {"name":"demo/faceid","format":"esm","platform":"browser","input":"demo/faceid/index.ts","output":"demo/faceid/index.js","files":2,"inputBytes":15166,"outputBytes":11786}
+2021-11-13 17:20:44 STATE: Lint: {"locations":["*.json","src/**/*.ts","test/**/*.js","demo/**/*.js"],"files":92,"errors":0,"warnings":0}
+2021-11-13 17:20:44 STATE: ChangeLog: {"repository":"https://github.com/vladmandic/human","branch":"main","output":"CHANGELOG.md"}
+2021-11-13 17:20:44 INFO: Done...
test/test.log: 1348 lines changed (diff suppressed: too large)
(two more file diffs suppressed: lines too long)
mobilefacenet.d.ts (new typings file)
@@ -0,0 +1,11 @@
/**
 * MobileFaceNet model implementation
 *
 * Based on: [**BecauseofAI MobileFace**](https://github.com/becauseofAI/MobileFace)
 *
 * Obsolete and replaced by `faceres` that performs age/gender/descriptor analysis
 */
import type { Tensor, GraphModel } from '../tfjs/types';
import type { Config } from '../config';
export declare function load(config: Config): Promise<GraphModel>;
export declare function predict(input: Tensor, config: Config, idx: any, count: any): Promise<number[]>;

models.d.ts (typings)
@@ -16,7 +16,7 @@ export declare class Models {
   blazepose: null | GraphModel | Promise<GraphModel>;
   centernet: null | GraphModel | Promise<GraphModel>;
   efficientpose: null | GraphModel | Promise<GraphModel>;
-  embedding: null | GraphModel | Promise<GraphModel>;
+  mobilefacenet: null | GraphModel | Promise<GraphModel>;
   emotion: null | GraphModel | Promise<GraphModel>;
   facedetect: null | GraphModel | Promise<GraphModel>;
   faceiris: null | GraphModel | Promise<GraphModel>;