update node demos

pull/46/head
Vladimir Mandic 2021-03-26 10:26:02 -04:00
parent 23bdd3f086
commit 40b3a65bdc
27 changed files with 245674 additions and 5111 deletions

View File

@@ -1,7 +1,7 @@
 # @vladmandic/face-api
-Version: **1.1.4**
-Description: **FaceAPI: AI-powered Face Detection, Face Embedding & Recognition Using Tensorflow/JS**
+Version: **1.1.5**
+Description: **FaceAPI: AI-powered Face Detection, Description & Recognition using Tensorflow/JS**
 Author: **Vladimir Mandic <mandic00@live.com>**
 License: **MIT** </LICENSE>
@@ -9,8 +9,12 @@ Repository: **<git+https://github.com/vladmandic/face-api.git>**
 ## Changelog
-### **HEAD -> master** 2021/03/19 mandic00@live.com
+### **HEAD -> master** 2021/03/25 mandic00@live.com
+### **1.1.5** 2021/03/23 mandic00@live.com
+- add node-canvas demo
 - refactoring
 ### **1.1.4** 2021/03/18 mandic00@live.com

View File

@@ -54,9 +54,7 @@ Example can be accessed directly using Git pages using URL:
 Three NodeJS examples are:
-- `/demo/node-singleprocess.js`:
-  Regular usage of `FaceAPI` from `NodeJS`
-- `/demo/node-singleprocess.js`:
+- `/demo/node.js`:
   Regular usage of `FaceAPI` from `NodeJS`
   Using `TFJS` native methods to load images
 - `/demo/node-canvas.js`:

View File

@@ -5,24 +5,24 @@ const process = require('process');
 const path = require('path');
 // eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require
 const log = require('@vladmandic/pilogger');
-// eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require
+// eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require, no-unused-vars
 const tf = require('@tensorflow/tfjs-node');
+// eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require
+const canvas = require('canvas');
 const faceapi = require('../dist/face-api.node.js'); // this is equivalent to '@vladmandic/faceapi'
 const modelPathRoot = '../model';
 const imgPathRoot = './demo'; // modify to include your sample images
-const minScore = 0.1;
+const minConfidence = 0.15;
 const maxResults = 5;
 let optionsSSDMobileNet;
-async function image(img) {
-  const buffer = fs.readFileSync(img);
-  const decoded = tf.node.decodeImage(buffer);
-  const casted = decoded.toFloat();
-  const result = casted.expandDims(0);
-  decoded.dispose();
-  casted.dispose();
-  return result;
-}
+async function image(input) {
+  const img = await canvas.loadImage(input);
+  const c = canvas.createCanvas(img.width, img.height);
+  const ctx = c.getContext('2d');
+  ctx.drawImage(img, 0, 0, img.width, img.height);
+  return c;
+}
 async function detect(tensor) {
@@ -35,10 +35,19 @@ async function detect(tensor) {
   return result;
 }
+function print(face) {
+  const expression = Object.entries(face.expressions).reduce((acc, val) => ((val[1] > acc[1]) ? val : acc), ['', 0]);
+  const box = [face.alignedRect._box._x, face.alignedRect._box._y, face.alignedRect._box._width, face.alignedRect._box._height];
+  const gender = `Gender: ${Math.round(100 * face.genderProbability)}% ${face.gender}`;
+  log.data(`Detection confidence: ${Math.round(100 * face.detection._score)}% ${gender} Age: ${Math.round(10 * face.age) / 10} Expression: ${Math.round(100 * expression[1])}% ${expression[0]} Box: ${box.map((a) => Math.round(a))}`);
+}
 async function main() {
   log.header();
   log.info('FaceAPI single-process test');
+  faceapi.env.monkeyPatch({ Canvas: canvas.Canvas, Image: canvas.Image, ImageData: canvas.ImageData });
   await faceapi.tf.setBackend('tensorflow');
   await faceapi.tf.enableProdMode();
   await faceapi.tf.ENV.set('DEBUG', false);
@@ -53,33 +62,27 @@ async function main() {
   await faceapi.nets.faceLandmark68Net.loadFromDisk(modelPath);
   await faceapi.nets.faceRecognitionNet.loadFromDisk(modelPath);
   await faceapi.nets.faceExpressionNet.loadFromDisk(modelPath);
-  optionsSSDMobileNet = new faceapi.SsdMobilenetv1Options({ minConfidence: minScore, maxResults });
+  optionsSSDMobileNet = new faceapi.SsdMobilenetv1Options({ minConfidence, maxResults });
   if (process.argv.length !== 3) {
     const t0 = process.hrtime.bigint();
     const dir = fs.readdirSync(imgPathRoot);
     for (const img of dir) {
       if (!img.toLocaleLowerCase().endsWith('.jpg')) continue;
-      const tensor = await image(path.join(imgPathRoot, img));
-      const result = await detect(tensor);
+      const c = await image(path.join(imgPathRoot, img));
+      const result = await detect(c);
       log.data('Image:', img, 'Detected faces:', result.length);
-      for (const i of result) {
-        log.data('Gender:', Math.round(100 * i.genderProbability), 'probability', i.gender, 'with age', Math.round(10 * i.age) / 10);
-      }
-      tensor.dispose();
+      for (const face of result) print(face);
     }
     const t1 = process.hrtime.bigint();
     log.info('Processed', dir.length, 'images in', Math.trunc(parseInt(t1 - t0) / 1000 / 1000), 'ms');
   } else {
     const param = process.argv[2];
     if (fs.existsSync(param)) {
-      const tensor = await image(param);
-      const result = await detect(tensor);
+      const c = await image(param);
+      const result = await detect(c);
       log.data('Image:', param, 'Detected faces:', result.length);
-      for (const i of result) {
-        log.data('Gender:', Math.round(100 * i.genderProbability), 'probability', i.gender, 'with age', Math.round(10 * i.age) / 10);
-      }
-      tensor.dispose();
+      for (const face of result) print(face);
     }
   }
 }
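Taken together, the rewritten demo loads images through node-canvas instead of TFJS tensors. A condensed sketch of that flow is below; it is illustrative only, not the shipped file, and the `withFace...` chain is an assumption inferred from the nets loaded in `main()` and the fields used by `print()`:

```js
// Minimal sketch of the canvas-based pipeline, assuming the same packages as the demo
const canvas = require('canvas');
const faceapi = require('../dist/face-api.node.js'); // equivalent to the published package

async function detectFromFile(file) {
  // map node-canvas classes onto the browser globals faceapi expects
  faceapi.env.monkeyPatch({ Canvas: canvas.Canvas, Image: canvas.Image, ImageData: canvas.ImageData });
  const img = await canvas.loadImage(file); // decode via node-canvas
  const c = canvas.createCanvas(img.width, img.height);
  c.getContext('2d').drawImage(img, 0, 0, img.width, img.height);
  const options = new faceapi.SsdMobilenetv1Options({ minConfidence: 0.15, maxResults: 5 });
  // assumed detection chain, matching the loaded nets and the fields print() reads
  return faceapi
    .detectAllFaces(c, options)
    .withFaceLandmarks()
    .withFaceExpressions()
    .withAgeAndGender()
    .withFaceDescriptors();
}
```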

View File

@@ -12,7 +12,7 @@ const faceapi = require('../dist/face-api.node.js'); // this is equivalent to '@vladmandic/faceapi'
 // options used by faceapi
 const modelPathRoot = '../model';
-const minScore = 0.1;
+const minConfidence = 0.15;
 const maxResults = 5;
 let optionsSSDMobileNet;
@@ -62,7 +62,7 @@ async function main() {
   await faceapi.nets.faceLandmark68Net.loadFromDisk(modelPath);
   await faceapi.nets.faceRecognitionNet.loadFromDisk(modelPath);
   await faceapi.nets.faceExpressionNet.loadFromDisk(modelPath);
-  optionsSSDMobileNet = new faceapi.SsdMobilenetv1Options({ minConfidence: minScore, maxResults });
+  optionsSSDMobileNet = new faceapi.SsdMobilenetv1Options({ minConfidence, maxResults });
   // now we're ready, so send message back to main that it knows it can use this worker
   process.send({ ready: true });

View File

@@ -5,24 +5,24 @@ const process = require('process');
 const path = require('path');
 // eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require
 const log = require('@vladmandic/pilogger');
-// eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require, no-unused-vars
-const tf = require('@tensorflow/tfjs-node');
 // eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require
-const canvas = require('canvas');
+const tf = require('@tensorflow/tfjs-node');
 const faceapi = require('../dist/face-api.node.js'); // this is equivalent to '@vladmandic/faceapi'
 const modelPathRoot = '../model';
 const imgPathRoot = './demo'; // modify to include your sample images
-const minScore = 0.1;
+const minConfidence = 0.15;
 const maxResults = 5;
 let optionsSSDMobileNet;
-async function image(input) {
-  const img = canvas.loadImage(input);
-  const c = canvas.createCanvas(img.width, img.height);
-  const ctx = c.getContext('2d');
-  ctx.drawImage(img, 0, 0, img.width, img.height);
-  return c;
-}
+async function image(img) {
+  const buffer = fs.readFileSync(img);
+  const decoded = tf.node.decodeImage(buffer);
+  const casted = decoded.toFloat();
+  const result = casted.expandDims(0);
+  decoded.dispose();
+  casted.dispose();
+  return result;
+}
 async function detect(tensor) {
@@ -35,12 +35,17 @@ async function detect(tensor) {
   return result;
 }
+function print(face) {
+  const expression = Object.entries(face.expressions).reduce((acc, val) => ((val[1] > acc[1]) ? val : acc), ['', 0]);
+  const box = [face.alignedRect._box._x, face.alignedRect._box._y, face.alignedRect._box._width, face.alignedRect._box._height];
+  const gender = `Gender: ${Math.round(100 * face.genderProbability)}% ${face.gender}`;
+  log.data(`Detection confidence: ${Math.round(100 * face.detection._score)}% ${gender} Age: ${Math.round(10 * face.age) / 10} Expression: ${Math.round(100 * expression[1])}% ${expression[0]} Box: ${box.map((a) => Math.round(a))}`);
+}
 async function main() {
   log.header();
   log.info('FaceAPI single-process test');
-  faceapi.env.monkeyPatch({ Canvas: canvas.Canvas, Image: canvas.Image, ImageData: canvas.ImageData });
   await faceapi.tf.setBackend('tensorflow');
   await faceapi.tf.enableProdMode();
   await faceapi.tf.ENV.set('DEBUG', false);
@@ -55,7 +60,7 @@ async function main() {
   await faceapi.nets.faceLandmark68Net.loadFromDisk(modelPath);
   await faceapi.nets.faceRecognitionNet.loadFromDisk(modelPath);
   await faceapi.nets.faceExpressionNet.loadFromDisk(modelPath);
-  optionsSSDMobileNet = new faceapi.SsdMobilenetv1Options({ minConfidence: minScore, maxResults });
+  optionsSSDMobileNet = new faceapi.SsdMobilenetv1Options({ minConfidence, maxResults });
   if (process.argv.length !== 3) {
     const t0 = process.hrtime.bigint();
@@ -65,9 +70,7 @@ async function main() {
       const tensor = await image(path.join(imgPathRoot, img));
       const result = await detect(tensor);
       log.data('Image:', img, 'Detected faces:', result.length);
-      for (const i of result) {
-        log.data('Gender:', Math.round(100 * i.genderProbability), 'probability', i.gender, 'with age', Math.round(10 * i.age) / 10);
-      }
+      for (const face of result) print(face);
       tensor.dispose();
     }
     const t1 = process.hrtime.bigint();
@@ -78,9 +81,7 @@ async function main() {
       const tensor = await image(param);
       const result = await detect(tensor);
       log.data('Image:', param, 'Detected faces:', result.length);
-      for (const i of result) {
-        log.data('Gender:', Math.round(100 * i.genderProbability), 'probability', i.gender, 'with age', Math.round(10 * i.age) / 10);
-      }
+      for (const face of result) print(face);
       tensor.dispose();
     }
   }
 }
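One possible tightening of the tensor loader above: `tf.tidy()` can replace the manual `dispose()` calls for the intermediate tensors. A minimal sketch under the same package assumptions as the demo; the returned tensor still has to be disposed by the caller, exactly as the demo does with `tensor.dispose()`:

```js
const fs = require('fs');
const tf = require('@tensorflow/tfjs-node');

function imageToTensor(file) {
  const buffer = fs.readFileSync(file);
  // tf.tidy disposes the decoded and casted intermediates automatically
  // and keeps only the tensor returned from the callback
  return tf.tidy(() => tf.node.decodeImage(buffer).toFloat().expandDims(0));
}
```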

dist/face-api.esm.js (vendored, 77988 changed lines): file diff suppressed because one or more lines are too long
dist/face-api.js (vendored, 77878 changed lines): file diff suppressed because one or more lines are too long
dist/face-api.node.js (vendored, 4678 changed lines): file diff suppressed because one or more lines are too long
dist/tfjs.esm.js (vendored, 76100 changed lines): file diff suppressed because one or more lines are too long
Ten further file diffs are likewise suppressed because one or more lines are too long.

View File

@@ -21,7 +21,7 @@
     "url": "git+https://github.com/vladmandic/face-api.git"
   },
   "scripts": {
-    "start": "node --trace-warnings demo/node-singleprocess.js",
+    "start": "node --trace-warnings demo/node.js",
     "dev": "node --trace-warnings server/serve.js",
     "build": "rimraf dist/* types/* typedoc/* && node server/build.js",
     "lint": "eslint src/**/* demo/*.js server/*.js",
@@ -43,14 +43,14 @@
     "@tensorflow/tfjs-backend-wasm": "^3.3.0",
     "@tensorflow/tfjs-node": "^3.3.0",
     "@tensorflow/tfjs-node-gpu": "^3.3.0",
-    "@types/node": "^14.14.35",
+    "@types/node": "^14.14.36",
     "@typescript-eslint/eslint-plugin": "^4.19.0",
     "@typescript-eslint/parser": "^4.19.0",
     "@vladmandic/pilogger": "^0.2.15",
     "canvas": "^2.7.0",
     "chokidar": "^3.5.1",
     "dayjs": "^1.10.4",
-    "esbuild": "^0.9.6",
+    "esbuild": "^0.10.1",
     "eslint": "^7.22.0",
     "eslint-config-airbnb-base": "^14.2.1",
     "eslint-plugin-import": "^2.22.1",
@@ -61,7 +61,7 @@
     "seedrandom": "^3.0.5",
     "simple-git": "^2.37.0",
     "tslib": "^2.1.0",
-    "typedoc": "^0.20.33",
+    "typedoc": "^0.20.34",
     "typescript": "^4.2.3"
   }
 }

View File

@@ -42,9 +42,9 @@ const tsconfig = {
 // common configuration
 const common = {
   banner,
-  minifyWhitespace: true,
-  minifyIdentifiers: true,
-  minifySyntax: true,
+  minifyWhitespace: false,
+  minifyIdentifiers: false,
+  minifySyntax: false,
   bundle: true,
   sourcemap: true,
   metafile: true,
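For context, a `common` object like this is typically spread into esbuild's JS API once per build target. A minimal sketch under that assumption; the entry point and output path here are hypothetical, not taken from server/build.js:

```js
const esbuild = require('esbuild');

async function buildNode(common) {
  await esbuild.build({
    ...common,                     // banner, minify* flags, bundle, sourcemap, metafile from above
    entryPoints: ['src/index.ts'], // hypothetical entry point
    outfile: 'dist/face-api.node.js',
    platform: 'node',
  });
}
```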

View File

@@ -4,9 +4,7 @@ import { isMediaLoaded } from './isMediaLoaded';
 export function awaitMediaLoaded(media: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement) {
   // eslint-disable-next-line consistent-return
   return new Promise((resolve, reject) => {
-    if (media instanceof env.getEnv().Canvas || isMediaLoaded(media)) {
-      return resolve(null);
-    }
+    if (media instanceof env.getEnv().Canvas || isMediaLoaded(media)) return resolve(null);
     function onError(e: Event) {
       if (!e.currentTarget) return;

View File

@@ -16,29 +16,17 @@ import { TNetInput } from './types';
  */
 export async function extractFaces(input: TNetInput, detections: Array<FaceDetection | Rect>): Promise<HTMLCanvasElement[]> {
   const { Canvas } = env.getEnv();
   let canvas = input as HTMLCanvasElement;
   if (!(input instanceof Canvas)) {
     const netInput = await toNetInput(input);
-    if (netInput.batchSize > 1) {
-      throw new Error('extractFaces - batchSize > 1 not supported');
-    }
+    if (netInput.batchSize > 1) throw new Error('extractFaces - batchSize > 1 not supported');
     const tensorOrCanvas = netInput.getInput(0);
-    canvas = tensorOrCanvas instanceof Canvas
-      ? tensorOrCanvas
-      : await imageTensorToCanvas(tensorOrCanvas);
+    canvas = tensorOrCanvas instanceof Canvas ? tensorOrCanvas : await imageTensorToCanvas(tensorOrCanvas);
   }
   const ctx = getContext2dOrThrow(canvas);
   const boxes = detections
-    .map((det) => (det instanceof FaceDetection
-      ? det.forSize(canvas.width, canvas.height).box.floor()
-      : det))
+    .map((det) => (det instanceof FaceDetection ? det.forSize(canvas.width, canvas.height).box.floor() : det))
     .map((box) => box.clipAtImageBorders(canvas.width, canvas.height));
   return boxes.map(({ x, y, width, height }) => {
     const faceImg = createCanvas({ width, height });
     if (width > 0 && height > 0) getContext2dOrThrow(faceImg).putImageData(ctx.getImageData(x, y, width, height), 0, 0);
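A possible call site for the function above, cropping each detected face into its own canvas. Sketch only: it assumes `faceapi` re-exports both `detectAllFaces` and `extractFaces` (as face-api.js does) and that `input` is an already loaded image or canvas:

```js
async function cropFaces(input) {
  const detections = await faceapi.detectAllFaces(input); // resolves to FaceDetection[]
  return faceapi.extractFaces(input, detections);         // one HTMLCanvasElement per face
}
```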

View File

@@ -13,44 +13,23 @@ import { TNetInput } from './types';
  * @returns A NetInput instance, which can be passed into one of the neural networks.
  */
 export async function toNetInput(inputs: TNetInput): Promise<NetInput> {
-  if (inputs instanceof NetInput) {
-    return inputs;
-  }
-  const inputArgArray = Array.isArray(inputs)
-    ? inputs
-    : [inputs];
-  if (!inputArgArray.length) {
-    throw new Error('toNetInput - empty array passed as input');
-  }
+  if (inputs instanceof NetInput) return inputs;
+  const inputArgArray = Array.isArray(inputs) ? inputs : [inputs];
+  if (!inputArgArray.length) throw new Error('toNetInput - empty array passed as input');
   const getIdxHint = (idx: number) => (Array.isArray(inputs) ? ` at input index ${idx}:` : '');
   const inputArray = inputArgArray.map(resolveInput);
   inputArray.forEach((input, i) => {
     if (!isMediaElement(input) && !isTensor3D(input) && !isTensor4D(input)) {
-      if (typeof inputArgArray[i] === 'string') {
-        throw new Error(`toNetInput -${getIdxHint(i)} string passed, but could not resolve HTMLElement for element id ${inputArgArray[i]}`);
-      }
+      if (typeof inputArgArray[i] === 'string') throw new Error(`toNetInput -${getIdxHint(i)} string passed, but could not resolve HTMLElement for element id ${inputArgArray[i]}`);
       throw new Error(`toNetInput -${getIdxHint(i)} expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id`);
     }
     if (isTensor4D(input)) {
       // if tf.Tensor4D is passed in the input array, the batch size has to be 1
       const batchSize = input.shape[0];
-      if (batchSize !== 1) {
-        throw new Error(`toNetInput -${getIdxHint(i)} tf.Tensor4D with batchSize ${batchSize} passed, but not supported in input array`);
-      }
+      if (batchSize !== 1) throw new Error(`toNetInput -${getIdxHint(i)} tf.Tensor4D with batchSize ${batchSize} passed, but not supported in input array`);
     }
   });
   // wait for all media elements being loaded
-  await Promise.all(
-    inputArray.map((input) => isMediaElement(input) && awaitMediaLoaded(input)),
-  );
+  await Promise.all(inputArray.map((input) => isMediaElement(input) && awaitMediaLoaded(input)));
   return new NetInput(inputArray, Array.isArray(inputs));
 }

View File

@@ -13,12 +13,8 @@ import { PredictAllFaceExpressionsTask, PredictSingleFaceExpressionsTask } from
 import { FaceDetectionOptions } from './types';
 export class DetectFacesTaskBase<TReturn> extends ComposableTask<TReturn> {
-  constructor(
-    // eslint-disable-next-line no-unused-vars
-    protected input: TNetInput,
-    // eslint-disable-next-line no-unused-vars
-    protected options: FaceDetectionOptions = new SsdMobilenetv1Options(),
-  ) {
+  // eslint-disable-next-line no-unused-vars
+  constructor(protected input: TNetInput, protected options: FaceDetectionOptions = new SsdMobilenetv1Options()) {
     super();
   }
 }

View File

@@ -4,21 +4,13 @@ import { SsdMobilenetv1Options } from '../ssdMobilenetv1/index';
 import { ITinyYolov2Options, TinyYolov2Options } from '../tinyYolov2/index';
 import { detectAllFaces } from './detectFaces';
-// export allFaces API for backward compatibility
-export async function allFacesSsdMobilenetv1(
-  input: TNetInput,
-  minConfidence?: number,
-): Promise<WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]> {
+export async function allFacesSsdMobilenetv1(input: TNetInput, minConfidence?: number): Promise<WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]> {
   return detectAllFaces(input, new SsdMobilenetv1Options(minConfidence ? { minConfidence } : {}))
     .withFaceLandmarks()
     .withFaceDescriptors();
 }
-export async function allFacesTinyYolov2(
-  input: TNetInput,
-  forwardParams: ITinyYolov2Options = {},
-): Promise<WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]> {
+export async function allFacesTinyYolov2(input: TNetInput, forwardParams: ITinyYolov2Options = {}): Promise<WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]> {
   return detectAllFaces(input, new TinyYolov2Options(forwardParams))
     .withFaceLandmarks()
     .withFaceDescriptors();
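The compacted backward-compatibility helper behaves exactly as before; calling it is equivalent to building the chain by hand. A small usage sketch, with the minConfidence value assumed for illustration:

```js
const faceapi = require('../dist/face-api.node.js'); // or the published package

async function describeAll(input) {
  // same as: detectAllFaces(input, new SsdMobilenetv1Options({ minConfidence: 0.5 }))
  //            .withFaceLandmarks().withFaceDescriptors()
  return faceapi.allFacesSsdMobilenetv1(input, 0.5);
}
```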

View File

@@ -43,10 +43,7 @@ export class SsdMobilenetv1 extends NeuralNetwork<NetParams> {
     const { maxResults, minConfidence } = new SsdMobilenetv1Options(options);
     const netInput = await toNetInput(input);
-    const {
-      boxes: _boxes,
-      scores: _scores,
-    } = this.forwardInput(netInput);
+    const { boxes: _boxes, scores: _scores } = this.forwardInput(netInput);
     const boxes = _boxes[0];
     const scores = _scores[0];
@@ -57,13 +54,7 @@ export class SsdMobilenetv1 extends NeuralNetwork<NetParams> {
     const scoresData = Array.from(scores.dataSync());
     const iouThreshold = 0.5;
-    const indices = nonMaxSuppression(
-      boxes,
-      scoresData as number[],
-      maxResults,
-      iouThreshold,
-      minConfidence,
-    );
+    const indices = nonMaxSuppression(boxes, scoresData as number[], maxResults, iouThreshold, minConfidence);
     const reshapedDims = netInput.getReshapedInputDimensions(0);
     const inputSize = netInput.inputSize as number;
@@ -83,16 +74,8 @@ export class SsdMobilenetv1 extends NeuralNetwork<NetParams> {
     ].map((val) => val * padX);
     return new FaceDetection(
       scoresData[idx] as number,
-      new Rect(
-        left,
-        top,
-        right - left,
-        bottom - top,
-      ),
-      {
-        height: netInput.getInputHeight(0),
-        width: netInput.getInputWidth(0),
-      },
+      new Rect(left, top, right - left, bottom - top),
+      { height: netInput.getInputHeight(0), width: netInput.getInputWidth(0) },
     );
   });
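The `nonMaxSuppression` call reshaped above is the standard greedy IoU filter: keep the highest-scoring boxes and drop any remaining box that overlaps a kept one too much. Conceptually it does roughly the following; this is a simplified sketch, not the library's actual implementation, with boxes assumed to be plain `[left, top, right, bottom]` arrays rather than tensors:

```js
// Greedy non-max suppression over score-sorted candidates
function nmsSketch(boxes, scores, maxResults, iouThreshold, minScore) {
  const order = scores
    .map((score, idx) => ({ score, idx }))
    .filter((s) => s.score >= minScore)     // drop low-confidence candidates first
    .sort((a, b) => b.score - a.score);     // best scores first
  const keep = [];
  for (const { idx } of order) {
    if (keep.length >= maxResults) break;
    // keep a box only if it does not overlap any already-kept box too much
    if (keep.every((k) => iou(boxes[idx], boxes[k]) <= iouThreshold)) keep.push(idx);
  }
  return keep; // indices of surviving detections
}

// intersection-over-union of two [left, top, right, bottom] boxes
function iou(a, b) {
  const x1 = Math.max(a[0], b[0]);
  const y1 = Math.max(a[1], b[1]);
  const x2 = Math.min(a[2], b[2]);
  const y2 = Math.min(a[3], b[3]);
  const inter = Math.max(0, x2 - x1) * Math.max(0, y2 - y1);
  const areaA = (a[2] - a[0]) * (a[3] - a[1]);
  const areaB = (b[2] - b[0]) * (b[3] - b[1]);
  return inter / (areaA + areaB - inter);
}
```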