add node-wasm demo

pull/54/head
Vladimir Mandic 2021-04-26 14:45:49 -04:00
parent b8830e8cd3
commit 158dbc6208
15 changed files with 643 additions and 56854 deletions

@@ -9,11 +9,13 @@ Repository: **<git+https://github.com/vladmandic/face-api.git>**
## Changelog
### **HEAD -> master** 2021/04/25 mandic00@live.com
- major version full rebuild
### **1.2.1** 2021/04/22 mandic00@live.com
### **origin/master** 2021/04/20 mandic00@live.com
- add npmrc
- add canvas/image based demo to decode webp
### **1.1.12** 2021/04/13 mandic00@live.com

@@ -57,11 +57,14 @@ Three NodeJS examples are:
- `/demo/node.js`:
Regular usage of `FaceAPI` from `NodeJS`
Using `TFJS` native methods to load images
Using `TFJS` native methods to load images without external dependencies
- `/demo/node-canvas.js`:
Regular usage of `FaceAPI` from `NodeJS`
Using external `canvas` module to load images
Which also allows for image drawing and saving inside `NodeJS`
Which also allows for image drawing and saving inside `NodeJS` environment
- `/demo/node-wasm.js`:
Same as `node-canvas`, but using `WASM` backend in `NodeJS` environment (a minimal backend-setup sketch follows this list)
Because why not :)
- `/demo/node-multiprocess.js`:
Multiprocessing showcase that uses pool of worker processes
(`node-multiprocess-worker.js`)
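
As noted for `/demo/node-wasm.js`, the only change relative to the canvas demo is the backend selection. A minimal sketch of that setup, assuming the `@tensorflow/tfjs` and `@tensorflow/tfjs-backend-wasm` packages are installed (the complete demo is the new `demo/node-wasm.js` file below):

```js
// minimal sketch: select the WASM backend for TFJS in NodeJS
const tf = require('@tensorflow/tfjs');
require('@tensorflow/tfjs-backend-wasm'); // side-effect require registers the 'wasm' backend

async function init() {
  await tf.setBackend('wasm'); // switch from the default backend to WASM
  await tf.ready();            // wait until the backend has finished initializing
  console.log('active backend:', tf.getBackend()); // expected to print 'wasm'
}

init();
```

In the actual demo below the same calls are made through `faceapi.tf`, so the backend switch applies to the TFJS instance used by `FaceAPI`.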

demo/node-wasm.js (new file, 97 lines)

@@ -0,0 +1,97 @@
// @ts-nocheck
const fs = require('fs');
const process = require('process');
const path = require('path');
// eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require
const log = require('@vladmandic/pilogger');
// eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require, no-unused-vars
const tf = require('@tensorflow/tfjs');
// eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require
require('@tensorflow/tfjs-backend-wasm');
// eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require, no-unused-vars
const canvas = require('canvas');
const faceapi = require('../dist/face-api.node-cpu.js'); // this is equivalent to '@vladmandic/face-api'
const modelPathRoot = '../model';
const imgPathRoot = './demo'; // modify to include your sample images
const minConfidence = 0.15;
const maxResults = 5;
let optionsSSDMobileNet;
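// load an input image using node-canvas and draw it onto a canvas element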
async function image(input) {
  const img = await canvas.loadImage(input);
  const c = canvas.createCanvas(img.width, img.height);
  const ctx = c.getContext('2d');
  ctx.drawImage(img, 0, 0, img.width, img.height);
  // const out = fs.createWriteStream('test.jpg');
  // const stream = c.createJPEGStream({ quality: 0.6, progressive: true, chromaSubsampling: true });
  // stream.pipe(out);
  return c;
}
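// run the full face-api pipeline: detection, landmarks, expressions, descriptors, age/gender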
async function detect(tensor) {
  const result = await faceapi
    .detectAllFaces(tensor, optionsSSDMobileNet)
    .withFaceLandmarks()
    .withFaceExpressions()
    .withFaceDescriptors()
    .withAgeAndGender();
  return result;
}
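// log detection confidence, gender, age, dominant expression and bounding box for one detected face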
function print(face) {
  const expression = Object.entries(face.expressions).reduce((acc, val) => ((val[1] > acc[1]) ? val : acc), ['', 0]);
  const box = [face.alignedRect._box._x, face.alignedRect._box._y, face.alignedRect._box._width, face.alignedRect._box._height];
  const gender = `Gender: ${Math.round(100 * face.genderProbability)}% ${face.gender}`;
  log.data(`Detection confidence: ${Math.round(100 * face.detection._score)}% ${gender} Age: ${Math.round(10 * face.age) / 10} Expression: ${Math.round(100 * expression[1])}% ${expression[0]} Box: ${box.map((a) => Math.round(a))}`);
}
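// set up the wasm backend, load models from disk and process images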
async function main() {
  log.header();
  log.info('FaceAPI single-process test');
  faceapi.env.monkeyPatch({ Canvas: canvas.Canvas, Image: canvas.Image, ImageData: canvas.ImageData });
  await faceapi.tf.setBackend('wasm');
  await faceapi.tf.enableProdMode();
  await faceapi.tf.ENV.set('DEBUG', false);
  await faceapi.tf.ready();
  log.state(`Version: TensorFlow/JS ${faceapi.tf?.version_core} FaceAPI ${faceapi.version.faceapi} Backend: ${faceapi.tf?.getBackend()}`);
  log.info('Loading FaceAPI models');
  const modelPath = path.join(__dirname, modelPathRoot);
  await faceapi.nets.ssdMobilenetv1.loadFromDisk(modelPath);
  await faceapi.nets.ageGenderNet.loadFromDisk(modelPath);
  await faceapi.nets.faceLandmark68Net.loadFromDisk(modelPath);
  await faceapi.nets.faceRecognitionNet.loadFromDisk(modelPath);
  await faceapi.nets.faceExpressionNet.loadFromDisk(modelPath);
  optionsSSDMobileNet = new faceapi.SsdMobilenetv1Options({ minConfidence, maxResults });
  if (process.argv.length !== 3) {
    const t0 = process.hrtime.bigint();
    const dir = fs.readdirSync(imgPathRoot);
    let numImages = 0;
    for (const img of dir) {
      if (!img.toLocaleLowerCase().endsWith('.jpg')) continue;
      numImages += 1;
      const c = await image(path.join(imgPathRoot, img));
      const result = await detect(c);
      log.data('Image:', img, 'Detected faces:', result.length);
      for (const face of result) print(face);
    }
    const t1 = process.hrtime.bigint();
    log.info('Processed', numImages, 'images in', Math.trunc(parseInt(t1 - t0) / 1000 / 1000), 'ms');
  } else {
    const param = process.argv[2];
    if (fs.existsSync(param) || param.startsWith('http:') || param.startsWith('https:')) {
      const c = await image(param);
      const result = await detect(c);
      log.data('Image:', param, 'Detected faces:', result.length);
      for (const face of result) print(face);
    }
  }
}
main();
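
As the argument handling above shows, the demo can be run without arguments (it then scans `./demo`, i.e. `imgPathRoot`, for `.jpg` files) or with a single argument that is either an existing file path or an `http:`/`https:` URL, e.g. `node demo/node-wasm.js <image-or-url>`.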

Diffs for the remaining generated/vendored files are suppressed (too large or lines too long), including:
- dist/face-api.esm.js (vendored, 19106 changed lines)
- dist/face-api.js (vendored, 19106 changed lines)
- dist/tfjs.esm.js (vendored, 19136 changed lines)

package.json
@@ -53,7 +53,7 @@
"canvas": "^2.7.0",
"chokidar": "^3.5.1",
"dayjs": "^1.10.4",
"esbuild": "^0.11.14",
"esbuild": "^0.11.15",
"eslint": "^7.25.0",
"eslint-config-airbnb-base": "^14.2.1",
"eslint-plugin-import": "^2.22.1",

@@ -5,15 +5,6 @@ import { PointwiseConvParams } from './types';
export function pointwiseConvLayer(x: tf.Tensor4D, params: PointwiseConvParams, strides: [number, number]) {
  return tf.tidy(() => {
    let out = tf.conv2d(x, params.filters, strides, 'same');
    /*
    if (x.shape[1] === 512 && x.shape[3] === 3) {
      console.log('Input:', x.shape, x.size, 'sum:', x.reshape([786432]).sum().dataSync()[0]); // input does not change (checked values)
      console.log('Filter:', params.filters.shape, params.filters.size, 'sum:', params.filters.reshape([864]).sum().dataSync()[0]); // params do not change (checked values)
      console.log('Strides', strides);
      console.log('Conv2d 1st 5 values:', out.shape, out.size, out.dataSync().slice(0, 5)); // output has different values!
      console.log('Conv2D sum of all values:', tf.reshape(out, [2097152]).sum().dataSync()[0]); // silly sum just to see how much results diverged
    }
    */
    out = tf.add(out, params.batch_norm_offset);
    return tf.clipByValue(out, 0, 6);
  });