add node-canvas demo

pull/46/head
Vladimir Mandic 2021-03-23 09:36:41 -04:00
parent 9ccaf781ab
commit 2ad4fc24db
11 changed files with 125 additions and 25 deletions

View File

@ -69,10 +69,17 @@ Example can be accessed directly using Git pages using URL:
### NodeJS ### NodeJS
Two NodeJS examples are: Three NodeJS examples are:
- `/demo/node-singleprocess.js`: - `/demo/node-singleprocess.js`:
Regular usage of `FaceAPI` from `NodeJS` Regular usage of `FaceAPI` from `NodeJS`
- `/demo/node-singleprocess.js`:
Regular usage of `FaceAPI` from `NodeJS`
Using `TFJS` native methods to load images
- `/demo/node-canvas.js`:
Regular usage of `FaceAPI` from `NodeJS`
Using external `canvas` module to load images
Which also allows for image drawing and saving inside `NodeJS`
- `/demo/node-multiprocess.js`: - `/demo/node-multiprocess.js`:
Multiprocessing showcase that uses pool of worker processes Multiprocessing showcase that uses pool of worker processes
(`node-multiprocess-worker.js`) (`node-multiprocess-worker.js`)

87
demo/node-canvas.js Normal file
View File

@ -0,0 +1,87 @@
// @ts-nocheck
const fs = require('fs');
const process = require('process');
const path = require('path');
// eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require
const log = require('@vladmandic/pilogger');
// eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require
const tf = require('@tensorflow/tfjs-node');
const faceapi = require('../dist/face-api.node.js'); // this is equivalent to '@vladmandic/faceapi'
const modelPathRoot = '../model'; // location of FaceAPI model weights, resolved relative to this script in main()
const imgPathRoot = './demo'; // modify to include your sample images
const minScore = 0.1; // minimum detection confidence passed to SsdMobilenetv1Options
const maxResults = 5; // maximum number of faces reported per image
let optionsSSDMobileNet; // SsdMobilenetv1Options instance; initialized in main() after models are loaded
/**
 * Load an image file from disk and convert it to a tensor suitable for FaceAPI.
 * @param {string} img - path to the image file
 * @returns {Promise<object>} 4D float tensor of shape [1, height, width, 3]; caller must dispose it
 */
async function image(img) {
  const buffer = fs.readFileSync(img);
  // request exactly 3 channels: without this, images carrying an alpha channel
  // (e.g. PNG) decode to 4 channels, which the RGB-based detector cannot handle
  const decoded = tf.node.decodeImage(buffer, 3);
  const casted = decoded.toFloat();
  const result = casted.expandDims(0); // add batch dimension expected by the model
  // free intermediates; only the returned tensor stays alive
  decoded.dispose();
  casted.dispose();
  return result;
}
/**
 * Run the full FaceAPI pipeline on an input tensor: detection plus
 * landmarks, expressions, descriptors, age and gender.
 * @param {object} tensor - 4D input tensor produced by image()
 * @returns {Promise<Array>} detection results, one entry per detected face
 */
async function detect(tensor) {
  const task = faceapi
    .detectAllFaces(tensor, optionsSSDMobileNet)
    .withFaceLandmarks()
    .withFaceExpressions()
    .withFaceDescriptors()
    .withAgeAndGender();
  return task;
}
// Log gender probability and age for each detected face.
// Extracted so both the directory-scan and single-image branches share it.
function printFaces(result) {
  for (const face of result) {
    log.data('Gender:', Math.round(100 * face.genderProbability), 'probability', face.gender, 'with age', Math.round(10 * face.age) / 10);
  }
}

/**
 * Entry point: initialize the TFJS backend, load all FaceAPI models, then run
 * detection either on a single image given as argv[2] or, when no argument is
 * passed, on every .jpg file found in imgPathRoot.
 */
async function main() {
  log.header();
  log.info('FaceAPI single-process test');
  await faceapi.tf.setBackend('tensorflow');
  await faceapi.tf.enableProdMode();
  await faceapi.tf.ENV.set('DEBUG', false);
  await faceapi.tf.ready();
  log.state(`Version: TensorFlow/JS ${faceapi.tf?.version_core} FaceAPI ${faceapi.version.faceapi} Backend: ${faceapi.tf?.getBackend()}`);
  log.info('Loading FaceAPI models');
  const modelPath = path.join(__dirname, modelPathRoot);
  await faceapi.nets.ssdMobilenetv1.loadFromDisk(modelPath);
  await faceapi.nets.ageGenderNet.loadFromDisk(modelPath);
  await faceapi.nets.faceLandmark68Net.loadFromDisk(modelPath);
  await faceapi.nets.faceRecognitionNet.loadFromDisk(modelPath);
  await faceapi.nets.faceExpressionNet.loadFromDisk(modelPath);
  optionsSSDMobileNet = new faceapi.SsdMobilenetv1Options({ minConfidence: minScore, maxResults });
  if (process.argv.length !== 3) {
    // no argument given: scan imgPathRoot for jpg images
    const t0 = process.hrtime.bigint();
    const dir = fs.readdirSync(imgPathRoot);
    let processed = 0; // count only images actually analyzed, not every directory entry
    for (const img of dir) {
      if (!img.toLowerCase().endsWith('.jpg')) continue;
      const tensor = await image(path.join(imgPathRoot, img));
      const result = await detect(tensor);
      log.data('Image:', img, 'Detected faces:', result.length);
      printFaces(result);
      tensor.dispose();
      processed += 1;
    }
    const t1 = process.hrtime.bigint();
    // hrtime.bigint() is nanoseconds; convert the BigInt delta via Number() before scaling to ms
    log.info('Processed', processed, 'images in', Math.trunc(Number(t1 - t0) / 1000 / 1000), 'ms');
  } else {
    // single image path passed on the command line
    const param = process.argv[2];
    if (fs.existsSync(param)) {
      const tensor = await image(param);
      const result = await detect(tensor);
      log.data('Image:', param, 'Detected faces:', result.length);
      printFaces(result);
      tensor.dispose();
    }
  }
}
main();

View File

@ -5,8 +5,10 @@ const process = require('process');
const path = require('path'); const path = require('path');
// eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require // eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require
const log = require('@vladmandic/pilogger'); const log = require('@vladmandic/pilogger');
// eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require // eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require, no-unused-vars
const tf = require('@tensorflow/tfjs-node'); const tf = require('@tensorflow/tfjs-node');
// eslint-disable-next-line import/no-extraneous-dependencies, node/no-unpublished-require
const canvas = require('canvas');
const faceapi = require('../dist/face-api.node.js'); // this is equivalent to '@vladmandic/faceapi' const faceapi = require('../dist/face-api.node.js'); // this is equivalent to '@vladmandic/faceapi'
const modelPathRoot = '../model'; const modelPathRoot = '../model';
@ -15,14 +17,12 @@ const minScore = 0.1;
const maxResults = 5; const maxResults = 5;
let optionsSSDMobileNet; let optionsSSDMobileNet;
async function image(img) { async function image(input) {
const buffer = fs.readFileSync(img); const img = canvas.loadImage(input);
const decoded = tf.node.decodeImage(buffer); const c = canvas.createCanvas(img.width, img.height);
const casted = decoded.toFloat(); const ctx = c.getContext('2d');
const result = casted.expandDims(0); ctx.drawImage(img, 0, 0, img.width, img.height);
decoded.dispose(); return c;
casted.dispose();
return result;
} }
async function detect(tensor) { async function detect(tensor) {
@ -39,6 +39,8 @@ async function main() {
log.header(); log.header();
log.info('FaceAPI single-process test'); log.info('FaceAPI single-process test');
faceapi.env.monkeyPatch({ Canvas: canvas.Canvas, Image: canvas.Image, ImageData: canvas.ImageData });
await faceapi.tf.setBackend('tensorflow'); await faceapi.tf.setBackend('tensorflow');
await faceapi.tf.enableProdMode(); await faceapi.tf.enableProdMode();
await faceapi.tf.ENV.set('DEBUG', false); await faceapi.tf.ENV.set('DEBUG', false);
@ -63,6 +65,9 @@ async function main() {
const tensor = await image(path.join(imgPathRoot, img)); const tensor = await image(path.join(imgPathRoot, img));
const result = await detect(tensor); const result = await detect(tensor);
log.data('Image:', img, 'Detected faces:', result.length); log.data('Image:', img, 'Detected faces:', result.length);
for (const i of result) {
log.data('Gender:', Math.round(100 * i.genderProbability), 'probability', i.gender, 'with age', Math.round(10 * i.age) / 10);
}
tensor.dispose(); tensor.dispose();
} }
const t1 = process.hrtime.bigint(); const t1 = process.hrtime.bigint();
@ -74,7 +79,7 @@ async function main() {
const result = await detect(tensor); const result = await detect(tensor);
log.data('Image:', param, 'Detected faces:', result.length); log.data('Image:', param, 'Detected faces:', result.length);
for (const i of result) { for (const i of result) {
log.data('Gender:', i.genderProbability, i.gender, 'Age:', i.age); log.data('Gender:', Math.round(100 * i.genderProbability), 'probability', i.gender, 'with age', Math.round(10 * i.age) / 10);
} }
tensor.dispose(); tensor.dispose();
} }

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -44,12 +44,13 @@
"@tensorflow/tfjs-node": "^3.3.0", "@tensorflow/tfjs-node": "^3.3.0",
"@tensorflow/tfjs-node-gpu": "^3.3.0", "@tensorflow/tfjs-node-gpu": "^3.3.0",
"@types/node": "^14.14.35", "@types/node": "^14.14.35",
"@typescript-eslint/eslint-plugin": "^4.18.0", "@typescript-eslint/eslint-plugin": "^4.19.0",
"@typescript-eslint/parser": "^4.18.0", "@typescript-eslint/parser": "^4.19.0",
"@vladmandic/pilogger": "^0.2.15", "@vladmandic/pilogger": "^0.2.15",
"canvas": "^2.7.0",
"chokidar": "^3.5.1", "chokidar": "^3.5.1",
"dayjs": "^1.10.4", "dayjs": "^1.10.4",
"esbuild": "^0.9.5", "esbuild": "^0.9.6",
"eslint": "^7.22.0", "eslint": "^7.22.0",
"eslint-config-airbnb-base": "^14.2.1", "eslint-config-airbnb-base": "^14.2.1",
"eslint-plugin-import": "^2.22.1", "eslint-plugin-import": "^2.22.1",
@ -60,7 +61,7 @@
"seedrandom": "^3.0.5", "seedrandom": "^3.0.5",
"simple-git": "^2.37.0", "simple-git": "^2.37.0",
"tslib": "^2.1.0", "tslib": "^2.1.0",
"typedoc": "^0.20.32", "typedoc": "^0.20.33",
"typescript": "^4.2.3" "typescript": "^4.2.3"
} }
} }

View File

@ -7,11 +7,11 @@ export function pointwiseConvLayer(x: tf.Tensor4D, params: PointwiseConvParams,
let out = tf.conv2d(x, params.filters, strides, 'same'); let out = tf.conv2d(x, params.filters, strides, 'same');
/* /*
if (x.shape[1] === 512 && x.shape[3] === 3) { if (x.shape[1] === 512 && x.shape[3] === 3) {
console.log('Input:', x.shape, x.size); // input does not change (checked values) console.log('Input:', x.shape, x.size, 'sum:', x.reshape([786432]).sum().dataSync()[0]); // input does not change (checked values)
console.log('Filter:', params.filters.shape, params.filters.size); // params do not change (checked values) console.log('Filter:', params.filters.shape, params.filters.size, 'sum:', params.filters.reshape([864]).sum().dataSync()[0]); // params do not change (checked values)
console.log('Strides', strides); console.log('Strides', strides);
console.log('Conv2d Output:', out.shape, out.size, out.dataSync()[0]); // output has different values! console.log('Conv2d 1st 5 values:', out.shape, out.size, out.dataSync().slice(0, 5)); // output has different values!
console.log('Sum of all Conv2D values:', tf.reshape(out, [2097152]).sum().dataSync()[0]); // silly sum just to see how much results diverged console.log('Conv2D sum of all values:', tf.reshape(out, [2097152]).sum().dataSync()[0]); // silly sum just to see how much results diverged
} }
*/ */
out = tf.add(out, params.batch_norm_offset); out = tf.add(out, params.batch_norm_offset);