From 2e36f43efb561c67d132bc75f112d26320a197b1 Mon Sep 17 00:00:00 2001
From: Vladimir Mandic <mandic00@live.com>
Date: Tue, 31 Aug 2021 14:48:55 -0400
Subject: [PATCH] added demo node-canvas

---
 CHANGELOG.md               |   5 +-
 TODO.md                    |  12 ----
 demo/nodejs/node-canvas.js | 128 ++++++++-----------------------------
 src/draw/draw.ts           |  29 ++++++---
 src/human.ts               |   7 +-
 wiki                       |   2 +-
 6 files changed, 60 insertions(+), 123 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 36ee42c9..ab85bffb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,7 +9,10 @@ Repository: ****
 
 ## Changelog
 
-### **HEAD -> main** 2021/08/23 mandic00@live.com
+### **HEAD -> main** 2021/08/31 mandic00@live.com
+
+
+### **origin/main** 2021/08/31 mandic00@live.com
 
 - implement finger poses in hand detection and gestures
 - implemented movenet-multipose model

diff --git a/TODO.md b/TODO.md
index ac97e4b2..3d97a3e1 100644
--- a/TODO.md
+++ b/TODO.md
@@ -25,12 +25,8 @@ WebGL shader optimizations for faster load and initial detection
 Enhanced rotation correction for face detection is not working in NodeJS due to missing kernel op in TFJS
 Feature is automatically disabled in NodeJS without user impact
 
-- Backend NodeJS missing kernel op `FlipLeftRight`
-
-  *Target: `Human` v2.2 with `TFJS` v3.9*
 - Backend NodeJS missing kernel op `RotateWithOffset`
 
-  *Target: N/A*
 
@@ -39,15 +35,10 @@ Feature is automatically disabled in NodeJS without user impact
 Enhanced rotation correction for hand detection is not working in NodeJS due to missing kernel op in TFJS
 Feature is automatically disabled in NodeJS without user impact
 
-- Backend NodeJS missing kernel op `FlipLeftRight`
-
-  *Target: `Human` v2.2 with `TFJS` v3.9*
 - Backend NodeJS missing kernel op `RotateWithOffset`
 
-  *Target: N/A*
 
 Hand detection using WASM backend has reduced precision due to math rounding errors in backend
 
-*Target: N/A*
 
@@ -57,7 +48,6 @@ MoveNet MultiPose model does not work with WASM backend due to missing F32 imple
 
 - Backend WASM missing F32 implementation
-  *Target: N/A*
 
 ### Object Detection
 
@@ -65,10 +55,8 @@ Object detection using CenterNet or NanoDet models is not working when using WAS
 
 - Backend WASM missing kernel op `Mod`
-  *Target: `Human` v2.2 with `TFJS` v3.9*
 - Backend WASM missing kernel op `SparseToDense`
-  *Target: `Human` v2.2 with `TFJS` v3.9*
 
 ### WebGPU Backend

diff --git a/demo/nodejs/node-canvas.js b/demo/nodejs/node-canvas.js
index 8cc75ce5..789e770f 100644
--- a/demo/nodejs/node-canvas.js
+++ b/demo/nodejs/node-canvas.js
@@ -4,13 +4,11 @@
 
 const log = require('@vladmandic/pilogger');
 const fs = require('fs');
-const path = require('path');
 const process = require('process');
+const canvas = require('canvas');
 
 let fetch; // fetch is dynamically imported later
 
-// const canvas = require('canvas');
-
 // for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
 // eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
 const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
@@ -23,24 +21,18 @@ let human = null;
 
 const myConfig = {
   backend: 'tensorflow',
   modelBasePath: 'file://models/',
-  debug: true,
-  async: false,
-  filter: {
-    enabled: true,
-    flip: true,
-  },
+  debug: false,
+  async: true,
+  filter: { enabled: false },
   face: {
     enabled: true,
-    detector: { enabled: true, rotation: false },
+    detector: { enabled: true },
     mesh: { enabled: true },
     iris: { enabled: true },
     description: { enabled: true },
     emotion: { enabled: true },
   },
-  hand: {
-    enabled: true,
-  },
-  // body: { modelPath: 'blazepose.json', enabled: true },
+  hand: { enabled: true },
   body: { enabled: true },
   object: { enabled: true },
 };
@@ -52,14 +44,13 @@ async function init() {
   await human.tf.ready();
   // pre-load models
   log.info('Human:', human.version);
-  log.info('Active Configuration', human.config);
   await human.load();
   const loaded = Object.keys(human.models).filter((a) => human.models[a]);
   log.info('Loaded:', loaded);
   log.info('Memory state:', human.tf.engine().memory());
 }
 
-async function detect(input) {
+async function detect(input, output) {
   // read input image file and create tensor to be used for processing
   let buffer;
   log.info('Loading image:', input);
@@ -102,59 +93,11 @@ async function detect(input) {
   // dispose image tensor as we no longer need it
   human.tf.dispose(tensor);
 
-  // print data to console
-  log.data('Results:');
-  if (result && result.face && result.face.length > 0) {
-    for (let i = 0; i < result.face.length; i++) {
-      const face = result.face[i];
-      const emotion = face.emotion.reduce((prev, curr) => (prev.score > curr.score ? prev : curr));
-      log.data(`  Face: #${i} boxScore:${face.boxScore} faceScore:${face.faceScore} age:${face.age} genderScore:${face.genderScore} gender:${face.gender} emotionScore:${emotion.score} emotion:${emotion.emotion} iris:${face.iris}`);
-    }
-  } else {
-    log.data('  Face: N/A');
-  }
-  if (result && result.body && result.body.length > 0) {
-    for (let i = 0; i < result.body.length; i++) {
-      const body = result.body[i];
-      log.data(`  Body: #${i} score:${body.score} keypoints:${body.keypoints?.length}`);
-    }
-  } else {
-    log.data('  Body: N/A');
-  }
-  if (result && result.hand && result.hand.length > 0) {
-    for (let i = 0; i < result.hand.length; i++) {
-      const hand = result.hand[i];
-      log.data(`  Hand: #${i} score:${hand.score} keypoints:${hand.keypoints?.length}`);
-    }
-  } else {
-    log.data('  Hand: N/A');
-  }
-  if (result && result.gesture && result.gesture.length > 0) {
-    for (let i = 0; i < result.gesture.length; i++) {
-      const [key, val] = Object.entries(result.gesture[i]);
-      log.data(`  Gesture: ${key[0]}#${key[1]} gesture:${val[1]}`);
-    }
-  } else {
-    log.data('  Gesture: N/A');
-  }
-  if (result && result.object && result.object.length > 0) {
-    for (let i = 0; i < result.object.length; i++) {
-      const object = result.object[i];
-      log.data(`  Object: #${i} score:${object.score} label:${object.label}`);
-    }
-  } else {
-    log.data('  Object: N/A');
-  }
 
-  // print data to console
   if (result) {
     // invoke persons getter
     const persons = result.persons;
-
-    // write result objects to file
-    // fs.writeFileSync('result.json', JSON.stringify(result, null, 2));
-
-    log.data('Persons:');
+    log.data('Detected:');
     for (let i = 0; i < persons.length; i++) {
       const face = persons[i].face;
       const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.iris}` : null;
@@ -164,26 +107,23 @@ async function detect(input) {
     }
   }
 
-  return result;
-}
+  // load and draw original image
+  const outputCanvas = new canvas.Canvas(tensor.shape[2], tensor.shape[1], 'image'); // decoded tensor shape tells us width and height
+  const ctx = outputCanvas.getContext('2d');
+  const original = await canvas.loadImage(buffer); // we already have input as buffer, so let's reuse it
+  ctx.drawImage(original, 0, 0, outputCanvas.width, outputCanvas.height); // draw original to new canvas
 
-async function test() {
-  process.on('unhandledRejection', (err) => {
-    // @ts-ignore // no idea if exception message is compelte
-    log.error(err?.message || err || 'no error message');
-  });
+  // draw human results on canvas
+  human.setCanvas(outputCanvas); // tell human to use this canvas
+  human.draw.all(outputCanvas, result); // human will draw results as overlays on canvas
 
-  // test with embedded full body image
-  let result;
+  // write canvas to new image file
+  const out = fs.createWriteStream(output);
+  out.on('finish', () => log.state('Created output image:', output));
+  out.on('error', (err) => log.error('Error creating image:', output, err));
+  const stream = outputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
+  stream.pipe(out);
 
-  log.state('Processing embedded warmup image: face');
-  myConfig.warmup = 'face';
-  result = await human.warmup(myConfig);
-
-  log.state('Processing embedded warmup image: full');
-  myConfig.warmup = 'full';
-  result = await human.warmup(myConfig);
-  // no need to print results as they are printed to console during detection from within the library due to human.config.debug set
 
   return result;
 }
 
@@ -192,26 +132,14 @@ async function main() {
   log.info('Current folder:', process.env.PWD);
   fetch = (await import('node-fetch')).default;
   await init();
-  const f = process.argv[2];
-  if (process.argv.length !== 3) {
-    log.warn('Parameters: missing');
-    await test();
-  } else if (!fs.existsSync(f) && !f.startsWith('http')) {
+  const input = process.argv[2];
+  const output = process.argv[3];
+  if (process.argv.length !== 4) {
+    log.error('Parameters: missing');
+  } else if (!fs.existsSync(input) && !input.startsWith('http')) {
     log.error(`File not found: ${process.argv[2]}`);
   } else {
-    if (fs.existsSync(f)) {
-      const stat = fs.statSync(f);
-      if (stat.isDirectory()) {
-        const dir = fs.readdirSync(f);
-        for (const file of dir) {
-          await detect(path.join(f, file));
-        }
-      } else {
-        await detect(f);
-      }
-    } else {
-      await detect(f);
-    }
+    await detect(input, output);
   }
 }
 
diff --git a/src/draw/draw.ts b/src/draw/draw.ts
index e3f313dc..460d39ee 100644
--- a/src/draw/draw.ts
+++ b/src/draw/draw.ts
@@ -66,6 +66,19 @@ export const options: DrawOptions = {
   bufferedOutput: true,
 };
 
+let Canvas;
+
+export function setCanvas(obj) {
+  if (obj.getContext) Canvas = obj;
+  else throw new Error('Human: Canvas is not functional');
+}
+
+const checkCanvas = (input) => {
+  if ((typeof HTMLCanvasElement !== 'undefined') && (input instanceof HTMLCanvasElement)) return true;
+  if (typeof Canvas !== 'undefined') return true;
+  return false;
+};
+
 const rad2deg = (theta) => Math.round((theta * 180) / Math.PI);
 
 function point(ctx, x, y, z = 0, localOptions) {
@@ -137,7 +150,7 @@ function curves(ctx, points: [number, number, number?][] = [], localOptions) {
 export async function gesture(inCanvas: HTMLCanvasElement, result: Array, drawOptions?: DrawOptions) {
   const localOptions = mergeDeep(options, drawOptions);
   if (!result || !inCanvas) return;
-  if (!(inCanvas instanceof HTMLCanvasElement)) return;
+  if (!checkCanvas(inCanvas)) return;
   const ctx = inCanvas.getContext('2d');
   if (!ctx) return;
   ctx.font = localOptions.font;
@@ -164,7 +177,7 @@ export async function gesture(inCanvas: HTMLCanvasElement, result: Array, drawO
 export async function face(inCanvas: HTMLCanvasElement, result: Array, drawOptions?: DrawOptions) {
   const localOptions = mergeDeep(options, drawOptions);
   if (!result || !inCanvas) return;
-  if (!(inCanvas instanceof HTMLCanvasElement)) return;
+  if (!checkCanvas(inCanvas)) return;
   const ctx = inCanvas.getContext('2d');
   if (!ctx) return;
   for (const f of result) {
@@ -268,7 +281,7 @@ export async function face(inCanvas: HTMLCanvasElement, result: Array, dra
 export async function body(inCanvas: HTMLCanvasElement, result: Array, drawOptions?: DrawOptions) {
   const localOptions = mergeDeep(options, drawOptions);
   if (!result || !inCanvas) return;
-  if (!(inCanvas instanceof HTMLCanvasElement)) return;
+  if (!checkCanvas(inCanvas)) return;
   const ctx = inCanvas.getContext('2d');
   if (!ctx) return;
   ctx.lineJoin = 'round';
@@ -380,7 +393,7 @@ export async function body(inCanvas: HTMLCanvasElement, result: Array, dra
 export async function hand(inCanvas: HTMLCanvasElement, result: Array, drawOptions?: DrawOptions) {
   const localOptions = mergeDeep(options, drawOptions);
   if (!result || !inCanvas) return;
-  if (!(inCanvas instanceof HTMLCanvasElement)) return;
+  if (!checkCanvas(inCanvas)) return;
   const ctx = inCanvas.getContext('2d');
   if (!ctx) return;
   ctx.lineJoin = 'round';
@@ -446,7 +459,7 @@ export async function hand(inCanvas: HTMLCanvasElement, result: Array, dra
 export async function object(inCanvas: HTMLCanvasElement, result: Array, drawOptions?: DrawOptions) {
   const localOptions = mergeDeep(options, drawOptions);
   if (!result || !inCanvas) return;
-  if (!(inCanvas instanceof HTMLCanvasElement)) return;
+  if (!checkCanvas(inCanvas)) return;
   const ctx = inCanvas.getContext('2d');
   if (!ctx) return;
   ctx.lineJoin = 'round';
@@ -473,7 +486,7 @@ export async function object(inCanvas: HTMLCanvasElement, result: Array, d
 export async function person(inCanvas: HTMLCanvasElement, result: Array, drawOptions?: DrawOptions) {
   const localOptions = mergeDeep(options, drawOptions);
   if (!result || !inCanvas) return;
-  if (!(inCanvas instanceof HTMLCanvasElement)) return;
+  if (!checkCanvas(inCanvas)) return;
   const ctx = inCanvas.getContext('2d');
   if (!ctx) return;
   ctx.lineJoin = 'round';
@@ -500,7 +513,7 @@ export async function person(inCanvas: HTMLCanvasElement, result: Array,
 
 export async function canvas(inCanvas: HTMLCanvasElement, outCanvas: HTMLCanvasElement) {
   if (!inCanvas || !outCanvas) return;
-  if (!(inCanvas instanceof HTMLCanvasElement) || !(outCanvas instanceof HTMLCanvasElement)) return;
+  if (!checkCanvas(inCanvas) || !checkCanvas(outCanvas)) return;
   const outCtx = inCanvas.getContext('2d');
   outCtx?.drawImage(inCanvas, 0, 0);
 }
@@ -509,7 +522,7 @@ export async function all(inCanvas: HTMLCanvasElement, result: Result, drawOptio
   const timestamp = now();
   const localOptions = mergeDeep(options, drawOptions);
   if (!result || !inCanvas) return null;
-  if (!(inCanvas instanceof HTMLCanvasElement)) return null;
+  if (!checkCanvas(inCanvas)) return null;
 
   const promise = Promise.all([
     face(inCanvas, result.face, localOptions),
diff --git a/src/human.ts b/src/human.ts
index cb72a7e5..24668883 100644
--- a/src/human.ts
+++ b/src/human.ts
@@ -190,7 +190,12 @@ export class Human {
     this.#lastInputSum = 1;
   }
 
-  // version = () => Human.version;
+  /**
+   * Sets internal canvas methods
+   *
+   * @param canvas
+   */
+  setCanvas = (canvas) => draw.setCanvas(canvas);
 
   // helper function: measure tensor leak
   /** @hidden */
diff --git a/wiki b/wiki
index fcc3945d..7f55fd1c 160000
--- a/wiki
+++ b/wiki
@@ -1 +1 @@
-Subproject commit fcc3945dedd5682f06c45c32504a86a1a7e2a20b
+Subproject commit 7f55fd1c8aea22f33a767da840147b15aeeed034
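
Usage sketch: a minimal end-to-end version of the flow this patch adds in demo/nodejs/node-canvas.js, condensed into a single self-contained script. The file paths ('input.jpg', 'output.jpg') and the inline config are illustrative assumptions, not values taken from the commit.

const fs = require('fs');
const canvas = require('canvas');
// eslint-disable-next-line no-unused-vars
const tf = require('@tensorflow/tfjs-node'); // must be loaded before Human in NodeJS
const Human = require('@vladmandic/human').default;

async function run() {
  const human = new Human({ backend: 'tensorflow', modelBasePath: 'file://models/' }); // illustrative config
  await human.load(); // pre-load models

  const buffer = fs.readFileSync('input.jpg'); // illustrative input path
  const tensor = human.tf.node.decodeImage(buffer, 3).expandDims(0); // shape [1, height, width, 3]
  const result = await human.detect(tensor); // run all enabled models

  const out = new canvas.Canvas(tensor.shape[2], tensor.shape[1]); // width and height from tensor shape
  const ctx = out.getContext('2d');
  ctx.drawImage(await canvas.loadImage(buffer), 0, 0, out.width, out.height); // draw original image first

  human.setCanvas(out); // register node-canvas so draw methods accept a non-HTMLCanvasElement
  await human.draw.all(out, result); // overlay detection results

  out.createJPEGStream({ quality: 0.5 }).pipe(fs.createWriteStream('output.jpg')); // illustrative output path
  human.tf.dispose(tensor); // release tensor memory
}

run();

Note the ordering: human.setCanvas must run before human.draw.all, since the checkCanvas guard added to src/draw/draw.ts only passes a non-HTMLCanvasElement input once a canvas has been registered.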