mirror of https://github.com/vladmandic/human

support segmentation for nodejs

parent d3113d6baf
commit c52f1c979c
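This commit adds NodeJS support for the segmentation module. Below is a minimal usage sketch, assuming the `canvas` npm package and the monkey-patching pattern used in the demo diff further down; the require path, config, and sample image path are illustrative placeholders, not part of this commit:

```js
const canvas = require('canvas'); // external canvas implementation for NodeJS
const Human = require('../dist/human.node.js'); // path as used by the in-repo demos; adjust for your setup

globalThis.Canvas = canvas.Canvas; // patch global namespace with canvas library
globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library

async function run() {
  const human = new Human.Human({ segmentation: { enabled: true } }); // create instance of human
  await human.load(); // pre-load models
  const image = await canvas.loadImage('samples/ai-face.jpg'); // placeholder sample image
  const input = new canvas.Canvas(image.width, image.height); // draw image onto a node-canvas
  input.getContext('2d').drawImage(image, 0, 0);
  const seg = await human.segmentation(input); // returns { data, canvas, alpha }
  console.log('segmentation:', { data: seg.data.length, alpha: typeof seg.alpha, canvas: typeof seg.canvas });
}

run();
```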
@ -11,6 +11,7 @@

### **HEAD -> main** 2021/09/22 mandic00@live.com

- redo segmentation and handtracking
- prototype handtracking
- automated browser tests
- support for dynamic backend switching
TODO.md
@ -13,11 +13,6 @@

<br>

### Segmentation

- Implement `NodeJS` support
- Test for leaks

### Backends

- Optimize shader packing for WebGL backend:
@ -245,14 +245,14 @@ async function drawResults(input) {

  if (userConfig.segmentation.enabled && ui.buffered) { // refresh segmentation if using buffered output
    const seg = await human.segmentation(input, ui.background);
    if (seg.alpha) {
      let c = document.getElementById('segmentation-mask');
      let ctx = c.getContext('2d');
      ctx.clearRect(0, 0, c.width, c.height); // need to clear as seg.alpha is alpha based canvas so it adds
      ctx.drawImage(seg.alpha, 0, 0, seg.alpha.width, seg.alpha.height, 0, 0, c.width, c.height);
      c = document.getElementById('segmentation-canvas');
      ctx = c.getContext('2d');
      ctx.clearRect(0, 0, c.width, c.height); // need to clear as seg.alpha is alpha based canvas so it adds
      ctx.drawImage(seg.canvas, 0, 0, seg.alpha.width, seg.alpha.height, 0, 0, c.width, c.height);
      const canvasSegMask = document.getElementById('segmentation-mask');
      const ctxSegMask = canvasSegMask.getContext('2d');
      ctxSegMask.clearRect(0, 0, canvasSegMask.width, canvasSegMask.height); // need to clear as seg.alpha is alpha based canvas so it adds
      ctxSegMask.drawImage(seg.alpha, 0, 0, seg.alpha.width, seg.alpha.height, 0, 0, canvasSegMask.width, canvasSegMask.height);
      const canvasSegCanvas = document.getElementById('segmentation-canvas');
      const ctxSegCanvas = canvasSegCanvas.getContext('2d');
      ctxSegCanvas.clearRect(0, 0, canvasSegCanvas.width, canvasSegCanvas.height); // need to clear as seg.alpha is alpha based canvas so it adds
      ctxSegCanvas.drawImage(seg.canvas, 0, 0, seg.alpha.width, seg.alpha.height, 0, 0, canvasSegCanvas.width, canvasSegCanvas.height);
    }
    // result.canvas = seg.alpha;
  } else if (!result.canvas || ui.buffered) { // refresh with input if using buffered output or if missing canvas
@ -21,11 +21,15 @@ const config = { // just enable all and leave default settings

async function main() {
  log.header();

  globalThis.Canvas = canvas.Canvas; // patch global namespace with canvas library
  globalThis.ImageData = canvas.ImageData; // patch global namespace with canvas library
  // human.env.Canvas = canvas.Canvas; // alternatively monkey-patch human to use external canvas library
  // human.env.ImageData = canvas.ImageData; // alternatively monkey-patch human to use external canvas library

  // init
  const human = new Human.Human(config); // create instance of human
  log.info('Human:', human.version);
  // @ts-ignore
  human.env.Canvas = canvas.Canvas; // monkey-patch human to use external canvas library

  await human.load(); // pre-load models
  log.info('Loaded models:', Object.keys(human.models).filter((a) => human.models[a]));
  log.info('Memory state:', human.tf.engine().memory());

@ -46,6 +50,10 @@ async function main() {

  // run detection
  const result = await human.detect(inputCanvas);

  // run segmentation
  // const seg = await human.segmentation(inputCanvas);
  // log.data('Segmentation:', { data: seg.data.length, alpha: typeof seg.alpha, canvas: typeof seg.canvas });

  // print results summary
  const persons = result.persons; // invoke persons getter, only used to print summary on console
  for (let i = 0; i < persons.length; i++) {
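If the commented-out segmentation call above is enabled, the returned canvases are instances of the patched canvas class, so on NodeJS they can be written to disk. A hedged sketch of that step inside the demo's `main()`; the `toBuffer` call and output filename are illustrative and not part of this commit:

```js
// run segmentation and persist the blended canvas (sketch; assumes node-canvas is patched in as above)
const fs = require('fs');
const seg = await human.segmentation(inputCanvas);
log.data('Segmentation:', { data: seg.data.length, alpha: typeof seg.alpha, canvas: typeof seg.canvas });
if (seg.canvas && typeof seg.canvas.toBuffer === 'function') {
  fs.writeFileSync('segmentation-output.png', seg.canvas.toBuffer('image/png')); // placeholder output path
}
```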
@ -34,6 +34,7 @@ export type Env = {

  kernels: string[],
  Canvas: undefined,
  Image: undefined,
  ImageData: undefined,
}

// eslint-disable-next-line import/no-mutable-exports

@ -69,6 +70,7 @@ export let env: Env = {

  kernels: [],
  Canvas: undefined,
  Image: undefined,
  ImageData: undefined,
};

export async function cpuInfo() {
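The new `Env` fields are placeholders that a non-browser runtime can overwrite with its own canvas primitives. A short sketch of the two patch points this commit supports, assuming the `canvas` npm package and an existing `human` instance:

```js
const canvas = require('canvas');

// option 1: patch the instance environment (as the node demo and wasm test do)
human.env.Canvas = canvas.Canvas;
human.env.Image = canvas.Image;
human.env.ImageData = canvas.ImageData;

// option 2: patch the global namespace, used as a fallback when env is not patched
globalThis.Canvas = canvas.Canvas;
globalThis.ImageData = canvas.ImageData;
```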
src/human.ts
@ -297,7 +297,7 @@ export class Human {

   * @param background?: {@link Input}
   * @returns { data, canvas, alpha }
   */
  async segmentation(input: Input, background?: Input): Promise<{ data: Uint8ClampedArray | null, canvas: HTMLCanvasElement | OffscreenCanvas | null, alpha: HTMLCanvasElement | OffscreenCanvas | null }> {
  async segmentation(input: Input, background?: Input): Promise<{ data: number[], canvas: HTMLCanvasElement | OffscreenCanvas | null, alpha: HTMLCanvasElement | OffscreenCanvas | null }> {
    return segmentation.process(input, background, this.config);
  }
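Note the signature change above: `segmentation()` now resolves `data` as a plain `number[]` instead of `Uint8ClampedArray | null`. An illustrative fragment of a guard that works with the new shape (`input` and `background` stand in for any supported inputs):

```js
const seg = await human.segmentation(input, background);
// data is now a plain number array, so Array.isArray() applies; typed-array-only methods no longer do
if (Array.isArray(seg.data) && seg.data.length > 0) console.log('mask samples:', seg.data.length);
if (seg.canvas) console.log('blended canvas available');
```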
@ -441,20 +441,6 @@ export class Human {

      this.performance.image = Math.trunc(now() - timeStamp);
      this.analyze('Get Image:');

      // segmentation is only run explicitly via human.segmentation() which calls segmentation.process()
      /*
      if (this.config.segmentation.enabled && process && img.tensor && img.canvas) {
        this.analyze('Start Segmentation:');
        this.state = 'detect:segmentation';
        timeStamp = now();
        const seg = await segmentation.predict(img, this.config);
        img = { canvas: seg.canvas, tensor: seg.tensor };
        elapsedTime = Math.trunc(now() - timeStamp);
        if (elapsedTime > 0) this.performance.segmentation = elapsedTime;
        this.analyze('End Segmentation:');
      }
      */

      if (!img.tensor) {
        if (this.config.debug) log('could not convert input to tensor');
        resolve({ error: 'could not convert input to tensor' });
@ -30,7 +30,8 @@ export function canvas(width, height): HTMLCanvasElement | OffscreenCanvas {

    }
  } else {
    // @ts-ignore // env.canvas is an external monkey-patch
    c = (typeof env.Canvas !== 'undefined') ? new env.Canvas(width, height) : null;
    if (typeof env.Canvas !== 'undefined') c = new env.Canvas(width, height);
    else if (typeof globalThis.Canvas !== 'undefined') c = new globalThis.Canvas(width, height);
  }
  // if (!c) throw new Error('cannot create canvas');
  return c;
@ -51,6 +52,7 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,

    !(input instanceof tf.Tensor)
    && !(typeof Image !== 'undefined' && input instanceof Image)
    && !(typeof env.Canvas !== 'undefined' && input instanceof env.Canvas)
    && !(typeof globalThis.Canvas !== 'undefined' && input instanceof globalThis.Canvas)
    && !(typeof ImageData !== 'undefined' && input instanceof ImageData)
    && !(typeof ImageBitmap !== 'undefined' && input instanceof ImageBitmap)
    && !(typeof HTMLImageElement !== 'undefined' && input instanceof HTMLImageElement)
@ -327,7 +327,8 @@ async function test(Human, inputConfig) {

  ]);

  // test monkey-patch
  human.env.Canvas = canvasJS.Canvas; // monkey-patch human to use external canvas library
  globalThis.Canvas = canvasJS.Canvas; // monkey-patch to use external canvas library
  globalThis.ImageData = canvasJS.ImageData; // monkey-patch to use external canvas library
  const inputImage = await canvasJS.loadImage('samples/ai-face.jpg'); // load image using canvas library
  const inputCanvas = new canvasJS.Canvas(inputImage.width, inputImage.height); // create canvas
  const ctx = inputCanvas.getContext('2d');
@ -338,7 +339,7 @@ async function test(Human, inputConfig) {

  // test segmentation
  res = await human.segmentation(inputCanvas, inputCanvas);
  if (!res || !res.data) log('error', 'failed: segmentation', res);
  if (!res || !res.data || !res.canvas) log('error', 'failed: segmentation');
  else log('state', 'passed: segmentation', [res.data.length]);
  human.env.Canvas = undefined;
@ -5,9 +5,9 @@ const Human = require('../dist/human.node-wasm.js');

const test = require('./test-main.js').test;

// @ts-ignore
Human.env.Canvas = Canvas;
Human.env.Canvas = Canvas; // requires monkey-patch as wasm does not have tf.browser namespace
// @ts-ignore
Human.env.Image = Image;
Human.env.Image = Image; // requires monkey-patch as wasm does not have tf.browser namespace

const config = {
  // modelBasePath: 'http://localhost:10030/models/',