mirror of https://github.com/vladmandic/human
modularize build platform
parent: f345538794
commit: 9f162d473b
|
@ -11,9 +11,7 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
|
|||
|
||||
### **HEAD -> main** 2021/06/05 mandic00@live.com
|
||||
|
||||
|
||||
### **origin/main** 2021/06/05 mandic00@live.com
|
||||
|
||||
- minor git corruption
|
||||
- unified build
|
||||
- enable body segmentation and background replacement
|
||||
- work on body segmentation
|
||||
|
|
|
@@ -27,9 +27,8 @@ const userConfig = {
   hand: { enabled: false },
   gesture: { enabled: false },
   body: { enabled: false },
-  filter: {
-    enabled: false,
-  },
+  filter: { enabled: true },
+  segmentation: { enabled: false },
 };

 const human = new Human(userConfig); // new instance of human

@@ -31,6 +31,7 @@ let userConfig = {
   warmup: 'none',
   backend: 'humangl',
   wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.6.0/dist/',
+  segmentation: { enabled: true },
   /*
   async: false,
   cacheSensitivity: 0,

@@ -210,10 +211,9 @@ async function drawResults(input) {
   // draw fps chart
   await menu.process.updateChart('FPS', ui.detectFPS);

-  // get updated canvas if missing or if we want buffering, but skip if segmentation is enabled
-  if (userConfig.segmentation.enabled) {
+  if (userConfig.segmentation.enabled && ui.buffered) { // refresh segmentation if using buffered output
     result.canvas = await human.segmentation(input, ui.background, userConfig);
-  } else if (!result.canvas || ui.buffered) {
+  } else if (!result.canvas || ui.buffered) { // refresh with input if using buffered output or if missing canvas
     const image = await human.image(input);
     result.canvas = image.canvas;
     human.tf.dispose(image.tensor);

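The net effect of this hunk: with buffered output, segmentation is re-run on every frame so the mask keeps tracking the video. A minimal sketch of that refresh logic in isolation, assuming the demo's `human`, `userConfig`, and `ui` objects from above (the helper name is hypothetical, not part of the demo):

```js
// hypothetical helper isolating the demo's per-frame canvas refresh
async function refreshCanvas(input, result) {
  if (userConfig.segmentation.enabled && ui.buffered) {
    // buffered output: re-run segmentation each frame so the mask stays in sync
    result.canvas = await human.segmentation(input, ui.background, userConfig);
  } else if (!result.canvas || ui.buffered) {
    // otherwise refresh from the raw input
    const image = await human.image(input);
    result.canvas = image.canvas;
    human.tf.dispose(image.tensor); // release the interim tensor
  }
  return result.canvas;
}
```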
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,10 +1,9 @@
-
-/*
-Human library
-homepage: <https://github.com/vladmandic/human>
-author: <https://github.com/vladmandic>'
-*/
-
+
+/*
+Human library
+homepage: <https://github.com/vladmandic/human>
+author: <https://github.com/vladmandic>'
+*/
 var __create = Object.create;
 var __defProp = Object.defineProperty;
 var __getOwnPropDesc = Object.getOwnPropertyDescriptor;

@@ -4241,7 +4240,7 @@ function similarity(embedding1, embedding2, order = 2) {
     return 0;
   if ((embedding1 == null ? void 0 : embedding1.length) !== (embedding2 == null ? void 0 : embedding2.length))
     return 0;
-  const distance = 5 * embedding1.map((val, i) => Math.abs(embedding1[i] - embedding2[i]) ** order).reduce((sum, now2) => sum + now2, 0) ** (1 / order);
+  const distance = 5 * embedding1.map((_val, i) => Math.abs(embedding1[i] - embedding2[i]) ** order).reduce((sum, now2) => sum + now2, 0) ** (1 / order);
   const res = Math.max(0, 100 - distance) / 100;
   return res;
 }

@@ -8696,7 +8695,7 @@ async function process2(res, inputSize, outputShape, config3) {
     nmsIdx = nms.dataSync();
     tf17.dispose(nms);
   }
-  results = results.filter((a, idx) => nmsIdx.includes(idx)).sort((a, b) => b.score - a.score);
+  results = results.filter((_val, idx) => nmsIdx.includes(idx)).sort((a, b) => b.score - a.score);
   return results;
 }
 async function predict9(image18, config3) {

@@ -10433,9 +10432,11 @@ async function load12(config3) {
     log("cached model:", model9["modelUrl"]);
   return model9;
 }
-async function predict11(input, config3) {
-  var _a, _b, _c, _d;
-  if (!config3.segmentation.enabled || !input.tensor || !input.canvas)
+async function predict11(input) {
+  var _a, _b;
+  const width = ((_a = input.tensor) == null ? void 0 : _a.shape[1]) || 0;
+  const height = ((_b = input.tensor) == null ? void 0 : _b.shape[2]) || 0;
+  if (!input.tensor)
     return null;
   if (!model9 || !model9.inputs[0].shape)
     return null;

@@ -10444,9 +10445,6 @@ async function predict11(input, config3) {
   const res = model9.predict(norm);
   tf20.dispose(resizeInput);
   tf20.dispose(norm);
-  const overlay = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(input.canvas.width, input.canvas.height) : document.createElement("canvas");
-  overlay.width = input.canvas.width;
-  overlay.height = input.canvas.height;
   const squeeze7 = tf20.squeeze(res, 0);
   let resizeOutput;
   if (squeeze7.shape[2] === 2) {

@@ -10457,31 +10455,37 @@ async function predict11(input, config3) {
     tf20.dispose(softmax);
     tf20.dispose(bg);
     tf20.dispose(fg);
-    const crop = tf20.image.cropAndResize(pad, [[0, 0, 0.5, 0.5]], [0], [(_a = input.tensor) == null ? void 0 : _a.shape[1], (_b = input.tensor) == null ? void 0 : _b.shape[2]]);
+    const crop = tf20.image.cropAndResize(pad, [[0, 0, 0.5, 0.5]], [0], [width, height]);
     resizeOutput = crop.squeeze(0);
     tf20.dispose(crop);
    tf20.dispose(expand);
     tf20.dispose(pad);
   } else {
-    resizeOutput = tf20.image.resizeBilinear(squeeze7, [(_c = input.tensor) == null ? void 0 : _c.shape[1], (_d = input.tensor) == null ? void 0 : _d.shape[2]]);
+    resizeOutput = tf20.image.resizeBilinear(squeeze7, [width, height]);
   }
+  if (typeof document === "undefined")
+    return resizeOutput.dataSync();
+  const overlay = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(width, height) : document.createElement("canvas");
+  overlay.width = width;
+  overlay.height = height;
   if (tf20.browser)
     await tf20.browser.toPixels(resizeOutput, overlay);
   tf20.dispose(resizeOutput);
   tf20.dispose(squeeze7);
   tf20.dispose(res);
-  const alphaCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(input.canvas.width, input.canvas.height) : document.createElement("canvas");
-  alphaCanvas.width = input.canvas.width;
-  alphaCanvas.height = input.canvas.height;
+  const alphaCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(width, height) : document.createElement("canvas");
+  alphaCanvas.width = width;
+  alphaCanvas.height = height;
   const ctxAlpha = alphaCanvas.getContext("2d");
   ctxAlpha.filter = "blur(8px";
   await ctxAlpha.drawImage(overlay, 0, 0);
-  const alpha = ctxAlpha.getImageData(0, 0, input.canvas.width, input.canvas.height).data;
-  const original = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(input.canvas.width, input.canvas.height) : document.createElement("canvas");
-  original.width = input.canvas.width;
-  original.height = input.canvas.height;
+  const alpha = ctxAlpha.getImageData(0, 0, width, height).data;
+  const original = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(width, height) : document.createElement("canvas");
+  original.width = width;
+  original.height = height;
   const ctx = original.getContext("2d");
-  await ctx.drawImage(input.canvas, 0, 0);
+  if (input.canvas)
+    await ctx.drawImage(input.canvas, 0, 0);
   ctx.globalCompositeOperation = "darken";
   ctx.filter = "blur(8px)";
   await ctx.drawImage(overlay, 0, 0);

@@ -10495,12 +10499,10 @@ async function process5(input, background, config3) {
   if (busy)
     return null;
   busy = true;
-  if (!config3.segmentation.enabled)
-    config3.segmentation.enabled = true;
   if (!model9)
     await load12(config3);
   const img = process4(input, config3);
-  const alpha = await predict11(img, config3);
+  const alpha = await predict11(img);
   tf20.dispose(img.tensor);
   if (background && alpha) {
     const tmp = process4(background, config3);

@@ -11562,6 +11564,7 @@ var Human = class {
     return new Promise(async (resolve) => {
       this.state = "config";
       let timeStamp;
+      let elapsedTime;
      this.config = mergeDeep(this.config, userConfig);
       this.state = "check";
       const error = __privateGet(this, _sanity).call(this, input);

@@ -11573,14 +11576,28 @@ var Human = class {
       await __privateGet(this, _checkBackend).call(this);
       await this.load();
       timeStamp = now();
-      const process6 = process4(input, this.config);
+      let process6 = process4(input, this.config);
+      this.performance.image = Math.trunc(now() - timeStamp);
+      this.analyze("Get Image:");
+      if (this.config.segmentation.enabled && process6 && process6.tensor) {
+        this.analyze("Start Segmentation:");
+        this.state = "run:segmentation";
+        timeStamp = now();
+        await predict11(process6);
+        elapsedTime = Math.trunc(now() - timeStamp);
+        if (elapsedTime > 0)
+          this.performance.segmentation = elapsedTime;
+        if (process6.canvas) {
+          process6.tensor.dispose();
+          process6 = process4(process6.canvas, this.config);
+        }
+        this.analyze("End Segmentation:");
+      }
       if (!process6 || !process6.tensor) {
         log("could not convert input to tensor");
         resolve({ error: "could not convert input to tensor" });
         return;
       }
-      this.performance.image = Math.trunc(now() - timeStamp);
-      this.analyze("Get Image:");
       timeStamp = now();
       this.config.skipFrame = await __privateGet(this, _skipFrame).call(this, process6.tensor);
       if (!this.performance.frames)

@@ -11596,7 +11613,6 @@ var Human = class {
       let bodyRes;
       let handRes;
       let objectRes;
-      let elapsedTime;
       if (this.config.async) {
         faceRes = this.config.face.enabled ? detectFace(this, process6.tensor) : [];
         if (this.performance.face)

@@ -1,10 +1,9 @@
-
-/*
-Human library
-homepage: <https://github.com/vladmandic/human>
-author: <https://github.com/vladmandic>'
-*/
-
+
+/*
+Human library
+homepage: <https://github.com/vladmandic/human>
+author: <https://github.com/vladmandic>'
+*/
 var __create = Object.create;
 var __defProp = Object.defineProperty;
 var __getOwnPropDesc = Object.getOwnPropertyDescriptor;

@@ -4242,7 +4241,7 @@ function similarity(embedding1, embedding2, order = 2) {
     return 0;
   if ((embedding1 == null ? void 0 : embedding1.length) !== (embedding2 == null ? void 0 : embedding2.length))
     return 0;
-  const distance = 5 * embedding1.map((val, i) => Math.abs(embedding1[i] - embedding2[i]) ** order).reduce((sum, now2) => sum + now2, 0) ** (1 / order);
+  const distance = 5 * embedding1.map((_val, i) => Math.abs(embedding1[i] - embedding2[i]) ** order).reduce((sum, now2) => sum + now2, 0) ** (1 / order);
   const res = Math.max(0, 100 - distance) / 100;
   return res;
 }

@@ -8697,7 +8696,7 @@ async function process2(res, inputSize, outputShape, config3) {
     nmsIdx = nms.dataSync();
     tf17.dispose(nms);
   }
-  results = results.filter((a, idx) => nmsIdx.includes(idx)).sort((a, b) => b.score - a.score);
+  results = results.filter((_val, idx) => nmsIdx.includes(idx)).sort((a, b) => b.score - a.score);
   return results;
 }
 async function predict9(image18, config3) {

@@ -10434,9 +10433,11 @@ async function load12(config3) {
     log("cached model:", model9["modelUrl"]);
   return model9;
 }
-async function predict11(input, config3) {
-  var _a, _b, _c, _d;
-  if (!config3.segmentation.enabled || !input.tensor || !input.canvas)
+async function predict11(input) {
+  var _a, _b;
+  const width = ((_a = input.tensor) == null ? void 0 : _a.shape[1]) || 0;
+  const height = ((_b = input.tensor) == null ? void 0 : _b.shape[2]) || 0;
+  if (!input.tensor)
     return null;
   if (!model9 || !model9.inputs[0].shape)
     return null;

@@ -10445,9 +10446,6 @@ async function predict11(input, config3) {
   const res = model9.predict(norm);
   tf20.dispose(resizeInput);
   tf20.dispose(norm);
-  const overlay = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(input.canvas.width, input.canvas.height) : document.createElement("canvas");
-  overlay.width = input.canvas.width;
-  overlay.height = input.canvas.height;
   const squeeze7 = tf20.squeeze(res, 0);
   let resizeOutput;
   if (squeeze7.shape[2] === 2) {

@@ -10458,31 +10456,37 @@ async function predict11(input, config3) {
     tf20.dispose(softmax);
     tf20.dispose(bg);
     tf20.dispose(fg);
-    const crop = tf20.image.cropAndResize(pad, [[0, 0, 0.5, 0.5]], [0], [(_a = input.tensor) == null ? void 0 : _a.shape[1], (_b = input.tensor) == null ? void 0 : _b.shape[2]]);
+    const crop = tf20.image.cropAndResize(pad, [[0, 0, 0.5, 0.5]], [0], [width, height]);
     resizeOutput = crop.squeeze(0);
     tf20.dispose(crop);
     tf20.dispose(expand);
     tf20.dispose(pad);
   } else {
-    resizeOutput = tf20.image.resizeBilinear(squeeze7, [(_c = input.tensor) == null ? void 0 : _c.shape[1], (_d = input.tensor) == null ? void 0 : _d.shape[2]]);
+    resizeOutput = tf20.image.resizeBilinear(squeeze7, [width, height]);
   }
+  if (typeof document === "undefined")
+    return resizeOutput.dataSync();
+  const overlay = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(width, height) : document.createElement("canvas");
+  overlay.width = width;
+  overlay.height = height;
   if (tf20.browser)
     await tf20.browser.toPixels(resizeOutput, overlay);
   tf20.dispose(resizeOutput);
   tf20.dispose(squeeze7);
   tf20.dispose(res);
-  const alphaCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(input.canvas.width, input.canvas.height) : document.createElement("canvas");
-  alphaCanvas.width = input.canvas.width;
-  alphaCanvas.height = input.canvas.height;
+  const alphaCanvas = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(width, height) : document.createElement("canvas");
+  alphaCanvas.width = width;
+  alphaCanvas.height = height;
   const ctxAlpha = alphaCanvas.getContext("2d");
   ctxAlpha.filter = "blur(8px";
   await ctxAlpha.drawImage(overlay, 0, 0);
-  const alpha = ctxAlpha.getImageData(0, 0, input.canvas.width, input.canvas.height).data;
-  const original = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(input.canvas.width, input.canvas.height) : document.createElement("canvas");
-  original.width = input.canvas.width;
-  original.height = input.canvas.height;
+  const alpha = ctxAlpha.getImageData(0, 0, width, height).data;
+  const original = typeof OffscreenCanvas !== "undefined" ? new OffscreenCanvas(width, height) : document.createElement("canvas");
+  original.width = width;
+  original.height = height;
   const ctx = original.getContext("2d");
-  await ctx.drawImage(input.canvas, 0, 0);
+  if (input.canvas)
+    await ctx.drawImage(input.canvas, 0, 0);
   ctx.globalCompositeOperation = "darken";
   ctx.filter = "blur(8px)";
   await ctx.drawImage(overlay, 0, 0);

@@ -10496,12 +10500,10 @@ async function process5(input, background, config3) {
   if (busy)
     return null;
   busy = true;
-  if (!config3.segmentation.enabled)
-    config3.segmentation.enabled = true;
   if (!model9)
     await load12(config3);
   const img = process4(input, config3);
-  const alpha = await predict11(img, config3);
+  const alpha = await predict11(img);
   tf20.dispose(img.tensor);
   if (background && alpha) {
     const tmp = process4(background, config3);

@@ -11563,6 +11565,7 @@ var Human = class {
     return new Promise(async (resolve) => {
       this.state = "config";
       let timeStamp;
+      let elapsedTime;
       this.config = mergeDeep(this.config, userConfig);
       this.state = "check";
       const error = __privateGet(this, _sanity).call(this, input);

@@ -11574,14 +11577,28 @@ var Human = class {
       await __privateGet(this, _checkBackend).call(this);
       await this.load();
       timeStamp = now();
-      const process6 = process4(input, this.config);
+      let process6 = process4(input, this.config);
+      this.performance.image = Math.trunc(now() - timeStamp);
+      this.analyze("Get Image:");
+      if (this.config.segmentation.enabled && process6 && process6.tensor) {
+        this.analyze("Start Segmentation:");
+        this.state = "run:segmentation";
+        timeStamp = now();
+        await predict11(process6);
+        elapsedTime = Math.trunc(now() - timeStamp);
+        if (elapsedTime > 0)
+          this.performance.segmentation = elapsedTime;
+        if (process6.canvas) {
+          process6.tensor.dispose();
+          process6 = process4(process6.canvas, this.config);
+        }
+        this.analyze("End Segmentation:");
+      }
       if (!process6 || !process6.tensor) {
         log("could not convert input to tensor");
         resolve({ error: "could not convert input to tensor" });
         return;
       }
-      this.performance.image = Math.trunc(now() - timeStamp);
-      this.analyze("Get Image:");
       timeStamp = now();
       this.config.skipFrame = await __privateGet(this, _skipFrame).call(this, process6.tensor);
       if (!this.performance.frames)

@@ -11597,7 +11614,6 @@ var Human = class {
       let bodyRes;
       let handRes;
       let objectRes;
-      let elapsedTime;
       if (this.config.async) {
         faceRes = this.config.face.enabled ? detectFace(this, process6.tensor) : [];
         if (this.performance.face)

File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
server/build.js
@@ -3,37 +3,27 @@
  * Used to generate prod builds for releases or by dev server to generate on-the-fly debug builds
  */

-const ts = require('typescript');
 const fs = require('fs');
 const path = require('path');
 const log = require('@vladmandic/pilogger');
 const esbuild = require('esbuild');
-const TypeDoc = require('typedoc');
-const { ESLint } = require('eslint');
 const tfjs = require('@tensorflow/tfjs/package.json');
 const changelog = require('./changelog.js');
-const tsconfig = require('../tsconfig.json');

-let logFile = 'build.log';
+const lint = require('./lint.js');
+const typedoc = require('./typedoc.js');
+const typings = require('./typings.js');

 let busy = false;
-let td = null;
-let eslint = null;
-const banner = { js: `
-/*
-Human library
-homepage: <https://github.com/vladmandic/human>
-author: <https://github.com/vladmandic>'
-*/
-` };

 // common configuration
-const lintLocations = ['server/', 'demo/', 'src/', 'test/'];

 const config = {
   build: {
-    banner,
-    tsconfig: 'server/tfjs-tsconfig.json',
+    banner: { js: `
+/*
+Human library
+homepage: <https://github.com/vladmandic/human>
+author: <https://github.com/vladmandic>'
+*/` },
+    tsconfig: './tsconfig.json',
     logLevel: 'error',
     bundle: true,
     metafile: true,

@@ -49,7 +39,9 @@ const config = {
     minifyIdentifiers: true,
     minifySyntax: true,
   },
+  buildLog: 'build.log',
+  changelog: '../CHANGELOG.md',
+  lintLocations: ['server/', 'demo/', 'src/', 'test/'],
 };

 const targets = {

@@ -207,64 +199,6 @@ async function getStats(json) {
 }

-// rebuild typings
-async function typings(entryPoint) {
-  log.info('Generate Typings:', entryPoint, 'outDir:', [tsconfig.compilerOptions.outDir]);
-  const tsoptions = { ...tsconfig.compilerOptions,
-    target: ts.ScriptTarget.ES2018,
-    module: ts.ModuleKind.ES2020,
-    moduleResolution: ts.ModuleResolutionKind.NodeJs,
-  };
-  const compilerHost = ts.createCompilerHost(tsoptions);
-  const program = ts.createProgram(entryPoint, tsoptions, compilerHost);
-  const emit = program.emit();
-  const diag = ts
-    .getPreEmitDiagnostics(program)
-    .concat(emit.diagnostics);
-  for (const info of diag) {
-    const msg = info.messageText['messageText'] || info.messageText;
-    if (msg.includes('package.json')) continue;
-    if (info.file) {
-      const pos = info.file.getLineAndCharacterOfPosition(info.start || 0);
-      log.error(`TSC: ${info.file.fileName} [${pos.line + 1},${pos.character + 1}]:`, msg);
-    } else {
-      log.error('TSC:', msg);
-    }
-  }
-}
-
-async function typedoc(entryPoint) {
-  if (!td) {
-    td = new TypeDoc.Application();
-    td.options.addReader(new TypeDoc.TSConfigReader());
-    td.bootstrap({ entryPoints: [entryPoint], theme: 'wiki/theme/' });
-    td.logger.warn = log.warn;
-    td.logger.error = log.error;
-    td.logger.verbose = () => { /***/ };
-    td.logger.log = log.info;
-  }
-  log.info('Generate TypeDocs:', entryPoint, 'outDir:', [tsconfig.typedocOptions.out]);
-  const project = td.convert();
-  if (!project) log.warn('TypeDoc: convert returned empty project');
-  if (td.logger.hasErrors() || td.logger.hasWarnings()) log.warn('TypeDoc:', 'errors:', td.logger.errorCount, 'warnings:', td.logger.warningCount);
-  const result = project ? await td.generateDocs(project, 'typedoc') : null;
-  if (result) log.warn('TypeDoc:', result);
-}
-
-async function lint() {
-  log.info('Running Linter:', lintLocations);
-  if (!eslint) {
-    eslint = new ESLint();
-  }
-  const results = await eslint.lintFiles(lintLocations);
-  const errors = results.reduce((prev, curr) => prev += curr.errorCount, 0);
-  const warnings = results.reduce((prev, curr) => prev += curr.warningCount, 0);
-  log.info('Linter complete: files:', results.length, 'errors:', errors, 'warnings:', warnings);
-  if (errors > 0 || warnings > 0) {
-    const formatter = await eslint.loadFormatter('stylish');
-    const text = formatter.format(results);
-    log.warn(text);
-  }
-}
-
 // rebuild on file change
 async function build(f, msg, dev = false) {

@@ -290,10 +224,10 @@ async function build(f, msg, dev = false) {
       }
     }
     if (!dev) { // only for prod builds, skipped for dev build
-      await lint(); // run linter
-      await typings(targets.browserBundle.esm.entryPoints); // generate typings
+      await lint.run(config.lintLocations); // run linter
+      await typings.run(targets.browserBundle.esm.entryPoints); // generate typings
       await changelog.update(config.changelog); // generate changelog
-      await typedoc(targets.browserBundle.esm.entryPoints); // generate typedoc
+      await typedoc.run(targets.browserBundle.esm.entryPoints); // generate typedoc
     }
     if (require.main === module) process.exit(0);
   } catch (err) {

@@ -305,11 +239,11 @@ async function build(f, msg, dev = false) {
 }

 if (require.main === module) {
-  logFile = path.join(__dirname, logFile);
-  if (fs.existsSync(logFile)) fs.unlinkSync(logFile);
-  log.logFile(logFile);
+  config.buildLog = path.join(__dirname, config.buildLog);
+  if (fs.existsSync(config.buildLog)) fs.unlinkSync(config.buildLog);
+  log.logFile(config.buildLog);
   log.header();
-  log.info(`Toolchain: tfjs: ${tfjs.version} esbuild ${esbuild.version}; typescript ${ts.version}; typedoc: ${TypeDoc.Application.VERSION} eslint: ${ESLint.version}`);
+  log.info(`Toolchain: tfjs: ${tfjs.version} esbuild ${esbuild.version}; typescript ${typings.version}; typedoc: ${typedoc.version} eslint: ${lint.version}`);
   build('all', 'startup');
 } else {
   exports.build = build;

@@ -1,21 +1,21 @@
-2021-06-05 16:11:51 INFO:  @vladmandic/human version 2.0.0
-2021-06-05 16:11:51 INFO:  User: vlado Platform: linux Arch: x64 Node: v16.0.0
-2021-06-05 16:11:51 INFO:  Toolchain: tfjs: 3.7.0 esbuild 0.12.6; typescript 4.2.4; typedoc: 0.20.36 eslint: 7.27.0
-2021-06-05 16:11:51 INFO:  Build: file startup all type: production config: {"minifyWhitespace":true,"minifyIdentifiers":true,"minifySyntax":true}
-2021-06-05 16:11:51 STATE: target: node type: tfjs: {"imports":1,"importBytes":102,"outputBytes":1298,"outputFiles":"dist/tfjs.esm.js"}
-2021-06-05 16:11:51 STATE: target: node type: node: {"imports":41,"importBytes":430197,"outputBytes":376126,"outputFiles":"dist/human.node.js"}
-2021-06-05 16:11:51 STATE: target: nodeGPU type: tfjs: {"imports":1,"importBytes":110,"outputBytes":1306,"outputFiles":"dist/tfjs.esm.js"}
-2021-06-05 16:11:51 STATE: target: nodeGPU type: node: {"imports":41,"importBytes":430205,"outputBytes":376130,"outputFiles":"dist/human.node-gpu.js"}
-2021-06-05 16:11:51 STATE: target: nodeWASM type: tfjs: {"imports":1,"importBytes":149,"outputBytes":1373,"outputFiles":"dist/tfjs.esm.js"}
-2021-06-05 16:11:51 STATE: target: nodeWASM type: node: {"imports":41,"importBytes":430272,"outputBytes":376202,"outputFiles":"dist/human.node-wasm.js"}
-2021-06-05 16:11:51 STATE: target: browserNoBundle type: tfjs: {"imports":1,"importBytes":2478,"outputBytes":1400,"outputFiles":"dist/tfjs.esm.js"}
-2021-06-05 16:11:51 STATE: target: browserNoBundle type: esm: {"imports":41,"importBytes":430299,"outputBytes":247921,"outputFiles":"dist/human.esm-nobundle.js"}
-2021-06-05 16:11:52 STATE: target: browserBundle type: tfjs: {"modules":1299,"moduleBytes":4230827,"imports":7,"importBytes":2478,"outputBytes":1140326,"outputFiles":"dist/tfjs.esm.js"}
-2021-06-05 16:11:52 STATE: target: browserBundle type: iife: {"imports":41,"importBytes":1569225,"outputBytes":1384109,"outputFiles":"dist/human.js"}
-2021-06-05 16:11:52 STATE: target: browserBundle type: esm: {"imports":41,"importBytes":1569225,"outputBytes":1384101,"outputFiles":"dist/human.esm.js"}
-2021-06-05 16:11:52 INFO:  Running Linter: ["server/","demo/","src/","test/"]
-2021-06-05 16:12:25 INFO:  Linter complete: files: 69 errors: 0 warnings: 0
-2021-06-05 16:12:25 INFO:  Generate Typings: ["src/human.ts"] outDir: ["types"]
-2021-06-05 16:12:44 INFO:  Generate ChangeLog: ["/home/vlado/dev/human/CHANGELOG.md"]
-2021-06-05 16:12:44 INFO:  Generate TypeDocs: ["src/human.ts"] outDir: ["typedoc"]
-2021-06-05 16:13:00 INFO:  Documentation generated at /home/vlado/dev/human/typedoc 1
+2021-06-05 17:46:21 INFO:  @vladmandic/human version 2.0.0
+2021-06-05 17:46:21 INFO:  User: vlado Platform: linux Arch: x64 Node: v16.0.0
+2021-06-05 17:46:21 INFO:  Toolchain: tfjs: 3.7.0 esbuild 0.12.6; typescript 4.2.4; typedoc: 0.20.36 eslint: 7.27.0
+2021-06-05 17:46:21 INFO:  Build: file startup all type: production config: {"minifyWhitespace":true,"minifyIdentifiers":true,"minifySyntax":true}
+2021-06-05 17:46:21 STATE: target: node type: tfjs: {"imports":1,"importBytes":102,"outputBytes":1307,"outputFiles":"dist/tfjs.esm.js"}
+2021-06-05 17:46:21 STATE: target: node type: node: {"imports":41,"importBytes":430438,"outputBytes":376423,"outputFiles":"dist/human.node.js"}
+2021-06-05 17:46:21 STATE: target: nodeGPU type: tfjs: {"imports":1,"importBytes":110,"outputBytes":1315,"outputFiles":"dist/tfjs.esm.js"}
+2021-06-05 17:46:21 STATE: target: nodeGPU type: node: {"imports":41,"importBytes":430446,"outputBytes":376427,"outputFiles":"dist/human.node-gpu.js"}
+2021-06-05 17:46:21 STATE: target: nodeWASM type: tfjs: {"imports":1,"importBytes":149,"outputBytes":1382,"outputFiles":"dist/tfjs.esm.js"}
+2021-06-05 17:46:21 STATE: target: nodeWASM type: node: {"imports":41,"importBytes":430513,"outputBytes":376499,"outputFiles":"dist/human.node-wasm.js"}
+2021-06-05 17:46:21 STATE: target: browserNoBundle type: tfjs: {"imports":1,"importBytes":2478,"outputBytes":1409,"outputFiles":"dist/tfjs.esm.js"}
+2021-06-05 17:46:21 STATE: target: browserNoBundle type: esm: {"imports":41,"importBytes":430540,"outputBytes":247943,"outputFiles":"dist/human.esm-nobundle.js"}
+2021-06-05 17:46:22 STATE: target: browserBundle type: tfjs: {"modules":1299,"moduleBytes":4230827,"imports":7,"importBytes":2478,"outputBytes":1140335,"outputFiles":"dist/tfjs.esm.js"}
+2021-06-05 17:46:22 STATE: target: browserBundle type: iife: {"imports":41,"importBytes":1569466,"outputBytes":1384133,"outputFiles":"dist/human.js"}
+2021-06-05 17:46:23 STATE: target: browserBundle type: esm: {"imports":41,"importBytes":1569466,"outputBytes":1384125,"outputFiles":"dist/human.esm.js"}
+2021-06-05 17:46:23 INFO:  Running Linter: ["server/","demo/","src/","test/"]
+2021-06-05 17:46:50 INFO:  Linter complete: files: 71 errors: 0 warnings: 0
+2021-06-05 17:46:50 INFO:  Generate Typings: ["src/human.ts"] outDir: ["types"]
+2021-06-05 17:47:08 INFO:  Generate ChangeLog: ["/home/vlado/dev/human/CHANGELOG.md"]
+2021-06-05 17:47:08 INFO:  Generate TypeDocs: ["src/human.ts"] outDir: ["typedoc"]
+2021-06-05 17:47:24 INFO:  Documentation generated at /home/vlado/dev/human/typedoc 1

@@ -0,0 +1,23 @@
+const log = require('@vladmandic/pilogger');
+
+let eslint = null;
+const { ESLint } = require('eslint');
+
+const version = ESLint.version;
+
+async function lint(lintLocations) {
+  log.info('Running Linter:', lintLocations);
+  if (!eslint) eslint = new ESLint();
+  const results = await eslint.lintFiles(lintLocations);
+  const errors = results.reduce((prev, curr) => prev += curr.errorCount, 0);
+  const warnings = results.reduce((prev, curr) => prev += curr.warningCount, 0);
+  log.info('Linter complete: files:', results.length, 'errors:', errors, 'warnings:', warnings);
+  if (errors > 0 || warnings > 0) {
+    const formatter = await eslint.loadFormatter('stylish');
+    const text = formatter.format(results);
+    log.warn(text);
+  }
+}
+
+exports.run = lint;
+exports.version = version;

@@ -1,33 +0,0 @@
-{
-  "compilerOptions": {
-    "module": "es6",
-    "moduleResolution": "node",
-    "noImplicitAny": true,
-    "sourceMap": true,
-    "removeComments": false,
-    "preserveConstEnums": true,
-    "allowSyntheticDefaultImports": true,
-    "declaration": true,
-    "target": "es2018",
-    "lib": [
-      "es2018",
-      "dom"
-    ],
-    "outDir": "./dist",
-    "noUnusedLocals": true,
-    "noImplicitReturns": true,
-    "noImplicitThis": true,
-    "alwaysStrict": true,
-    "noUnusedParameters": false,
-    "pretty": true,
-    "noFallthroughCasesInSwitch": true,
-    "allowUnreachableCode": false,
-    "incremental": true
-  },
-  "include": [
-    "src/"
-  ],
-  "exclude": [
-    "node_modules/"
-  ]
-}

@@ -0,0 +1,28 @@
+const log = require('@vladmandic/pilogger');
+const TypeDoc = require('typedoc');
+const tsconfig = require('../tsconfig.json');
+
+let td = null;
+
+const version = TypeDoc.Application.VERSION;
+
+async function typedoc(entryPoint) {
+  if (!td) {
+    td = new TypeDoc.Application();
+    td.options.addReader(new TypeDoc.TSConfigReader());
+    td.bootstrap({ entryPoints: [entryPoint], theme: 'wiki/theme/' });
+    td.logger.warn = log.warn;
+    td.logger.error = log.error;
+    td.logger.verbose = () => { /***/ };
+    td.logger.log = log.info;
+  }
+  log.info('Generate TypeDocs:', entryPoint, 'outDir:', [tsconfig.typedocOptions.out]);
+  const project = td.convert();
+  if (!project) log.warn('TypeDoc: convert returned empty project');
+  if (td.logger.hasErrors() || td.logger.hasWarnings()) log.warn('TypeDoc:', 'errors:', td.logger.errorCount, 'warnings:', td.logger.warningCount);
+  const result = project ? await td.generateDocs(project, 'typedoc') : null;
+  if (result) log.warn('TypeDoc:', result);
+}
+
+exports.run = typedoc;
+exports.version = version;

@@ -0,0 +1,33 @@
+const ts = require('typescript');
+const log = require('@vladmandic/pilogger');
+const tsconfig = require('../tsconfig.json');
+
+const version = ts.version;
+
+async function typings(entryPoint) {
+  log.info('Generate Typings:', entryPoint, 'outDir:', [tsconfig.compilerOptions.outDir]);
+  const tsoptions = { ...tsconfig.compilerOptions,
+    target: ts.ScriptTarget.ES2018,
+    module: ts.ModuleKind.ES2020,
+    moduleResolution: ts.ModuleResolutionKind.NodeJs,
+  };
+  const compilerHost = ts.createCompilerHost(tsoptions);
+  const program = ts.createProgram(entryPoint, tsoptions, compilerHost);
+  const emit = program.emit();
+  const diag = ts
+    .getPreEmitDiagnostics(program)
+    .concat(emit.diagnostics);
+  for (const info of diag) {
+    const msg = info.messageText['messageText'] || info.messageText;
+    if (msg.includes('package.json')) continue;
+    if (info.file) {
+      const pos = info.file.getLineAndCharacterOfPosition(info.start || 0);
+      log.error(`TSC: ${info.file.fileName} [${pos.line + 1},${pos.character + 1}]:`, msg);
+    } else {
+      log.error('TSC:', msg);
+    }
+  }
+}
+
+exports.run = typings;
+exports.version = version;

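All three new server modules share the same tiny contract: `exports.run` performs the task and `exports.version` reports the underlying tool's version, which is what the rewritten Toolchain log line in build.js pulls from. A minimal usage sketch under that assumption (standalone driver, not part of the commit; assumes it runs from the server/ directory):

```js
// minimal sketch of driving the new build modules directly via the run/version contract
const lint = require('./lint.js');
const typedoc = require('./typedoc.js');
const typings = require('./typings.js');

async function prodSteps() {
  console.log('toolchain:', { typescript: typings.version, typedoc: typedoc.version, eslint: lint.version });
  await lint.run(['server/', 'demo/', 'src/', 'test/']); // eslint pass over all source locations
  await typings.run(['src/human.ts']); // emit type declarations via the tsc api
  await typedoc.run(['src/human.ts']); // generate api documentation
}

prodSteps();
```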
@@ -198,7 +198,10 @@ export interface Config {
   },

   /** Controlls and configures all body segmentation module
-   * if segmentation is enabled, output result.canvas will be augmented with masked image containing only person output
+   * removes background from input containing person
+   * if segmentation is enabled it will run as preprocessing task before any other model
+   * alternatively leave it disabled and use it on-demand using human.segmentation method which can
+   * remove background or replace it with user-provided background
    *
    * - enabled: true/false
    * - modelPath: object detection model, can be absolute path or relative to modelBasePath

@@ -351,9 +354,11 @@ const config: Config = {
   },

   segmentation: {
-    enabled: false, // if segmentation is enabled, output result.canvas will be augmented
-                    // with masked image containing only person output
-                    // segmentation is not triggered as part of detection and requires separate call to human.segmentation
+    enabled: false, // controlls and configures all body segmentation module
+                    // removes background from input containing person
+                    // if segmentation is enabled it will run as preprocessing task before any other model
+                    // alternatively leave it disabled and use it on-demand using human.segmentation method which can
+                    // remove background or replace it with user-provided background
     modelPath: 'selfie.json', // experimental: object detection model, can be absolute path or relative to modelBasePath
                               // can be 'selfie' or 'meet'
   },

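The updated doc comment describes two usage modes; a minimal sketch of both, inside an async context, assuming a constructed `Human` instance and an `input` image or video element:

```js
// mode 1: enable in config - segmentation runs as a preprocessing step inside
// detect(), so every model sees the person-masked input
const human = new Human({ segmentation: { enabled: true, modelPath: 'selfie.json' } });
const result = await human.detect(input);

// mode 2: leave it disabled and call on-demand - removes the background, or
// replaces it when a background image is supplied
const cutout = await human.segmentation(input);
const replaced = await human.segmentation(input, backgroundImage);
```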
@@ -27,7 +27,7 @@ export function similarity(embedding1, embedding2, order = 2): number {
   if (embedding1?.length !== embedding2?.length) return 0;
   // general minkowski distance, euclidean distance is limited case where order is 2
   const distance = embedding1
-    .map((val, i) => (Math.abs(embedding1[i] - embedding2[i]) ** order)) // distance squared
+    .map((_val, i) => (Math.abs(embedding1[i] - embedding2[i]) ** order)) // distance squared
     .reduce((sum, now) => (sum + now), 0) // sum all distances into total
     ** (1 / order); // get root of total distances
   const res = Math.max(Math.trunc(1000 * (1 - distance)) / 1000, 0);

@@ -39,7 +39,7 @@ export function similarity(embedding1: Array<number>, embedding2: Array<number>,
   if (embedding1?.length !== embedding2?.length) return 0;
   // general minkowski distance, euclidean distance is limited case where order is 2
   const distance = 5.0 * embedding1
-    .map((val, i) => (Math.abs(embedding1[i] - embedding2[i]) ** order)) // distance squared
+    .map((_val, i) => (Math.abs(embedding1[i] - embedding2[i]) ** order)) // distance squared
     .reduce((sum, now) => (sum + now), 0) // sum all distances
     ** (1 / order); // get root of
   const res = Math.max(0, 100 - distance) / 100.0;

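To make the renamed map/reduce chain concrete: with `order = 2` it computes a scaled Euclidean (general Minkowski) distance, which the final line clamps into a 0..1 similarity score. A small worked example with made-up 3-element embeddings:

```js
// toy embeddings; real face descriptors are much longer
const a = [0.1, 0.2, 0.3];
const b = [0.1, 0.25, 0.28];
const order = 2;
const distance = 5.0 * a
  .map((_val, i) => (Math.abs(a[i] - b[i]) ** order)) // [0, 0.0025, 0.0004]
  .reduce((sum, now) => (sum + now), 0) // 0.0029
  ** (1 / order); // 5 * sqrt(0.0029) ~= 0.269
const res = Math.max(0, 100 - distance) / 100.0; // ~0.997 -> near-identical
```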
src/human.ts
@@ -435,6 +435,7 @@ export class Human {
     return new Promise(async (resolve) => {
       this.state = 'config';
       let timeStamp;
+      let elapsedTime;

       // update configuration
       this.config = mergeDeep(this.config, userConfig) as Config;

@@ -473,14 +474,31 @@ export class Human {
       */

       timeStamp = now();
-      const process = image.process(input, this.config);
+      let process = image.process(input, this.config);
+      this.performance.image = Math.trunc(now() - timeStamp);
+      this.analyze('Get Image:');
+
+      // run segmentation preprocessing
+      if (this.config.segmentation.enabled && process && process.tensor) {
+        this.analyze('Start Segmentation:');
+        this.state = 'run:segmentation';
+        timeStamp = now();
+        await segmentation.predict(process);
+        elapsedTime = Math.trunc(now() - timeStamp);
+        if (elapsedTime > 0) this.performance.segmentation = elapsedTime;
+        if (process.canvas) {
+          // replace input
+          process.tensor.dispose();
+          process = image.process(process.canvas, this.config);
+        }
+        this.analyze('End Segmentation:');
+      }

       if (!process || !process.tensor) {
         log('could not convert input to tensor');
         resolve({ error: 'could not convert input to tensor' });
         return;
       }
-      this.performance.image = Math.trunc(now() - timeStamp);
-      this.analyze('Get Image:');

       timeStamp = now();
       this.config.skipFrame = await this.#skipFrame(process.tensor);

@@ -497,7 +515,6 @@ export class Human {
       let bodyRes;
       let handRes;
       let objectRes;
-      let elapsedTime;

       // run face detection followed by all models that rely on face bounding box: face mesh, age, gender, emotion
       if (this.config.async) {

@@ -573,19 +590,6 @@ export class Human {
         else if (this.performance.gesture) delete this.performance.gesture;
       }

-      // run segmentation
-      /* not triggered as part of detect
-      if (this.config.segmentation.enabled) {
-        this.analyze('Start Segmentation:');
-        this.state = 'run:segmentation';
-        timeStamp = now();
-        await segmentation.predict(process, this.config);
-        elapsedTime = Math.trunc(now() - timeStamp);
-        if (elapsedTime > 0) this.performance.segmentation = elapsedTime;
-        this.analyze('End Segmentation:');
-      }
-      */
-
       this.performance.total = Math.trunc(now() - timeStart);
       this.state = 'idle';
       this.result = {

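The moved block turns segmentation into a preprocessing stage of `detect()`: when enabled, the masked canvas replaces the original input before any other model runs, and its cost is reported separately. A minimal sketch of the caller side, assuming an async context and a `video` element:

```js
// per-call config is merged via mergeDeep, so segmentation can be toggled per detect() call
const human = new Human(userConfig);
const result = await human.detect(video, { segmentation: { enabled: true } });
// face/body/hand models above ran on the person-masked input;
// preprocessing cost shows up as its own performance entry
console.log(human.performance.segmentation, human.performance.total);
```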
@@ -96,7 +96,7 @@ async function process(res, inputSize, outputShape, config) {

   // filter & sort results
   results = results
-    .filter((a, idx) => nmsIdx.includes(idx))
+    .filter((_val, idx) => nmsIdx.includes(idx))
     .sort((a, b) => (b.score - a.score));

   return results;

@@ -1,29 +0,0 @@
-import * as tf from '../../dist/tfjs.esm.js';
-import { Tensor } from '../tfjs/types';
-
-function get1dGaussianKernel(sigma: number, size: number): Tensor {
-  // Generate a 1d gaussian distribution size numbers long
-  const range = tf.range(Math.floor(-size / 2) + 1, Math.floor(size / 2) + 1);
-  const distribution = tf.pow(tf.exp(range.div(-2.0 * (sigma * sigma))), 2);
-  const normalized = distribution.div(tf.sum(distribution)) as Tensor;
-  return normalized;
-}
-
-function get2dGaussianKernel(size: number, sigma?: number): Tensor {
-  // This default is to mimic opencv2.
-  sigma = sigma === undefined ? 0.3 * ((size - 1) * 0.5 - 1) + 0.8 : sigma;
-  const kerne1d = get1dGaussianKernel(sigma, size);
-  return tf.outerProduct(kerne1d, kerne1d);
-}
-
-export function getGaussianKernel(size = 5, channels = 1, sigma?: number): Tensor {
-  return tf.tidy(() => {
-    const kerne2d = get2dGaussianKernel(size, sigma);
-    const kerne3d = channels === 3 ? tf.stack([kerne2d, kerne2d, kerne2d]) : kerne2d;
-    return tf.reshape(kerne3d, [size, size, channels, 1]);
-  });
-}
-
-export function blur(image: Tensor, kernel: Tensor, pad: number | 'valid' | 'same' = 'same'): Tensor {
-  return tf.tidy(() => tf.depthwiseConv2d(image, kernel, 1, pad));
-}

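For reference, the removed helper built a separable Gaussian as an outer product of a 1-D kernel and applied it with a depthwise convolution. A minimal sketch of its intended use, matching the commented-out call site in segmentation.ts below (assumes `inputTensor` is a 4-D `[1, height, width, 1]` tensor):

```js
// build a 5x5 single-channel gaussian kernel once, then convolve it over the input
const kernel = getGaussianKernel(5, 1, 1); // size 5, 1 channel, sigma 1
const blurred = blur(inputTensor, kernel); // 'same' padding preserves dimensions
tf.dispose(kernel);
```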
@ -7,13 +7,11 @@ import * as tf from '../../dist/tfjs.esm.js';
|
|||
import * as image from '../image/image';
|
||||
import { GraphModel, Tensor } from '../tfjs/types';
|
||||
import { Config } from '../config';
|
||||
// import * as blur from './blur';
|
||||
|
||||
type Input = Tensor | typeof Image | ImageData | ImageBitmap | HTMLImageElement | HTMLMediaElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;
|
||||
|
||||
let model: GraphModel;
|
||||
let busy = false;
|
||||
// let blurKernel;
|
||||
|
||||
export async function load(config: Config): Promise<GraphModel> {
|
||||
if (!model) {
|
||||
|
@ -22,12 +20,13 @@ export async function load(config: Config): Promise<GraphModel> {
|
|||
if (!model || !model['modelUrl']) log('load model failed:', config.segmentation.modelPath);
|
||||
else if (config.debug) log('load model:', model['modelUrl']);
|
||||
} else if (config.debug) log('cached model:', model['modelUrl']);
|
||||
// if (!blurKernel) blurKernel = blur.getGaussianKernel(5, 1, 1);
|
||||
return model;
|
||||
}
|
||||
|
||||
export async function predict(input: { tensor: Tensor | null, canvas: OffscreenCanvas | HTMLCanvasElement }, config: Config): Promise<Uint8ClampedArray | null> {
|
||||
if (!config.segmentation.enabled || !input.tensor || !input.canvas) return null;
|
||||
export async function predict(input: { tensor: Tensor | null, canvas: OffscreenCanvas | HTMLCanvasElement }): Promise<Uint8ClampedArray | null> {
|
||||
const width = input.tensor?.shape[1] || 0;
|
||||
const height = input.tensor?.shape[2] || 0;
|
||||
if (!input.tensor) return null;
|
||||
if (!model || !model.inputs[0].shape) return null;
|
||||
const resizeInput = tf.image.resizeBilinear(input.tensor, [model.inputs[0].shape[1], model.inputs[0].shape[2]], false);
|
||||
const norm = resizeInput.div(255);
|
||||
|
@ -37,10 +36,6 @@ export async function predict(input: { tensor: Tensor | null, canvas: OffscreenC
|
|||
tf.dispose(resizeInput);
|
||||
tf.dispose(norm);
|
||||
|
||||
const overlay = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(input.canvas.width, input.canvas.height) : document.createElement('canvas');
|
||||
overlay.width = input.canvas.width;
|
||||
overlay.height = input.canvas.height;
|
||||
|
||||
const squeeze = tf.squeeze(res, 0);
|
||||
let resizeOutput;
|
||||
if (squeeze.shape[2] === 2) {
|
||||
|
@ -53,7 +48,7 @@ export async function predict(input: { tensor: Tensor | null, canvas: OffscreenC
|
|||
tf.dispose(bg);
|
||||
tf.dispose(fg);
|
||||
// running sofmax before unstack creates 2x2 matrix so we only take upper-left quadrant
|
||||
const crop = tf.image.cropAndResize(pad, [[0, 0, 0.5, 0.5]], [0], [input.tensor?.shape[1], input.tensor?.shape[2]]);
|
||||
const crop = tf.image.cropAndResize(pad, [[0, 0, 0.5, 0.5]], [0], [width, height]);
|
||||
// otherwise run softmax after unstack and use standard resize
|
||||
// resizeOutput = tf.image.resizeBilinear(expand, [input.tensor?.shape[1], input.tensor?.shape[2]]);
|
||||
resizeOutput = crop.squeeze(0);
|
||||
|
@ -61,29 +56,34 @@ export async function predict(input: { tensor: Tensor | null, canvas: OffscreenC
|
|||
tf.dispose(expand);
|
||||
tf.dispose(pad);
|
||||
} else { // model selfie has a single channel that we can use directly
|
||||
resizeOutput = tf.image.resizeBilinear(squeeze, [input.tensor?.shape[1], input.tensor?.shape[2]]);
|
||||
resizeOutput = tf.image.resizeBilinear(squeeze, [width, height]);
|
||||
}
|
||||
|
||||
if (typeof document === 'undefined') return resizeOutput.dataSync(); // we're running in nodejs so return alpha array as-is
|
||||
|
||||
const overlay = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(width, height) : document.createElement('canvas');
|
||||
overlay.width = width;
|
||||
overlay.height = height;
|
||||
if (tf.browser) await tf.browser.toPixels(resizeOutput, overlay);
|
||||
tf.dispose(resizeOutput);
|
||||
tf.dispose(squeeze);
|
||||
tf.dispose(res);
|
||||
|
||||
// get alpha channel data
|
||||
const alphaCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(input.canvas.width, input.canvas.height) : document.createElement('canvas'); // need one more copy since input may already have gl context so 2d context fails
|
||||
alphaCanvas.width = input.canvas.width;
|
||||
alphaCanvas.height = input.canvas.height;
|
||||
const alphaCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(width, height) : document.createElement('canvas'); // need one more copy since input may already have gl context so 2d context fails
|
||||
alphaCanvas.width = width;
|
||||
alphaCanvas.height = height;
|
||||
const ctxAlpha = alphaCanvas.getContext('2d') as CanvasRenderingContext2D;
|
||||
ctxAlpha.filter = 'blur(8px';
|
||||
await ctxAlpha.drawImage(overlay, 0, 0);
|
||||
const alpha = ctxAlpha.getImageData(0, 0, input.canvas.width, input.canvas.height).data;
|
||||
const alpha = ctxAlpha.getImageData(0, 0, width, height).data;
|
||||
|
||||
// get original canvas merged with overlay
|
||||
const original = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(input.canvas.width, input.canvas.height) : document.createElement('canvas'); // need one more copy since input may already have gl context so 2d context fails
|
||||
original.width = input.canvas.width;
|
||||
original.height = input.canvas.height;
|
||||
const original = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(width, height) : document.createElement('canvas'); // need one more copy since input may already have gl context so 2d context fails
|
||||
original.width = width;
|
||||
original.height = height;
|
||||
const ctx = original.getContext('2d') as CanvasRenderingContext2D;
|
||||
await ctx.drawImage(input.canvas, 0, 0);
|
||||
if (input.canvas) await ctx.drawImage(input.canvas, 0, 0);
|
||||
// https://developer.mozilla.org/en-US/docs/Web/API/CanvasRenderingContext2D/globalCompositeOperation // best options are: darken, color-burn, multiply
|
||||
ctx.globalCompositeOperation = 'darken';
|
||||
ctx.filter = 'blur(8px)'; // use css filter for bluring, can be done with gaussian blur manually instead
|
||||
|
@@ -99,10 +99,9 @@ export async function predict(input: { tensor: Tensor | null, canvas: OffscreenC

export async function process(input: Input, background: Input | undefined, config: Config): Promise<HTMLCanvasElement | OffscreenCanvas | null> {
if (busy) return null;
busy = true;
if (!config.segmentation.enabled) config.segmentation.enabled = true; // override config
if (!model) await load(config);
const img = image.process(input, config);
const alpha = await predict(img, config);
const alpha = await predict(img);
tf.dispose(img.tensor);

if (background && alpha) {
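Note that process() overrides config.segmentation.enabled and guards re-entry with the module-level busy flag, so the public wrapper can be invoked on-demand even when segmentation is disabled for regular detection. A hedged usage sketch (element ids are illustrative; the three-argument call mirrors the demo code in this commit and returns null while a previous call is still running):

import Human from '@vladmandic/human';

const human = new Human({ segmentation: { enabled: false } }); // leave disabled for normal detect() calls

async function removeBackground() {
  const video = document.getElementById('video') as HTMLVideoElement;
  const background = document.getElementById('background') as HTMLImageElement;
  // returns a composited canvas with the background replaced, or null if a previous call is still busy
  const canvas = await human.segmentation(video, background, human.config);
  if (canvas) document.body.appendChild(canvas as HTMLCanvasElement);
}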

@@ -6,9 +6,6 @@ const config = {
backend: 'tensorflow',
debug: false,
async: false,
filter: {
enabled: true,
},
face: {
enabled: true,
detector: { enabled: true, rotation: true },
@@ -20,6 +17,8 @@ const config = {
hand: { enabled: true },
body: { enabled: true },
object: { enabled: true },
segmentation: { enabled: true },
filter: { enabled: false },
};

test(Human, config);

@@ -8,9 +8,6 @@ const config = {
// wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.6.0/dist/',
debug: false,
async: false,
filter: {
enabled: true,
},
face: {
enabled: true,
detector: { enabled: true, rotation: true },
@@ -22,6 +19,8 @@ const config = {
hand: { enabled: true },
body: { enabled: true },
object: { enabled: false },
segmentation: { enabled: true },
filter: { enabled: false },
};

test(Human, config);

@@ -6,9 +6,6 @@ const config = {
backend: 'tensorflow',
debug: false,
async: false,
filter: {
enabled: true,
},
face: {
enabled: true,
detector: { enabled: true, rotation: true },
@@ -20,6 +17,8 @@ const config = {
hand: { enabled: true },
body: { enabled: true },
object: { enabled: true },
segmentation: { enabled: true },
filter: { enabled: false },
};

test(Human, config);
289 test/test.log

@@ -1,120 +1,169 @@
2021-06-05 11:53:54 INFO: @vladmandic/human version 2.0.0
2021-06-05 11:53:54 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0
2021-06-05 11:53:54 INFO: tests: ["test-node.js","test-node-gpu.js","test-node-wasm.js"]
2021-06-05 11:53:54 INFO: test-node.js start
2021-06-05 11:53:56 STATE: test-node.js passed: create human
2021-06-05 11:53:56 INFO: test-node.js human version: 2.0.0
2021-06-05 11:53:56 INFO: test-node.js platform: linux x64 agent: NodeJS v16.0.0
2021-06-05 11:53:56 INFO: test-node.js tfjs version: 3.7.0
2021-06-05 11:53:56 STATE: test-node.js passed: set backend: tensorflow
2021-06-05 11:53:56 STATE: test-node.js passed: load models
2021-06-05 11:53:56 STATE: test-node.js result: defined models: 14 loaded models: 6
2021-06-05 11:53:56 STATE: test-node.js passed: warmup: none default
2021-06-05 11:53:58 STATE: test-node.js passed: warmup: face default
2021-06-05 11:53:58 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":23.6,"gender":"female"} {"score":0.82,"class":"person"} {"score":0.73,"keypoints":5}
2021-06-05 11:53:58 DATA: test-node.js result: performance: load: 430 total: 1782
2021-06-05 11:54:00 STATE: test-node.js passed: warmup: body default
2021-06-05 11:54:00 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.93,"keypoints":17}
2021-06-05 11:54:00 DATA: test-node.js result: performance: load: 430 total: 1700
2021-06-05 11:54:00 INFO: test-node.js test body variants
2021-06-05 11:54:01 STATE: test-node.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 11:54:02 STATE: test-node.js passed: detect: samples/ai-body.jpg posenet
2021-06-05 11:54:02 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.96,"keypoints":16}
2021-06-05 11:54:02 DATA: test-node.js result: performance: load: 430 total: 1021
2021-06-05 11:54:03 STATE: test-node.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 11:54:03 STATE: test-node.js passed: detect: samples/ai-body.jpg movenet
2021-06-05 11:54:03 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.93,"keypoints":17}
2021-06-05 11:54:03 DATA: test-node.js result: performance: load: 430 total: 217
2021-06-05 11:54:03 STATE: test-node.js passed: detect: random default
2021-06-05 11:54:03 DATA: test-node.js result: face: 0 body: 1 hand: 0 gesture: 0 object: 1 person: 0 {} {"score":0.72,"class":"person"} {"score":0,"keypoints":0}
2021-06-05 11:54:03 DATA: test-node.js result: performance: load: 430 total: 186
2021-06-05 11:54:03 INFO: test-node.js test: first instance
2021-06-05 11:54:04 STATE: test-node.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-06-05 11:54:04 STATE: test-node.js passed: detect: samples/ai-upper.jpg default
2021-06-05 11:54:04 DATA: test-node.js result: face: 0 body: 1 hand: 0 gesture: 1 object: 1 person: 0 {} {"score":0.72,"class":"person"} {"score":0.78,"keypoints":7}
2021-06-05 11:54:04 DATA: test-node.js result: performance: load: 430 total: 113
2021-06-05 11:54:04 INFO: test-node.js test: second instance
2021-06-05 11:54:04 STATE: test-node.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-06-05 11:54:05 STATE: test-node.js passed: detect: samples/ai-upper.jpg default
2021-06-05 11:54:05 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.78,"keypoints":7}
2021-06-05 11:54:05 DATA: test-node.js result: performance: load: 2 total: 1319
2021-06-05 11:54:05 INFO: test-node.js test: concurrent
2021-06-05 11:54:06 STATE: test-node.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-06-05 11:54:06 STATE: test-node.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-06-05 11:54:07 STATE: test-node.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 11:54:08 STATE: test-node.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 11:54:13 STATE: test-node.js passed: detect: samples/ai-face.jpg default
2021-06-05 11:54:13 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 4 object: 1 person: 1 {"age":23.6,"gender":"female"} {"score":0.82,"class":"person"} {"score":0.73,"keypoints":17}
2021-06-05 11:54:13 DATA: test-node.js result: performance: load: 430 total: 5500
2021-06-05 11:54:13 STATE: test-node.js passed: detect: samples/ai-face.jpg default
2021-06-05 11:54:13 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 4 object: 1 person: 1 {"age":23.6,"gender":"female"} {"score":0.82,"class":"person"} {"score":0.73,"keypoints":17}
2021-06-05 11:54:13 DATA: test-node.js result: performance: load: 2 total: 5500
2021-06-05 11:54:13 STATE: test-node.js passed: detect: samples/ai-body.jpg default
2021-06-05 11:54:13 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":28.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.93,"keypoints":17}
2021-06-05 11:54:13 DATA: test-node.js result: performance: load: 430 total: 5500
2021-06-05 11:54:13 STATE: test-node.js passed: detect: samples/ai-body.jpg default
2021-06-05 11:54:13 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":28.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.93,"keypoints":17}
2021-06-05 11:54:13 DATA: test-node.js result: performance: load: 2 total: 5500
2021-06-05 11:54:13 INFO: test-node.js test complete: 17471 ms
2021-06-05 11:54:13 INFO: test-node-gpu.js start
2021-06-05 11:54:14 WARN: test-node-gpu.js stderr: 2021-06-05 11:54:14.837663: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory
2021-06-05 11:54:15 WARN: test-node-gpu.js stderr: 2021-06-05 11:54:15.036299: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory
2021-06-05 11:54:15 WARN: test-node-gpu.js stderr: 2021-06-05 11:54:15.036330: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (wyse): /proc/driver/nvidia/version does not exist
2021-06-05 11:54:15 STATE: test-node-gpu.js passed: create human
2021-06-05 11:54:15 INFO: test-node-gpu.js human version: 2.0.0
2021-06-05 11:54:15 INFO: test-node-gpu.js platform: linux x64 agent: NodeJS v16.0.0
2021-06-05 11:54:15 INFO: test-node-gpu.js tfjs version: 3.7.0
2021-06-05 11:54:15 STATE: test-node-gpu.js passed: set backend: tensorflow
2021-06-05 11:54:15 STATE: test-node-gpu.js passed: load models
2021-06-05 11:54:15 STATE: test-node-gpu.js result: defined models: 14 loaded models: 6
2021-06-05 11:54:15 STATE: test-node-gpu.js passed: warmup: none default
2021-06-05 11:54:17 STATE: test-node-gpu.js passed: warmup: face default
2021-06-05 11:54:17 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":23.6,"gender":"female"} {"score":0.82,"class":"person"} {"score":0.73,"keypoints":5}
2021-06-05 11:54:17 DATA: test-node-gpu.js result: performance: load: 334 total: 1776
2021-06-05 11:54:18 STATE: test-node-gpu.js passed: warmup: body default
2021-06-05 11:54:18 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.93,"keypoints":17}
2021-06-05 11:54:18 DATA: test-node-gpu.js result: performance: load: 334 total: 1596
2021-06-05 11:54:18 INFO: test-node-gpu.js test body variants
2021-06-05 11:54:19 STATE: test-node-gpu.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 11:54:21 STATE: test-node-gpu.js passed: detect: samples/ai-body.jpg posenet
2021-06-05 11:54:21 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.96,"keypoints":16}
2021-06-05 11:54:21 DATA: test-node-gpu.js result: performance: load: 334 total: 1179
2021-06-05 11:54:21 STATE: test-node-gpu.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 11:54:22 STATE: test-node-gpu.js passed: detect: samples/ai-body.jpg movenet
2021-06-05 11:54:22 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.93,"keypoints":17}
2021-06-05 11:54:22 DATA: test-node-gpu.js result: performance: load: 334 total: 196
2021-06-05 11:54:23 STATE: test-node-gpu.js passed: detect: random default
2021-06-05 11:54:23 DATA: test-node-gpu.js result: face: 0 body: 1 hand: 0 gesture: 0 object: 0 person: 0 {} {} {"score":0,"keypoints":0}
2021-06-05 11:54:23 DATA: test-node-gpu.js result: performance: load: 334 total: 908
2021-06-05 11:54:23 INFO: test-node-gpu.js test: first instance
2021-06-05 11:54:23 STATE: test-node-gpu.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-06-05 11:54:24 STATE: test-node-gpu.js passed: detect: samples/ai-upper.jpg default
2021-06-05 11:54:24 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.78,"keypoints":7}
2021-06-05 11:54:24 DATA: test-node-gpu.js result: performance: load: 334 total: 1275
2021-06-05 11:54:24 INFO: test-node-gpu.js test: second instance
2021-06-05 11:54:25 STATE: test-node-gpu.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-06-05 11:54:26 STATE: test-node-gpu.js passed: detect: samples/ai-upper.jpg default
2021-06-05 11:54:26 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.78,"keypoints":7}
2021-06-05 11:54:26 DATA: test-node-gpu.js result: performance: load: 5 total: 1202
2021-06-05 11:54:26 INFO: test-node-gpu.js test: concurrent
2021-06-05 11:54:26 STATE: test-node-gpu.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-06-05 11:54:26 STATE: test-node-gpu.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-06-05 11:54:27 STATE: test-node-gpu.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 11:54:28 STATE: test-node-gpu.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 11:54:34 STATE: test-node-gpu.js passed: detect: samples/ai-face.jpg default
2021-06-05 11:54:34 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 4 object: 1 person: 1 {"age":23.6,"gender":"female"} {"score":0.82,"class":"person"} {"score":0.73,"keypoints":17}
2021-06-05 11:54:34 DATA: test-node-gpu.js result: performance: load: 334 total: 5393
2021-06-05 11:54:34 STATE: test-node-gpu.js passed: detect: samples/ai-face.jpg default
2021-06-05 11:54:34 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 4 object: 1 person: 1 {"age":23.6,"gender":"female"} {"score":0.82,"class":"person"} {"score":0.73,"keypoints":17}
2021-06-05 11:54:34 DATA: test-node-gpu.js result: performance: load: 5 total: 5393
2021-06-05 11:54:34 STATE: test-node-gpu.js passed: detect: samples/ai-body.jpg default
2021-06-05 11:54:34 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":28.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.93,"keypoints":17}
2021-06-05 11:54:34 DATA: test-node-gpu.js result: performance: load: 334 total: 5393
2021-06-05 11:54:34 STATE: test-node-gpu.js passed: detect: samples/ai-body.jpg default
2021-06-05 11:54:34 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":28.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.93,"keypoints":17}
2021-06-05 11:54:34 DATA: test-node-gpu.js result: performance: load: 5 total: 5393
2021-06-05 11:54:34 INFO: test-node-gpu.js test complete: 18953 ms
2021-06-05 11:54:34 INFO: test-node-wasm.js start
2021-06-05 11:54:34 ERROR: test-node-wasm.js failed: model server: request to http://localhost:10030/models/ failed, reason: connect ECONNREFUSED 127.0.0.1:10030
2021-06-05 11:54:34 ERROR: test-node-wasm.js aborting test
2021-06-05 11:54:34 INFO: status: {"passed":46,"failed":1}
2021-06-05 17:49:20 INFO: @vladmandic/human version 2.0.0
2021-06-05 17:49:20 INFO: User: vlado Platform: linux Arch: x64 Node: v16.0.0
2021-06-05 17:49:20 INFO: tests: ["test-node.js","test-node-gpu.js","test-node-wasm.js"]
2021-06-05 17:49:20 INFO: test-node.js start
2021-06-05 17:49:21 STATE: test-node.js passed: create human
2021-06-05 17:49:21 INFO: test-node.js human version: 2.0.0
2021-06-05 17:49:21 INFO: test-node.js platform: linux x64 agent: NodeJS v16.0.0
2021-06-05 17:49:21 INFO: test-node.js tfjs version: 3.7.0
2021-06-05 17:49:21 STATE: test-node.js passed: set backend: tensorflow
2021-06-05 17:49:21 STATE: test-node.js passed: load models
2021-06-05 17:49:21 STATE: test-node.js result: defined models: 14 loaded models: 7
2021-06-05 17:49:21 STATE: test-node.js passed: warmup: none default
2021-06-05 17:49:23 STATE: test-node.js passed: warmup: face default
2021-06-05 17:49:23 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":23.6,"gender":"female"} {"score":0.82,"class":"person"} {"score":0.73,"keypoints":5}
2021-06-05 17:49:23 DATA: test-node.js result: performance: load: 364 total: 1627
2021-06-05 17:49:24 STATE: test-node.js passed: warmup: body default
2021-06-05 17:49:24 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.93,"keypoints":17}
2021-06-05 17:49:24 DATA: test-node.js result: performance: load: 364 total: 1631
2021-06-05 17:49:24 INFO: test-node.js test body variants
2021-06-05 17:49:25 STATE: test-node.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 17:49:27 STATE: test-node.js passed: detect: samples/ai-body.jpg posenet
2021-06-05 17:49:27 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.96,"keypoints":16}
2021-06-05 17:49:27 DATA: test-node.js result: performance: load: 364 total: 1172
2021-06-05 17:49:27 STATE: test-node.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 17:49:28 STATE: test-node.js passed: detect: samples/ai-body.jpg movenet
2021-06-05 17:49:28 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.93,"keypoints":17}
2021-06-05 17:49:28 DATA: test-node.js result: performance: load: 364 total: 287
2021-06-05 17:49:29 STATE: test-node.js passed: detect: random default
2021-06-05 17:49:29 DATA: test-node.js result: face: 0 body: 1 hand: 0 gesture: 0 object: 0 person: 0 {} {} {"score":0,"keypoints":0}
2021-06-05 17:49:29 DATA: test-node.js result: performance: load: 364 total: 874
2021-06-05 17:49:29 INFO: test-node.js test: first instance
2021-06-05 17:49:29 STATE: test-node.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-06-05 17:49:30 STATE: test-node.js passed: detect: samples/ai-upper.jpg default
2021-06-05 17:49:30 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.78,"keypoints":7}
2021-06-05 17:49:30 DATA: test-node.js result: performance: load: 364 total: 1279
2021-06-05 17:49:30 INFO: test-node.js test: second instance
2021-06-05 17:49:31 STATE: test-node.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-06-05 17:49:32 STATE: test-node.js passed: detect: samples/ai-upper.jpg default
2021-06-05 17:49:32 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.78,"keypoints":7}
2021-06-05 17:49:32 DATA: test-node.js result: performance: load: 2 total: 1263
2021-06-05 17:49:32 INFO: test-node.js test: concurrent
2021-06-05 17:49:32 STATE: test-node.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-06-05 17:49:32 STATE: test-node.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-06-05 17:49:33 STATE: test-node.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 17:49:34 STATE: test-node.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 17:49:40 STATE: test-node.js passed: detect: samples/ai-face.jpg default
2021-06-05 17:49:40 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 4 object: 1 person: 1 {"age":23.6,"gender":"female"} {"score":0.82,"class":"person"} {"score":0.73,"keypoints":17}
2021-06-05 17:49:40 DATA: test-node.js result: performance: load: 364 total: 5621
2021-06-05 17:49:40 STATE: test-node.js passed: detect: samples/ai-face.jpg default
2021-06-05 17:49:40 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 4 object: 1 person: 1 {"age":23.6,"gender":"female"} {"score":0.82,"class":"person"} {"score":0.73,"keypoints":17}
2021-06-05 17:49:40 DATA: test-node.js result: performance: load: 2 total: 5621
2021-06-05 17:49:40 STATE: test-node.js passed: detect: samples/ai-body.jpg default
2021-06-05 17:49:40 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":28.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.93,"keypoints":17}
2021-06-05 17:49:40 DATA: test-node.js result: performance: load: 364 total: 5621
2021-06-05 17:49:40 STATE: test-node.js passed: detect: samples/ai-body.jpg default
2021-06-05 17:49:40 DATA: test-node.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":28.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.93,"keypoints":17}
2021-06-05 17:49:40 DATA: test-node.js result: performance: load: 2 total: 5621
2021-06-05 17:49:40 INFO: test-node.js test complete: 19189 ms
2021-06-05 17:49:40 INFO: test-node-gpu.js start
2021-06-05 17:49:40 WARN: test-node-gpu.js stderr: 2021-06-05 17:49:40.832433: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory
2021-06-05 17:49:40 WARN: test-node-gpu.js stderr: 2021-06-05 17:49:40.884214: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory
2021-06-05 17:49:40 WARN: test-node-gpu.js stderr: 2021-06-05 17:49:40.884276: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (wyse): /proc/driver/nvidia/version does not exist
2021-06-05 17:49:40 STATE: test-node-gpu.js passed: create human
2021-06-05 17:49:40 INFO: test-node-gpu.js human version: 2.0.0
2021-06-05 17:49:40 INFO: test-node-gpu.js platform: linux x64 agent: NodeJS v16.0.0
2021-06-05 17:49:40 INFO: test-node-gpu.js tfjs version: 3.7.0
2021-06-05 17:49:41 STATE: test-node-gpu.js passed: set backend: tensorflow
2021-06-05 17:49:41 STATE: test-node-gpu.js passed: load models
2021-06-05 17:49:41 STATE: test-node-gpu.js result: defined models: 14 loaded models: 7
2021-06-05 17:49:41 STATE: test-node-gpu.js passed: warmup: none default
2021-06-05 17:49:43 STATE: test-node-gpu.js passed: warmup: face default
2021-06-05 17:49:43 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":23.6,"gender":"female"} {"score":0.82,"class":"person"} {"score":0.73,"keypoints":5}
2021-06-05 17:49:43 DATA: test-node-gpu.js result: performance: load: 347 total: 1756
2021-06-05 17:49:44 STATE: test-node-gpu.js passed: warmup: body default
2021-06-05 17:49:44 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.93,"keypoints":17}
2021-06-05 17:49:44 DATA: test-node-gpu.js result: performance: load: 347 total: 1638
2021-06-05 17:49:44 INFO: test-node-gpu.js test body variants
2021-06-05 17:49:45 STATE: test-node-gpu.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 17:49:46 STATE: test-node-gpu.js passed: detect: samples/ai-body.jpg posenet
2021-06-05 17:49:46 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.96,"keypoints":16}
2021-06-05 17:49:46 DATA: test-node-gpu.js result: performance: load: 347 total: 1100
2021-06-05 17:49:47 STATE: test-node-gpu.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 17:49:48 STATE: test-node-gpu.js passed: detect: samples/ai-body.jpg movenet
2021-06-05 17:49:48 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.93,"keypoints":17}
2021-06-05 17:49:48 DATA: test-node-gpu.js result: performance: load: 347 total: 303
2021-06-05 17:49:49 STATE: test-node-gpu.js passed: detect: random default
2021-06-05 17:49:49 DATA: test-node-gpu.js result: face: 0 body: 1 hand: 0 gesture: 0 object: 0 person: 0 {} {} {"score":0,"keypoints":0}
2021-06-05 17:49:49 DATA: test-node-gpu.js result: performance: load: 347 total: 873
2021-06-05 17:49:49 INFO: test-node-gpu.js test: first instance
2021-06-05 17:49:49 STATE: test-node-gpu.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-06-05 17:49:50 STATE: test-node-gpu.js passed: detect: samples/ai-upper.jpg default
2021-06-05 17:49:50 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.78,"keypoints":7}
2021-06-05 17:49:50 DATA: test-node-gpu.js result: performance: load: 347 total: 1212
2021-06-05 17:49:50 INFO: test-node-gpu.js test: second instance
2021-06-05 17:49:51 STATE: test-node-gpu.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-06-05 17:49:52 STATE: test-node-gpu.js passed: detect: samples/ai-upper.jpg default
2021-06-05 17:49:52 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":29.5,"gender":"female"} {"score":0.71,"class":"person"} {"score":0.78,"keypoints":7}
2021-06-05 17:49:52 DATA: test-node-gpu.js result: performance: load: 2 total: 1305
2021-06-05 17:49:52 INFO: test-node-gpu.js test: concurrent
2021-06-05 17:49:52 STATE: test-node-gpu.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-06-05 17:49:52 STATE: test-node-gpu.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-06-05 17:49:53 STATE: test-node-gpu.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 17:49:54 STATE: test-node-gpu.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 17:50:00 STATE: test-node-gpu.js passed: detect: samples/ai-face.jpg default
2021-06-05 17:50:00 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 4 object: 1 person: 1 {"age":23.6,"gender":"female"} {"score":0.82,"class":"person"} {"score":0.73,"keypoints":17}
2021-06-05 17:50:00 DATA: test-node-gpu.js result: performance: load: 347 total: 5708
2021-06-05 17:50:00 STATE: test-node-gpu.js passed: detect: samples/ai-face.jpg default
2021-06-05 17:50:00 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 4 object: 1 person: 1 {"age":23.6,"gender":"female"} {"score":0.82,"class":"person"} {"score":0.73,"keypoints":17}
2021-06-05 17:50:00 DATA: test-node-gpu.js result: performance: load: 2 total: 5708
2021-06-05 17:50:00 STATE: test-node-gpu.js passed: detect: samples/ai-body.jpg default
2021-06-05 17:50:00 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":28.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.93,"keypoints":17}
2021-06-05 17:50:00 DATA: test-node-gpu.js result: performance: load: 347 total: 5708
2021-06-05 17:50:00 STATE: test-node-gpu.js passed: detect: samples/ai-body.jpg default
2021-06-05 17:50:00 DATA: test-node-gpu.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 1 person: 1 {"age":28.5,"gender":"female"} {"score":0.72,"class":"person"} {"score":0.93,"keypoints":17}
2021-06-05 17:50:00 DATA: test-node-gpu.js result: performance: load: 2 total: 5708
2021-06-05 17:50:00 INFO: test-node-gpu.js test complete: 19284 ms
2021-06-05 17:50:00 INFO: test-node-wasm.js start
2021-06-05 17:50:00 STATE: test-node-wasm.js passed: model server: http://localhost:10030/models/
2021-06-05 17:50:00 STATE: test-node-wasm.js passed: create human
2021-06-05 17:50:00 INFO: test-node-wasm.js human version: 2.0.0
2021-06-05 17:50:00 INFO: test-node-wasm.js platform: linux x64 agent: NodeJS v16.0.0
2021-06-05 17:50:00 INFO: test-node-wasm.js tfjs version: 3.7.0
2021-06-05 17:50:01 STATE: test-node-wasm.js passed: set backend: wasm
2021-06-05 17:50:01 STATE: test-node-wasm.js passed: load models
2021-06-05 17:50:01 STATE: test-node-wasm.js result: defined models: 14 loaded models: 6
2021-06-05 17:50:01 STATE: test-node-wasm.js passed: warmup: none default
2021-06-05 17:50:01 ERROR: test-node-wasm.js failed: warmup: face default
2021-06-05 17:50:01 ERROR: test-node-wasm.js failed: warmup: body default
2021-06-05 17:50:01 INFO: test-node-wasm.js test body variants
2021-06-05 17:50:03 STATE: test-node-wasm.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 17:50:06 STATE: test-node-wasm.js passed: detect: samples/ai-body.jpg posenet
2021-06-05 17:50:06 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"age":28.5,"gender":"female"} {} {"score":0.96,"keypoints":16}
2021-06-05 17:50:06 DATA: test-node-wasm.js result: performance: load: 677 total: 3317
2021-06-05 17:50:08 STATE: test-node-wasm.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 17:50:10 STATE: test-node-wasm.js passed: detect: samples/ai-body.jpg movenet
2021-06-05 17:50:10 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"age":28.5,"gender":"female"} {} {"score":0.93,"keypoints":17}
2021-06-05 17:50:10 DATA: test-node-wasm.js result: performance: load: 677 total: 2105
2021-06-05 17:50:12 STATE: test-node-wasm.js passed: detect: random default
2021-06-05 17:50:12 DATA: test-node-wasm.js result: face: 0 body: 1 hand: 0 gesture: 0 object: 0 person: 0 {} {} {"score":0,"keypoints":0}
2021-06-05 17:50:12 DATA: test-node-wasm.js result: performance: load: 677 total: 1746
2021-06-05 17:50:12 INFO: test-node-wasm.js test: first instance
2021-06-05 17:50:13 STATE: test-node-wasm.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-06-05 17:50:15 STATE: test-node-wasm.js passed: detect: samples/ai-upper.jpg default
2021-06-05 17:50:15 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"age":29.5,"gender":"female"} {} {"score":0.78,"keypoints":7}
2021-06-05 17:50:15 DATA: test-node-wasm.js result: performance: load: 677 total: 2460
2021-06-05 17:50:15 INFO: test-node-wasm.js test: second instance
2021-06-05 17:50:16 STATE: test-node-wasm.js passed: load image: samples/ai-upper.jpg [1,720,688,3]
2021-06-05 17:50:18 STATE: test-node-wasm.js passed: detect: samples/ai-upper.jpg default
2021-06-05 17:50:18 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"age":29.5,"gender":"female"} {} {"score":0.78,"keypoints":7}
2021-06-05 17:50:18 DATA: test-node-wasm.js result: performance: load: 4 total: 2339
2021-06-05 17:50:18 INFO: test-node-wasm.js test: concurrent
2021-06-05 17:50:18 STATE: test-node-wasm.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-06-05 17:50:18 STATE: test-node-wasm.js passed: load image: samples/ai-face.jpg [1,256,256,3]
2021-06-05 17:50:20 STATE: test-node-wasm.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 17:50:22 STATE: test-node-wasm.js passed: load image: samples/ai-body.jpg [1,1200,1200,3]
2021-06-05 17:50:31 STATE: test-node-wasm.js passed: detect: samples/ai-face.jpg default
2021-06-05 17:50:31 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 4 object: 0 person: 1 {"age":23.6,"gender":"female"} {} {"score":0.73,"keypoints":17}
2021-06-05 17:50:31 DATA: test-node-wasm.js result: performance: load: 677 total: 9199
2021-06-05 17:50:31 STATE: test-node-wasm.js passed: detect: samples/ai-face.jpg default
2021-06-05 17:50:31 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 4 object: 0 person: 1 {"age":23.6,"gender":"female"} {} {"score":0.73,"keypoints":17}
2021-06-05 17:50:31 DATA: test-node-wasm.js result: performance: load: 4 total: 9199
2021-06-05 17:50:31 STATE: test-node-wasm.js passed: detect: samples/ai-body.jpg default
2021-06-05 17:50:31 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"age":28.5,"gender":"female"} {} {"score":0.93,"keypoints":17}
2021-06-05 17:50:31 DATA: test-node-wasm.js result: performance: load: 677 total: 9199
2021-06-05 17:50:31 STATE: test-node-wasm.js passed: detect: samples/ai-body.jpg default
2021-06-05 17:50:31 DATA: test-node-wasm.js result: face: 1 body: 1 hand: 0 gesture: 3 object: 0 person: 1 {"age":28.5,"gender":"female"} {} {"score":0.93,"keypoints":17}
2021-06-05 17:50:31 DATA: test-node-wasm.js result: performance: load: 4 total: 9199
2021-06-05 17:50:31 INFO: test-node-wasm.js test complete: 31126 ms
2021-06-05 17:50:31 INFO: status: {"passed":68,"failed":2}

@@ -7,17 +7,16 @@
"typeRoots": ["node_modules/@types"],
"outDir": "types",
"declaration": true,
"allowSyntheticDefaultImports": true,
"emitDeclarationOnly": true,
"emitDecoratorMetadata": true,
"experimentalDecorators": true,
"importHelpers": true,
"noImplicitAny": false,
"preserveConstEnums": true,
"removeComments": false,
"resolveJsonModule": true,
"skipLibCheck": true,
"sourceMap": false,
"strictNullChecks": true,
"sourceMap": true,
"allowJs": true,
"baseUrl": "./",
"paths": {
@@ -25,10 +24,21 @@
"@tensorflow/tfjs-node/dist/io/file_system": ["node_modules/@tensorflow/tfjs-node/dist/io/file_system.js"],
"@tensorflow/tfjs-core/dist/index": ["node_modules/@tensorflow/tfjs-core/dist/index.js"],
"@tensorflow/tfjs-converter/dist/index": ["node_modules/@tensorflow/tfjs-converter/dist/index.js"]
}
},
"strictNullChecks": true,
"noImplicitAny": false,
"noUnusedLocals": false,
"noImplicitReturns": true,
"noImplicitThis": true,
"alwaysStrict": true,
"noUnusedParameters": true,
"pretty": true,
"noFallthroughCasesInSwitch": true,
"allowUnreachableCode": false
},
"formatCodeOptions": { "indentSize": 2, "tabSize": 2 },
"include": ["src/*", "src/***/*"],
"exclude": ["node_modules/"],
"typedocOptions": {
"excludePrivate": true,
"excludeExternals": true,
@@ -629,7 +629,10 @@
<div class="tsd-comment tsd-typography">
<div class="lead">
<p>Controls and configures the body segmentation module
if segmentation is enabled, output result.canvas will be augmented with masked image containing only person output</p>
removes background from input containing person
if segmentation is enabled it will run as preprocessing task before any other model
alternatively leave it disabled and use it on-demand using human.segmentation method which can
remove background or replace it with user-provided background</p>
</div>
<ul>
<li>enabled: true/false</li>

@@ -180,7 +180,10 @@ export interface Config {
skipFrames: number;
};
/** Controls and configures the body segmentation module
* if segmentation is enabled, output result.canvas will be augmented with masked image containing only person output
* removes background from input containing person
* if segmentation is enabled it will run as preprocessing task before any other model
* alternatively leave it disabled and use it on-demand using human.segmentation method which can
* remove background or replace it with user-provided background
*
* - enabled: true/false
* - modelPath: body segmentation model, can be absolute path or relative to modelBasePath
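A minimal configuration sketch matching the documented options, with modelPath resolved relative to modelBasePath (the 'selfie.json' file name is an assumption; check the models folder for the actual name):

const userConfig = {
  modelBasePath: 'https://vladmandic.github.io/human/models/',
  segmentation: {
    enabled: true, // runs as a preprocessing task before any other model
    modelPath: 'selfie.json', // assumed segmentation model file name, relative to modelBasePath
  },
};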

@@ -8,6 +8,6 @@ export declare function load(config: Config): Promise<GraphModel>;
export declare function predict(input: {
tensor: Tensor | null;
canvas: OffscreenCanvas | HTMLCanvasElement;
}, config: Config): Promise<Uint8ClampedArray | null>;
}): Promise<Uint8ClampedArray | null>;
export declare function process(input: Input, background: Input | undefined, config: Config): Promise<HTMLCanvasElement | OffscreenCanvas | null>;
export {};

2 wiki

@@ -1 +1 @@
Subproject commit c9408224d824368facc264c00e05d7b520d69051
Subproject commit 9e92e5eec1e60b5ea58dbf1c4bbc67c828bcf673