compatibility notes

pull/293/head
Vladimir Mandic 2020-10-18 14:14:05 -04:00
parent e83774d7d5
commit 827a04e2d0
5 changed files with 25 additions and 15 deletions

View File

@@ -229,7 +229,8 @@ config = {
scoped: false, // enable scoped runs
// some models *may* have memory leaks, this wraps everything in a local scope at a cost of performance
// typically not needed
filter: {
videoOptimized: true, // perform additional optimizations when input is video; must be disabled for still images
filter: { // note: image filters are only available in Browser environments and not in NodeJS as they require WebGL for processing
enabled: true, // enable image pre-processing filters
return: true, // return processed canvas imagedata in result
brightness: 0, // range: -1 (darken) to 1 (lighten)
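
Taken together, these options mean a browser caller tuning the filter chain might look like the following sketch (uses the `human.detect(input, config)` API from this repo's demos; element ids and values are illustrative):

```js
// Sketch (browser only; per the note above, filters require WebGL):
// enable pre-processing and return the processed canvas in the result.
import human from '../dist/human.esm.js'; // as in the demos

const stillImageConfig = {
  videoOptimized: false, // required when the input is a still image
  filter: {
    enabled: true,   // run the WebGL filter chain
    return: true,    // include processed image data in the result
    brightness: 0.2, // lighten slightly (range: -1 to 1)
  },
};
const imageElement = document.getElementById('image'); // hypothetical <img> id
const result = await human.detect(imageElement, stillImageConfig);
```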
@@ -432,6 +433,15 @@ For performance details, see the output of the `result.performance` object at runtime
<hr>
## Limitations
The `Human` library can be used in any modern Browser or NodeJS environment, but there are several items to be aware of:
- **NodeJS**: Due to a missing feature in `tfjs-node`, only some models are available <https://github.com/tensorflow/tfjs/issues/4066>
- **Browser**: the `filters` module cannot be used inside web workers <https://github.com/phoboslab/WebGLImageFilter/issues/27>; a worker-side workaround is sketched below
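
The worker limitation is easy to work around by disabling filters in the configuration the worker passes to detection; a minimal sketch, assuming the same `human.detect` call used in the worker demo:

```js
// Sketch: inside a web worker WebGLImageFilter is unavailable, so switch the
// filter module off before running detection on the transferred frame.
import human from '../dist/human.esm.js'; // as in the worker demo

const workerConfig = { filter: { enabled: false } };
const result = await human.detect(imageData, workerConfig); // imageData: reconstructed ImageData
```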
<hr>
## Credits
- Face Detection: [**MediaPipe BlazeFace**](https://drive.google.com/file/d/1f39lSzU5Oq-j_OXgS67KfN5wNsoeAZ4V/view)

View File

@@ -7,6 +7,7 @@ export default {
scoped: false, // enable scoped runs
// some models *may* have memory leaks, this wraps everything in a local scope at a cost of performance
// typically not needed
videoOptimized: true, // perform additional optimizations when input is video; must be disabled for still images
filter: {
enabled: true, // enable image pre-processing filters
return: true, // return processed canvas imagedata in result

View File

@@ -28,6 +28,7 @@ const ui = {
const config = {
backend: 'webgl', // if you want to use 'wasm' backend, enable script load of tf and tf-backend-wasm in index.html
filter: { enabled: true, brightness: 0, contrast: 0, sharpness: 0, blur: 0, saturation: 0, hue: 0, negative: false, sepia: false, vintage: false, kodachrome: false, technicolor: false, polaroid: false, pixelate: 0 },
videoOptimized: true,
face: {
enabled: true,
detector: { maxFaces: 10, skipFrames: 10, minConfidence: 0.5, iouThreshold: 0.3, scoreThreshold: 0.7 },
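
The `backend` comment above implies a runtime switch as well; a hedged sketch of what that could look like, assuming the `tf` global exposed by the script loads in `index.html`:

```js
// Sketch: once the tf and tf-backend-wasm scripts are loaded in index.html,
// the wasm backend self-registers and can be activated before detection.
config.backend = 'wasm';
await tf.setBackend('wasm'); // select the registered wasm backend
await tf.ready();            // wait for backend initialization to complete
```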

View File

@@ -1,6 +1,7 @@
import human from '../dist/human.esm.js';
let config;
let busy = false;
const log = (...msg) => {
// eslint-disable-next-line no-console
@@ -8,6 +9,8 @@ const log = (...msg) => {
};
onmessage = async (msg) => {
if (busy) return;
busy = true;
// worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
const image = new ImageData(new Uint8ClampedArray(msg.data.image), msg.data.width, msg.data.height);
config = msg.data.config;
@@ -19,4 +22,5 @@ onmessage = async (msg) => {
log('Worker thread error:', err.message);
}
postMessage(result);
busy = false;
};
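
The new `busy` flag makes the worker drop frames that arrive while a detection is still running. The main-thread side of this exchange follows the `postMessage` signature shown in the comment above; a sketch with hypothetical element ids:

```js
// Sketch (main thread, hypothetical ids): draw the current video frame and
// transfer its pixel buffer to the worker; the buffer is moved, not copied.
const worker = new Worker('worker.js', { type: 'module' }); // worker file name assumed
const video = document.getElementById('video');
const canvas = document.getElementById('capture');
const ctx = canvas.getContext('2d');
ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
const image = ctx.getImageData(0, 0, canvas.width, canvas.height);
worker.postMessage({ image: image.data.buffer, width: canvas.width, height: canvas.height, config }, [image.data.buffer]);
worker.onmessage = (msg) => console.log('result:', msg.data); // frames sent while busy are ignored
```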

View File

@@ -74,15 +74,6 @@ function mergeDeep(...objects) {
function sanity(input) {
if (!input) return 'input is not defined';
if (!(input instanceof tf.Tensor)
|| (tf.ENV.flags.IS_BROWSER
&& (input instanceof ImageData || input instanceof HTMLImageElement || input instanceof HTMLCanvasElement || input instanceof HTMLVideoElement || input instanceof HTMLMediaElement))) {
const width = input.naturalWidth || input.videoWidth || input.width || (input.shape && (input.shape[1] > 0));
if (!width || (width === 0)) return 'input is empty';
}
if (tf.ENV.flags.IS_BROWSER && (input instanceof HTMLVideoElement || input instanceof HTMLMediaElement)) {
if (input.readyState && (input.readyState <= 2)) return 'input is not ready';
}
if (tf.ENV.flags.IS_NODE && !(input instanceof tf.Tensor)) {
return 'input must be a tensor';
}
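
The NodeJS branch means callers must decode images to tensors themselves before calling detection; a minimal sketch using `tfjs-node` (the `human` handle and file path are assumptions, not part of this commit):

```js
// Sketch (NodeJS): sanity() accepts only tensors under Node, so decode the
// image with tfjs-node first. Assumes `human` is the loaded library instance.
const tf = require('@tensorflow/tfjs-node');
const fs = require('fs');

async function detectFromFile(path, config) {
  const tensor = tf.node.decodeImage(fs.readFileSync(path)); // JPEG/PNG/GIF/BMP to tensor
  const result = await human.detect(tensor, config);
  tensor.dispose(); // free backing memory once detection is done
  return result;
}
```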
@@ -127,15 +118,18 @@ function tfImage(input) {
let filtered;
if (tf.ENV.flags.IS_BROWSER && config.filter.enabled && !(input instanceof tf.Tensor)) {
const width = input.naturalWidth || input.videoWidth || input.width || (input.shape && (input.shape[1] > 0));
const height = input.naturalHeight || input.videoHeight || input.Height || (input.shape && (input.shape[2] > 0));
// if (!offscreenCanvas) offscreenCanvas = new OffscreenCanvas(width, height);
const height = input.naturalHeight || input.videoHeight || input.height || (input.shape && (input.shape[2] > 0));
if (!offscreenCanvas) offscreenCanvas = new OffscreenCanvas(width, height);
/*
if (!offscreenCanvas) {
offscreenCanvas = document.createElement('canvas');
offscreenCanvas.width = width;
offscreenCanvas.height = height;
}
*/
const ctx = offscreenCanvas.getContext('2d');
ctx.drawImage(input, 0, 0, width, height, 0, 0, offscreenCanvas.width, offscreenCanvas.height);
if (input instanceof ImageData) ctx.putImageData(input, 0, 0);
else ctx.drawImage(input, 0, 0, width, height, 0, 0, offscreenCanvas.width, offscreenCanvas.height);
if (!fx) fx = new fxImage.Canvas();
else fx.reset();
fx.addFilter('brightness', config.filter.brightness); // must have at least one filter enabled
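
One caveat with the switch above: `OffscreenCanvas` is not implemented in every browser, and the commented-out block is the DOM fallback this commit disables. A guarded version combining both paths could look like this sketch (not part of the commit):

```js
// Sketch: prefer OffscreenCanvas when available, otherwise fall back to a
// detached DOM canvas, as in the commented-out path above.
if (!offscreenCanvas) {
  if (typeof OffscreenCanvas !== 'undefined') {
    offscreenCanvas = new OffscreenCanvas(width, height);
  } else {
    offscreenCanvas = document.createElement('canvas');
    offscreenCanvas.width = width;
    offscreenCanvas.height = height;
  }
}
```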
@@ -173,8 +167,8 @@ async function detect(input, userConfig = {}) {
let timeStamp;
timeStamp = now();
const shouldOverride = tf.ENV.flags.IS_NODE || (tf.ENV.flags.IS_BROWSER && !((input instanceof HTMLVideoElement) || (input instanceof HTMLMediaElement)));
config = mergeDeep(defaults, userConfig, shouldOverride ? override : {});
config = mergeDeep(defaults, userConfig);
if (!config.videoOptimized) config = mergeDeep(config, override);
perf.config = Math.trunc(now() - timeStamp);
// sanity checks
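
The net effect of the new merge order is that the caller, not the input type, decides whether the `override` block applies: user settings always win over defaults, and `override` is merged in only when video optimizations are off. A sketch of both call sites (element handles assumed):

```js
// Sketch: after this change videoOptimized is the single switch controlling
// whether the override config (presumably relaxing frame-skipping defaults)
// is applied.
import human from '../dist/human.esm.js'; // as in the demos

const videoResult = await human.detect(videoElement, { videoOptimized: true });  // override skipped
const imageResult = await human.detect(imageElement, { videoOptimized: false }); // override merged in
```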