mirror of https://github.com/vladmandic/human
added filter.flip feature
parent 22cf040972
commit c4a353787a
@@ -7,31 +7,32 @@ import Menu from './helpers/menu.js';
 import GLBench from './helpers/gl-bench.js';
 import webRTC from './helpers/webrtc.js';
 
-const userConfig = { backend: 'webgl' }; // add any user configuration overrides
+// const userConfig = { backend: 'webgl' }; // add any user configuration overrides
 let human;
 
-/*
 const userConfig = {
   backend: 'humangl',
   async: false,
   profile: false,
   warmup: 'full',
   videoOptimized: false,
-  filter: { enabled: false },
-  face: { enabled: false,
+  filter: {
+    enabled: true,
+    flip: true,
+  },
+  face: { enabled: true,
     mesh: { enabled: true },
     iris: { enabled: true },
-    description: { enabled: true },
-    emotion: { enabled: true },
+    description: { enabled: false },
+    emotion: { enabled: false },
   },
   hand: { enabled: false },
-  gesture: { enabled: false },
+  gesture: { enabled: true },
   body: { enabled: false },
   // body: { enabled: true, modelPath: 'blazepose.json' },
   // body: { enabled: true, modelPath: 'efficientpose.json' },
-  object: { enabled: true },
+  // object: { enabled: true },
 };
-*/
 
 // ui options
 const ui = {
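For context, a minimal usage sketch (not part of this commit) showing how the new filter.flip option is switched on; the import path is an assumption based on the bundled ESM build the demos load:

import Human from '../dist/human.esm.js'; // assumed bundle path, adjust to your setup

const human = new Human({
  backend: 'webgl',
  filter: {
    enabled: true, // the demo configs in this commit enable the filter pipeline together with flip
    flip: true,    // mirror the input horizontally before further processing
  },
});

async function run(video) {
  const result = await human.detect(video); // detection runs on the mirrored frame
  console.log(result.gesture);
}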
@@ -17,6 +17,10 @@ const myConfig = {
   debug: true,
   videoOptimized: false,
   async: false,
+  filter: {
+    enabled: true,
+    flip: true,
+  },
   face: {
     enabled: true,
     detector: { enabled: true, rotation: false },
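As a sketch of how such a partial configuration behaves (an assumption: the library merges user options over its built-in defaults), only the overridden keys need to be listed:

// hypothetical snippet, not part of the demo; assumes Human was imported as in the demo
const myConfig = {
  filter: { enabled: true, flip: true },
  face: { enabled: true },
};
const human = new Human(myConfig);
console.log(human.config.filter.flip); // true - overridden; unlisted options keep their defaults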
@@ -52,7 +56,7 @@ async function detect(input) {
   // read input image file and create tensor to be used for processing
   let buffer;
   log.info('Loading image:', input);
-  if (input.startsWith('http')) {
+  if (input.startsWith('http:') || input.startsWith('https:')) {
     const res = await fetch(input);
     if (res && res.ok) buffer = await res.buffer();
     else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type'));
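The stricter prefix check above presumably avoids treating a local filename that merely starts with "http" as a URL. An alternative sketch (not used by the demo) is to let the standard URL parser decide:

function isHttpUrl(input) {
  try {
    const { protocol } = new URL(input); // throws for non-URL strings such as local file paths
    return protocol === 'http:' || protocol === 'https:';
  } catch {
    return false;
  }
}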
@@ -62,6 +66,7 @@ async function detect(input) {
 
   // decode image using tfjs-node so we don't need external depenencies
   // can also be done using canvas.js or some other 3rd party image library
+  if (!buffer) return {};
   const decoded = human.tf.node.decodeImage(buffer);
   const casted = decoded.toFloat();
   const tensor = casted.expandDims(0);
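Condensed into a standalone helper, the decode path above looks roughly like this (detectFromBuffer is a hypothetical name; it assumes the tfjs-node backend so that human.tf.node is available):

async function detectFromBuffer(human, buffer) {
  if (!buffer) return {};                            // nothing was fetched or read
  const decoded = human.tf.node.decodeImage(buffer); // uint8 tensor [height, width, channels]
  const casted = decoded.toFloat();                  // models expect float input
  const tensor = casted.expandDims(0);               // add batch dimension -> [1, h, w, c]
  const result = await human.detect(tensor);
  human.tf.dispose([decoded, casted, tensor]);       // release intermediate tensors
  return result;
}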
@@ -55,6 +55,8 @@ export interface Config {
     height: number,
     /** Return processed canvas imagedata in result */
     return: boolean,
+    /** Flip input as mirror image */
+    flip: boolean,
     /** Range: -1 (darken) to 1 (lighten) */
     brightness: number,
     /** Range: -1 (reduce contrast) to 1 (increase contrast) */
@@ -82,6 +84,8 @@ export interface Config {
     /** Range: 0 (no pixelate) to N (number of pixels to pixelate) */
     pixelate: number,
   },
+  // type definition end
+
   /** Controlls gesture detection */
   gesture: {
     enabled: boolean,
@@ -250,6 +254,7 @@ const config: Config = {
                             // if both width and height are set to 0, there is no resizing
                             // if just one is set, second one is scaled automatically
                             // if both are set, values are used as-is
+    flip: false,            // flip input as mirror image
     return: true,           // return processed canvas imagedata in result
     brightness: 0,          // range: -1 (darken) to 1 (lighten)
     contrast: 0,            // range: -1 (reduce contrast) to 1 (increase contrast)
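Since the default above is flip: false, mirroring has to be opted into. A sketch of the two usual ways to do that (assuming detect() accepts a per-call partial config, as the demos' userConfig pattern suggests):

// assumes the same Human import as the demos
const human = new Human({ filter: { enabled: true } }); // flip keeps its default of false

async function detectMirrored(input) {
  return human.detect(input, { filter: { flip: true } }); // per-call override (assumed API shape)
}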
@@ -25,7 +25,7 @@ export const face = (res) => {
     if (res[i].mesh && res[i].mesh.length > 0) {
       const eyeFacing = res[i].mesh[33][2] - res[i].mesh[263][2];
       if (Math.abs(eyeFacing) < 10) gestures.push({ face: i, gesture: 'facing center' });
-      else gestures.push({ face: i, gesture: `facing ${eyeFacing < 0 ? 'right' : 'left'}` });
+      else gestures.push({ face: i, gesture: `facing ${eyeFacing < 0 ? 'left' : 'right'}` });
       const openLeft = Math.abs(res[i].mesh[374][1] - res[i].mesh[386][1]) / Math.abs(res[i].mesh[443][1] - res[i].mesh[450][1]); // center of eye inner lid y coord div center of wider eye border y coord
       if (openLeft < 0.2) gestures.push({ face: i, gesture: 'blink left eye' });
       const openRight = Math.abs(res[i].mesh[145][1] - res[i].mesh[159][1]) / Math.abs(res[i].mesh[223][1] - res[i].mesh[230][1]); // center of eye inner lid y coord div center of wider eye border y coord
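The facing heuristic above, extracted into a standalone sketch: mesh points 33 and 263 are two landmarks around the right and left eyes, so the sign of their depth (z) difference indicates which way the head is turned, and values close to zero mean the face is roughly frontal:

function facingDirection(mesh) {
  const eyeFacing = mesh[33][2] - mesh[263][2]; // depth difference between the two eye landmarks
  if (Math.abs(eyeFacing) < 10) return 'facing center';
  return `facing ${eyeFacing < 0 ? 'left' : 'right'}`; // labels as remapped by this commit
}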
@@ -53,9 +53,21 @@ export function process(input, config): { tensor: typeof tf.Tensor | null, canva
     if (inCanvas?.width !== targetWidth) inCanvas.width = targetWidth;
     if (inCanvas?.height !== targetHeight) inCanvas.height = targetHeight;
   }
+
   const ctx = inCanvas.getContext('2d');
-  if (input instanceof ImageData) ctx.putImageData(input, 0, 0);
-  else ctx.drawImage(input, 0, 0, originalWidth, originalHeight, 0, 0, inCanvas?.width, inCanvas?.height);
+  if (input instanceof ImageData) {
+    ctx.putImageData(input, 0, 0);
+  } else {
+    if (!config.filter.flip) {
+      ctx.drawImage(input, 0, 0, originalWidth, originalHeight, 0, 0, inCanvas?.width, inCanvas?.height);
+    } else {
+      ctx.translate(originalWidth, 0);
+      ctx.scale(-1, 1);
+      ctx.drawImage(input, 0, 0, originalWidth, originalHeight, 0, 0, inCanvas?.width, inCanvas?.height);
+      ctx.setTransform(1, 0, 0, 1, 0, 0);
+    }
+  }
+
   if (config.filter.enabled) {
     if (!fx || !outCanvas || (inCanvas.width !== outCanvas.width) || (inCanvas?.height !== outCanvas?.height)) {
       outCanvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(inCanvas?.width, inCanvas?.height) : document.createElement('canvas');
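The mirroring added above is the standard 2D-canvas technique: shift the origin to the right edge, negate the x scale, draw, then restore the identity transform so later drawing on the same context is unaffected. A generic sketch:

function drawMirrored(ctx, image, width, height) {
  ctx.translate(width, 0);            // move the origin to the right edge
  ctx.scale(-1, 1);                   // flip the x axis
  ctx.drawImage(image, 0, 0, width, height);
  ctx.setTransform(1, 0, 0, 1, 0, 0); // reset to the identity transform
}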