implemented drag & drop for image processing
@@ -1,3 +1,3 @@
 node_modules
-private
 pnpm-lock.yaml
+samples
@@ -1,3 +1,6 @@
 node_modules
-private
 pnpm-lock.yaml
+samples
+typedoc
+test
+wiki
README.md (40 changes)
@@ -23,7 +23,13 @@ JavaScript module using TensorFlow/JS Machine Learning library
 Compatible with both software *tfjs-node* and
 GPU accelerated backends *tfjs-node-gpu* using CUDA libraries

-Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) for processing of live WebCam video or static images
-
-<br>
+Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) app for processing of live WebCam video or static images
+
+- To start video detection, simply press *Play*
+- To process images, simply drag & drop in your Browser window
+- Note: For optimal performance, select only models you'd like to use
+
+<br>
+
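The drag & drop bullet above is implemented in the demo/index.js changes further down in this commit. As a minimal sketch of the underlying browser pattern (illustrative only, not the demo's exact code): a page-level `drop` handler receives a `DataTransfer` whose `files` property lists the dropped images, and `dragover` must call `preventDefault()` or the `drop` event never fires.

```js
// Minimal sketch: page-level drag & drop that lists dropped image files.
document.body.addEventListener('dragover', (evt) => evt.preventDefault()); // required, otherwise 'drop' never fires
document.body.addEventListener('drop', (evt) => {
  evt.preventDefault();
  for (const file of evt.dataTransfer.files) {
    console.log('dropped:', file.name, file.type, file.size);
  }
});
```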
@@ -89,20 +95,30 @@ All options as presented in the demo application...

 <br>

-**Validation image:**
-> [demo/index.html](demo/index.html?image=%22../assets/human-sample-upper.jpg%22)
-
+**Face Close-up:**
+

 <br>

-**Using static images:**
-> [demo/index.html](demo/index.html?images=true)
-
+**Face under a high angle:**
+

 <br>

-**Live WebCam view:**
-> [demo/index.html](demo/index.html)
-
+**Full Person Details:**
+

 <br>

+**Pose Detection:**
+
+
+<br>
+
+**Large Group:**
+
+
+<br>
+
 **Face Similarity Matching:**
 Extracts all faces from provided input images,

@@ -112,11 +128,15 @@ and optionally matches detected face with database of known people to guess their

 

 <br>

+**Face3D OpenGL Rendering:**
+> [demo/face3d.html](demo/face3d.html)
+
+
 <br>

 **468-Point Face Mesh Details:**
 (view in full resolution to see keypoints)
[binary image assets under assets/ and samples/ replaced in this commit; GitHub's before/after width/height/size metadata rows omitted]
demo/index.js (104 changes)
@@ -77,6 +77,7 @@ const ui = {
   modelsPreload: true, // preload human models on startup
   modelsWarmup: true, // warmup human models on startup
   buffered: true, // should output be buffered between frames
+  interpolated: true, // should output be interpolated for smoothness between frames
   iconSize: '48px', // ui icon sizes

   // internal variables
@@ -228,8 +229,12 @@ async function drawResults(input) {
   }

   // draw all results using interpolated results
-  const interpolated = human.next(result);
-  human.draw.all(canvas, interpolated, drawOptions);
+  if (ui.interpolated) {
+    const interpolated = human.next(result);
+    human.draw.all(canvas, interpolated, drawOptions);
+  } else {
+    human.draw.all(canvas, result, drawOptions);
+  }
   /* alternatively use individual functions
   human.draw.face(canvas, result.face);
   human.draw.body(canvas, result.body);
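For context on the change above: `human.next(result)` returns a time-interpolated variant of the last detection result so that draws between detections stay smooth; a still image has no next frame, which is why this commit adds the `ui.interpolated` flag and switches it off during image processing. A rough sketch of the pattern, assuming `lastResult` holds the most recent `human.detect()` output:

```js
// Sketch: draw loop decoupled from detection; interpolate only for video input.
function renderLoop(smooth) {
  const toDraw = smooth ? human.next(lastResult) : lastResult; // interpolated vs raw result
  human.draw.all(canvas, toDraw, drawOptions);
  requestAnimationFrame(() => renderLoop(smooth));
}
```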
@@ -246,20 +251,21 @@ async function drawResults(input) {
   const gpu = engine.backendInstance ? `gpu: ${(engine.backendInstance.numBytesInGPU ? engine.backendInstance.numBytesInGPU : 0).toLocaleString()} bytes` : '';
   const memory = `system: ${engine.state.numBytes.toLocaleString()} bytes ${gpu} | tensors: ${engine.state.numTensors.toLocaleString()}`;
   const processing = result.canvas ? `processing: ${result.canvas.width} x ${result.canvas.height}` : '';
-  const avgDetect = Math.trunc(10 * ui.detectFPS.reduce((a, b) => a + b, 0) / ui.detectFPS.length) / 10;
-  const avgDraw = Math.trunc(10 * ui.drawFPS.reduce((a, b) => a + b, 0) / ui.drawFPS.length) / 10;
+  const avgDetect = ui.detectFPS.length > 0 ? Math.trunc(10 * ui.detectFPS.reduce((a, b) => a + b, 0) / ui.detectFPS.length) / 10 : 0;
+  const avgDraw = ui.drawFPS.length > 0 ? Math.trunc(10 * ui.drawFPS.reduce((a, b) => a + b, 0) / ui.drawFPS.length) / 10 : 0;
   const warning = (ui.detectFPS.length > 5) && (avgDetect < 5) ? '<font color="lightcoral">warning: your performance is low: try switching to higher performance backend, lowering resolution or disabling some models</font>' : '';
+  const fps = avgDetect > 0 ? `FPS process:${avgDetect} refresh:${avgDraw}` : '';
   document.getElementById('log').innerHTML = `
     video: ${ui.camera.name} | facing: ${ui.camera.facing} | screen: ${window.innerWidth} x ${window.innerHeight} camera: ${ui.camera.width} x ${ui.camera.height} ${processing}<br>
     backend: ${human.tf.getBackend()} | ${memory}<br>
-    performance: ${str(lastDetectedResult.performance)}ms FPS process:${avgDetect} refresh:${avgDraw}<br>
+    performance: ${str(lastDetectedResult.performance)}ms ${fps}<br>
     ${warning}<br>
   `;
   ui.framesDraw++;
   ui.lastFrame = performance.now();
   // if buffered, immediate loop but limit frame rate although it's going to run slower as JS is singlethreaded
   if (ui.buffered) {
-    ui.drawThread = requestAnimationFrame(() => drawResults(input, canvas));
+    ui.drawThread = requestAnimationFrame(() => drawResults(input));
   } else {
     log('stopping buffered refresh');
     if (ui.drawThread) cancelAnimationFrame(ui.drawThread);
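The new length checks above fix a divide-by-zero: averaging an empty FPS buffer previously produced `NaN`, which then rendered literally in the log line. A one-liner demonstrating the failure mode:

```js
const fps = [];
const avgBroken = fps.reduce((a, b) => a + b, 0) / fps.length; // 0 / 0 => NaN
const avgSafe = fps.length > 0 ? fps.reduce((a, b) => a + b, 0) / fps.length : 0; // => 0
```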
@@ -431,7 +437,7 @@ function runHumanDetect(input, canvas, timestamp) {
     ctx.drawImage(input, 0, 0, canvas.width, canvas.height);
     const data = ctx.getImageData(0, 0, canvas.width, canvas.height);
     // perform detection in worker
-    webWorker(input, data, canvas, userConfig, timestamp);
+    webWorker(input, data, canvas, timestamp);
     status();
   } else {
     human.detect(input, userConfig).then((result) => {
@@ -457,32 +463,66 @@ function runHumanDetect(input, canvas, timestamp) {
 }

 // main processing function when input is image, can use direct invocation or web worker
-async function processImage(input) {
+async function processImage(input, title) {
   return new Promise((resolve) => {
     const image = new Image();
     image.onerror = async () => status('image loading error');
     image.onload = async () => {
-      log('processing image:', encodeURI(image.src));
+      ui.interpolated = false; // stop interpolating results if input is image
+      status(`processing image: ${title}`);
       const canvas = document.getElementById('canvas');
       image.width = image.naturalWidth;
       image.height = image.naturalHeight;
       canvas.width = human.config.filter.width && human.config.filter.width > 0 ? human.config.filter.width : image.naturalWidth;
       canvas.height = human.config.filter.height && human.config.filter.height > 0 ? human.config.filter.height : image.naturalHeight;
+      const origCacheSensitiry = userConfig.cacheSensitivity;
+      userConfig.cacheSensitivity = 0;
       const result = await human.detect(image, userConfig);
+      userConfig.cacheSensitivity = origCacheSensitiry;
       lastDetectedResult = result;
       await drawResults(image);
       const thumb = document.createElement('canvas');
       thumb.className = 'thumbnail';
-      thumb.width = window.innerWidth / (ui.columns + 0.1);
+      thumb.width = ui.columns > 1 ? window.innerWidth / (ui.columns + 0.1) : window.innerWidth - 14;
       thumb.height = thumb.width * canvas.height / canvas.width;
       if (result.face && result.face.length > 0) {
         thumb.title = result.face.map((a, i) => `#${i} face: ${Math.trunc(100 * a.faceScore)}% box: ${Math.trunc(100 * a.boxScore)}% age: ${Math.trunc(a.age)} gender: ${Math.trunc(100 * a.genderScore)}% ${a.gender}`).join(' | ');
       } else {
         thumb.title = 'no face detected';
       }
+      thumb.addEventListener('click', (evt) => {
+        const stdWidth = ui.columns > 1 ? window.innerWidth / (ui.columns + 0.1) : window.innerWidth - 14;
+        // zoom in/out on click
+        if (evt.target.style.width === `${stdWidth}px`) {
+          evt.target.style.width = '';
+          evt.target.style.height = `${document.getElementById('log').offsetTop - document.getElementById('media').offsetTop}px`;
+        } else {
+          evt.target.style.width = `${stdWidth}px`;
+          evt.target.style.height = '';
+        }
+        // copy to clipboard on click
+        if (typeof ClipboardItem !== 'undefined' && navigator.clipboard) {
+          evt.target.toBlob((blob) => {
+            // eslint-disable-next-line no-undef
+            const item = new ClipboardItem({ 'image/png': blob });
+            navigator.clipboard.write([item]);
+            log('copied image to clipboard');
+          });
+        }
+      });
       const ctx = thumb.getContext('2d');
       ctx.drawImage(canvas, 0, 0, canvas.width, canvas.height, 0, 0, thumb.width, thumb.height);
-      document.getElementById('samples-container').appendChild(thumb);
-      image.src = '';
+      const prev = document.getElementsByClassName('thumbnail');
+      if (prev && prev.length > 0) document.getElementById('samples-container').insertBefore(thumb, prev[0]);
+      else document.getElementById('samples-container').appendChild(thumb);
+
+      // finish up
+      status();
+      document.getElementById('play').style.display = 'none';
+      document.getElementById('loader').style.display = 'none';
+      if (ui.detectThread) cancelAnimationFrame(ui.detectThread);
+      if (ui.drawThread) cancelAnimationFrame(ui.drawThread);
+
       resolve(true);
     };
     image.src = input;
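Two details of the `processImage` rewrite are worth noting. First, `cacheSensitivity` is forced to 0 around the single `detect()` call so a still image is never skipped as an "unchanged" frame, then restored afterwards. The restore is not exception-safe; a generic sketch of the same pattern with a `finally` (the helper name is illustrative, not part of this commit):

```js
// Illustrative helper: override one config key for the duration of a call,
// restoring the original value even if the call throws.
async function withOverride(config, key, value, fn) {
  const original = config[key];
  config[key] = value;
  try {
    return await fn();
  } finally {
    config[key] = original;
  }
}
// e.g. await withOverride(userConfig, 'cacheSensitivity', 0, () => human.detect(image, userConfig));
```

Second, the thumbnail click handler guards on `typeof ClipboardItem !== 'undefined'` because the asynchronous Clipboard API's `ClipboardItem` is not available in all browsers.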
@@ -522,11 +562,7 @@ async function detectSampleImages() {
   status('processing images');
   document.getElementById('samples-container').innerHTML = '';
   for (const m of Object.values(menu)) m.hide();
-  for (const image of ui.samples) await processImage(image);
-  status();
-  document.getElementById('play').style.display = 'none';
-  document.getElementById('loader').style.display = 'none';
-  if (ui.detectThread) cancelAnimationFrame(ui.detectThread);
+  for (const image of ui.samples) await processImage(image, image);
 }

 function setupMenu() {
@@ -604,8 +640,8 @@ function setupMenu() {
     human.config.hand.rotation = val;
   });
   menu.process.addHTML('<hr style="border-style: inset; border-color: dimgray">');
-  menu.process.addButton('process sample images', 'process images', () => detectSampleImages());
-  menu.process.addHTML('<hr style="border-style: inset; border-color: dimgray">');
+  // menu.process.addButton('process sample images', 'process images', () => detectSampleImages());
+  // menu.process.addHTML('<hr style="border-style: inset; border-color: dimgray">');
   menu.process.addChart('FPS', 'FPS');

   menu.models = new Menu(document.body, '', { top, left: x[3] });
@@ -676,6 +712,31 @@ async function drawWarmup(res) {
   await human.draw.all(canvas, res, drawOptions);
 }

+async function processDataURL(f) {
+  return new Promise((resolve) => {
+    const reader = new FileReader();
+    reader.onload = async (e) => {
+      const dataURL = e.target.result;
+      await processImage(dataURL, f.name);
+      document.getElementById('canvas').style.display = 'none';
+      resolve(true);
+    };
+    reader.readAsDataURL(f);
+  });
+}
+
+async function dragAndDrop() {
+  document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
+  document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
+  document.body.addEventListener('dragover', (evt) => evt.preventDefault());
+  document.body.addEventListener('drop', async (evt) => {
+    evt.preventDefault();
+    evt.dataTransfer.dropEffect = 'copy';
+    if (evt.dataTransfer.files.length < 2) ui.columns = 1;
+    for (const f of evt.dataTransfer.files) await processDataURL(f);
+  });
+}
+
 async function pwaRegister() {
   if (!pwa.enabled) return;
   if ('serviceWorker' in navigator) {
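`processDataURL` converts each dropped `File` to a base64 `data:` URL, which plugs directly into the existing `processImage(image.src)` path. An alternative sketch (not what this commit does) is an object URL, which avoids base64-encoding large images but must be revoked manually:

```js
// Illustrative alternative: blob object URL instead of a data: URL.
async function processObjectURL(file) {
  const url = URL.createObjectURL(file); // blob: URL referencing the dropped File
  try {
    await processImage(url, file.name); // same downstream path as processDataURL
  } finally {
    URL.revokeObjectURL(url); // release the blob reference
  }
}
```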
@@ -790,11 +851,16 @@ async function main() {
   document.getElementById('play').style.display = 'block';
   for (const m of Object.values(menu)) m.hide();

+  // init drag & drop
+
+  await dragAndDrop();
+
   if (params.has('image')) {
     try {
       const image = JSON.parse(params.get('image'));
       log('overriding image:', image);
       ui.samples = [image];
+      ui.columns = 1;
     } catch {
       status('cannot parse input image');
       log('cannot parse input image', params.get('image'));
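The `JSON.parse(params.get('image'))` above also explains the URL shape used in the README links earlier in this commit: the image path is passed as a JSON string literal, with the quotes percent-encoded as `%22`. For example:

```js
// demo/index.html?image=%22../assets/human-sample-upper.jpg%22
const params = new URLSearchParams('image=%22../assets/human-sample-upper.jpg%22');
const image = JSON.parse(params.get('image')); // -> '../assets/human-sample-upper.jpg'
```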
@@ -155,8 +155,8 @@ var config = {
     enabled: true,
     detector: {
       modelPath: "blazeface.json",
-      rotation: false,
-      maxDetected: 10,
+      rotation: true,
+      maxDetected: 5,
       skipFrames: 15,
       minConfidence: 0.2,
       iouThreshold: 0.1,
@@ -9806,7 +9806,7 @@ __export(draw_exports, {
   person: () => person
 });
 var options = {
-  color: "rgba(173, 216, 230, 0.3)",
+  color: "rgba(173, 216, 230, 0.6)",
   labelColor: "rgba(173, 216, 230, 1)",
   shadowColor: "black",
   font: 'small-caps 14px "Segoe UI"',
@@ -156,8 +156,8 @@ var config = {
     enabled: true,
     detector: {
       modelPath: "blazeface.json",
-      rotation: false,
-      maxDetected: 10,
+      rotation: true,
+      maxDetected: 5,
       skipFrames: 15,
       minConfidence: 0.2,
       iouThreshold: 0.1,
@@ -9807,7 +9807,7 @@ __export(draw_exports, {
   person: () => person
 });
 var options = {
-  color: "rgba(173, 216, 230, 0.3)",
+  color: "rgba(173, 216, 230, 0.6)",
   labelColor: "rgba(173, 216, 230, 1)",
   shadowColor: "black",
   font: 'small-caps 14px "Segoe UI"',
@@ -155,8 +155,8 @@ var config = {
     enabled: true,
     detector: {
       modelPath: "blazeface.json",
-      rotation: false,
-      maxDetected: 10,
+      rotation: true,
+      maxDetected: 5,
       skipFrames: 15,
       minConfidence: 0.2,
       iouThreshold: 0.1,
@@ -9806,7 +9806,7 @@ __export(draw_exports, {
   person: () => person
 });
 var options = {
-  color: "rgba(173, 216, 230, 0.3)",
+  color: "rgba(173, 216, 230, 0.6)",
   labelColor: "rgba(173, 216, 230, 1)",
   shadowColor: "black",
   font: 'small-caps 14px "Segoe UI"',
@@ -241,10 +241,10 @@ const config: Config = {
     // (note: module is not loaded until it is required)
     detector: {
       modelPath: 'blazeface.json', // detector model, can be absolute path or relative to modelBasePath
-      rotation: false, // use best-guess rotated face image or just box with rotation as-is
+      rotation: true, // use best-guess rotated face image or just box with rotation as-is
       // false means higher performance, but incorrect mesh mapping if face angle is above 20 degrees
       // this parameter is not valid in nodejs
-      maxDetected: 10, // maximum number of faces detected in the input
+      maxDetected: 5, // maximum number of faces detected in the input
       // should be set to the minimum number for performance
       skipFrames: 15, // how many max frames to go without re-running the face bounding box detector
       // only used when cacheSensitivity is not zero
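The three bundle hunks above carry the same two default changes (`rotation`, `maxDetected`) into the generated dist files; this src/config.ts hunk is the source of truth. These are library defaults only — callers can still override them through the user config merged at runtime. A sketch, assuming the published `@vladmandic/human` package in a browser module context:

```js
import Human from '@vladmandic/human';

// Restore the previous behavior for one app without touching library defaults.
const human = new Human({ face: { detector: { rotation: false, maxDetected: 10 } } });
const result = await human.detect(imageElement); // imageElement: any drawable input (video, image, canvas)
```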
@@ -47,7 +47,7 @@ export interface DrawOptions {
 }

 export const options: DrawOptions = {
-  color: <string>'rgba(173, 216, 230, 0.3)', // 'lightblue' with light alpha channel
+  color: <string>'rgba(173, 216, 230, 0.6)', // 'lightblue' with light alpha channel
   labelColor: <string>'rgba(173, 216, 230, 1)', // 'lightblue' with dark alpha channel
   shadowColor: <string>'black',
   font: <string>'small-caps 14px "Segoe UI"',
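Likewise, the default box/label color alpha moves from 0.3 to 0.6 in both source and bundles. Draw options can also be supplied per call rather than edited globally; the demo itself passes a `drawOptions` object, e.g.:

```js
// Per-call override of the exported defaults; both keys appear in DrawOptions above.
const drawOptions = { color: 'rgba(173, 216, 230, 0.6)', labelColor: 'rgba(173, 216, 230, 1)' };
await human.draw.all(canvas, result, drawOptions);
```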