diff --git a/.gitignore b/.gitignore
index 1d2e013f..2839e22d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,3 @@
node_modules
-private
pnpm-lock.yaml
+samples
diff --git a/.npmignore b/.npmignore
index 1d2e013f..e0b54f43 100644
--- a/.npmignore
+++ b/.npmignore
@@ -1,3 +1,6 @@
node_modules
-private
pnpm-lock.yaml
+samples
+typedoc
+test
+wiki
diff --git a/README.md b/README.md
index e4ba408a..89544fd8 100644
--- a/README.md
+++ b/README.md
@@ -23,7 +23,13 @@ JavaScript module using TensorFlow/JS Machine Learning library
Compatible with both software *tfjs-node* and
GPU accelerated backends *tfjs-node-gpu* using CUDA libraries
-Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) for processing of live WebCam video or static images
+
+
+Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) app for processing of live WebCam video or static images
+
+- To start video detection, simply press *Play*
+- To process images, simply drag & drop in your Browser window
+- Note: For optimal performance, select only models you'd like to use
@@ -89,20 +95,30 @@ All options as presented in the demo application...
-**Validation image:**
-> [demo/index.html](demo/index.html?image=%22../assets/human-sample-upper.jpg%22)
+**Face Close-up:**
+
-
+
-**Using static images:**
-> [demo/index.html](demo/index.html?images=true)
+**Face under a high angle:**
+
-
+
-**Live WebCam view:**
-> [demo/index.html](demo/index.html)
+**Full Person Details:**
+
-
+
+
+**Pose Detection:**
+
+
+
+
+**Large Group:**
+
+
+
**Face Similarity Matching:**
Extracts all faces from provided input images,
@@ -112,11 +128,15 @@ and optionally matches detected face with database of known people to guess thei

+
+
**Face3D OpenGL Rendering:**
> [demo/face3d.html](demo/face3d.html)

+
+
**468-Point Face Mesh Defails:**
(view in full resolution to see keypoints)
diff --git a/demo/index.js b/demo/index.js
index 2ee43fc8..d9d124e1 100644
--- a/demo/index.js
+++ b/demo/index.js
@@ -77,6 +77,7 @@ const ui = {
modelsPreload: true, // preload human models on startup
modelsWarmup: true, // warmup human models on startup
buffered: true, // should output be buffered between frames
+ interpolated: true, // should output be interpolated for smoothness between frames
iconSize: '48px', // ui icon sizes
// internal variables
@@ -228,8 +229,12 @@ async function drawResults(input) {
}
// draw all results using interpolated results
- const interpolated = human.next(result);
- human.draw.all(canvas, interpolated, drawOptions);
+ if (ui.interpolated) {
+ const interpolated = human.next(result);
+ human.draw.all(canvas, interpolated, drawOptions);
+ } else {
+ human.draw.all(canvas, result, drawOptions);
+ }
/* alternatively use individual functions
human.draw.face(canvas, result.face);
human.draw.body(canvas, result.body);
@@ -246,20 +251,21 @@ async function drawResults(input) {
const gpu = engine.backendInstance ? `gpu: ${(engine.backendInstance.numBytesInGPU ? engine.backendInstance.numBytesInGPU : 0).toLocaleString()} bytes` : '';
const memory = `system: ${engine.state.numBytes.toLocaleString()} bytes ${gpu} | tensors: ${engine.state.numTensors.toLocaleString()}`;
const processing = result.canvas ? `processing: ${result.canvas.width} x ${result.canvas.height}` : '';
- const avgDetect = Math.trunc(10 * ui.detectFPS.reduce((a, b) => a + b, 0) / ui.detectFPS.length) / 10;
- const avgDraw = Math.trunc(10 * ui.drawFPS.reduce((a, b) => a + b, 0) / ui.drawFPS.length) / 10;
+ const avgDetect = ui.detectFPS.length > 0 ? Math.trunc(10 * ui.detectFPS.reduce((a, b) => a + b, 0) / ui.detectFPS.length) / 10 : 0;
+ const avgDraw = ui.drawFPS.length > 0 ? Math.trunc(10 * ui.drawFPS.reduce((a, b) => a + b, 0) / ui.drawFPS.length) / 10 : 0;
const warning = (ui.detectFPS.length > 5) && (avgDetect < 5) ? 'warning: your performance is low: try switching to higher performance backend, lowering resolution or disabling some models' : '';
+ const fps = avgDetect > 0 ? `FPS process:${avgDetect} refresh:${avgDraw}` : '';
document.getElementById('log').innerHTML = `
video: ${ui.camera.name} | facing: ${ui.camera.facing} | screen: ${window.innerWidth} x ${window.innerHeight} camera: ${ui.camera.width} x ${ui.camera.height} ${processing}
backend: ${human.tf.getBackend()} | ${memory}
- performance: ${str(lastDetectedResult.performance)}ms FPS process:${avgDetect} refresh:${avgDraw}
+ performance: ${str(lastDetectedResult.performance)}ms ${fps}
${warning}
`;
ui.framesDraw++;
ui.lastFrame = performance.now();
// if buffered, immediate loop but limit frame rate although it's going to run slower as JS is singlethreaded
if (ui.buffered) {
- ui.drawThread = requestAnimationFrame(() => drawResults(input, canvas));
+ ui.drawThread = requestAnimationFrame(() => drawResults(input));
} else {
log('stopping buffered refresh');
if (ui.drawThread) cancelAnimationFrame(ui.drawThread);
@@ -431,7 +437,7 @@ function runHumanDetect(input, canvas, timestamp) {
ctx.drawImage(input, 0, 0, canvas.width, canvas.height);
const data = ctx.getImageData(0, 0, canvas.width, canvas.height);
// perform detection in worker
- webWorker(input, data, canvas, userConfig, timestamp);
+ webWorker(input, data, canvas, timestamp);
status();
} else {
human.detect(input, userConfig).then((result) => {
@@ -457,32 +463,66 @@ function runHumanDetect(input, canvas, timestamp) {
}
// main processing function when input is image, can use direct invocation or web worker
-async function processImage(input) {
+async function processImage(input, title) {
return new Promise((resolve) => {
const image = new Image();
+ image.onerror = async () => status('image loading error');
image.onload = async () => {
- log('processing image:', encodeURI(image.src));
+ ui.interpolated = false; // stop interpolating results if input is image
+ status(`processing image: ${title}`);
const canvas = document.getElementById('canvas');
image.width = image.naturalWidth;
image.height = image.naturalHeight;
canvas.width = human.config.filter.width && human.config.filter.width > 0 ? human.config.filter.width : image.naturalWidth;
canvas.height = human.config.filter.height && human.config.filter.height > 0 ? human.config.filter.height : image.naturalHeight;
+ const origCacheSensitivity = userConfig.cacheSensitivity;
+ userConfig.cacheSensitivity = 0;
const result = await human.detect(image, userConfig);
+ userConfig.cacheSensitivity = origCacheSensitivity;
lastDetectedResult = result;
await drawResults(image);
const thumb = document.createElement('canvas');
thumb.className = 'thumbnail';
- thumb.width = window.innerWidth / (ui.columns + 0.1);
+ thumb.width = ui.columns > 1 ? window.innerWidth / (ui.columns + 0.1) : window.innerWidth - 14;
thumb.height = thumb.width * canvas.height / canvas.width;
if (result.face && result.face.length > 0) {
thumb.title = result.face.map((a, i) => `#${i} face: ${Math.trunc(100 * a.faceScore)}% box: ${Math.trunc(100 * a.boxScore)}% age: ${Math.trunc(a.age)} gender: ${Math.trunc(100 * a.genderScore)}% ${a.gender}`).join(' | ');
} else {
thumb.title = 'no face detected';
}
+ thumb.addEventListener('click', (evt) => {
+ const stdWidth = ui.columns > 1 ? window.innerWidth / (ui.columns + 0.1) : window.innerWidth - 14;
+ // zoom in/out on click
+ if (evt.target.style.width === `${stdWidth}px`) {
+ evt.target.style.width = '';
+ evt.target.style.height = `${document.getElementById('log').offsetTop - document.getElementById('media').offsetTop}px`;
+ } else {
+ evt.target.style.width = `${stdWidth}px`;
+ evt.target.style.height = '';
+ }
+ // copy to clipboard on click
+ if (typeof ClipboardItem !== 'undefined' && navigator.clipboard) {
+ evt.target.toBlob((blob) => {
+ // eslint-disable-next-line no-undef
+ const item = new ClipboardItem({ 'image/png': blob });
+ navigator.clipboard.write([item]);
+ log('copied image to clipboard');
+ });
+ }
+ });
const ctx = thumb.getContext('2d');
ctx.drawImage(canvas, 0, 0, canvas.width, canvas.height, 0, 0, thumb.width, thumb.height);
- document.getElementById('samples-container').appendChild(thumb);
- image.src = '';
+ const prev = document.getElementsByClassName('thumbnail');
+ if (prev && prev.length > 0) document.getElementById('samples-container').insertBefore(thumb, prev[0]);
+ else document.getElementById('samples-container').appendChild(thumb);
+
+ // finish up
+ status();
+ document.getElementById('play').style.display = 'none';
+ document.getElementById('loader').style.display = 'none';
+ if (ui.detectThread) cancelAnimationFrame(ui.detectThread);
+ if (ui.drawThread) cancelAnimationFrame(ui.drawThread);
+
resolve(true);
};
image.src = input;
@@ -522,11 +562,7 @@ async function detectSampleImages() {
status('processing images');
document.getElementById('samples-container').innerHTML = '';
for (const m of Object.values(menu)) m.hide();
- for (const image of ui.samples) await processImage(image);
- status();
- document.getElementById('play').style.display = 'none';
- document.getElementById('loader').style.display = 'none';
- if (ui.detectThread) cancelAnimationFrame(ui.detectThread);
+ for (const image of ui.samples) await processImage(image, image);
}
function setupMenu() {
@@ -604,8 +640,8 @@ function setupMenu() {
human.config.hand.rotation = val;
});
menu.process.addHTML('