<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="utf-8">
  <title>Human</title>
  <meta name="viewport" content="width=device-width" id="viewport">
  <meta name="keywords" content="Human">
  <meta name="application-name" content="Human">
  <meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
  <meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
  <meta name="theme-color" content="#000000">
  <link rel="manifest" href="../manifest.webmanifest">
  <link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
  <link rel="apple-touch-icon" href="../../assets/icon.png">
  <style>
    @font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
    html { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; }
    body { margin: 0; background: black; color: white; overflow-x: hidden; width: 100vw; height: 100vh; }
    body::-webkit-scrollbar { display: none; }
  </style>
</head>
<body>
  <img id="image" src="../../samples/in/group-1.jpg" alt="test image" style="display: none">
  <div id="log" style="position: absolute; top: 0; margin: 1rem; font-size: 1rem; line-height: 1.4rem; font-family: monospace;"></div>
  <script type="module">
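    // benchmark page: runs the full Human detection pipeline against a static sample image
    // once per tfjs backend and reports timings to the browser console and the on-page log element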
    import Human from '../../dist/human.esm.js';

    const loop = 20; // number of iterations used when averaging input processing and inference times
    let initial = true; // print version and environment details only on the first benchmark run
    const backends = ['wasm', 'webgl', 'humangl', 'webgpu']; // tfjs backends to benchmark in sequence

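    // flatten log arguments into a single printable line; objects are JSON-stringified with braces, quotes, and brackets stripped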
    function str(...msg) {
      let line = '';
      for (const entry of msg) {
        if (typeof entry === 'object') line += JSON.stringify(entry).replace(/{|}|"|\[|\]/g, '').replace(/,/g, ', ');
        else line += entry;
      }
      return line;
    }

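    // timestamped logger: writes to the browser console and appends to the on-page #log element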
    function log(...msg) {
      const dt = new Date();
      const ts = `${dt.getHours().toString().padStart(2, '0')}:${dt.getMinutes().toString().padStart(2, '0')}:${dt.getSeconds().toString().padStart(2, '0')}.${dt.getMilliseconds().toString().padStart(3, '0')}`;
      // eslint-disable-next-line no-console
      if (msg) console.log(ts, 'Human:', ...msg);
      const el = document.getElementById('log');
      el.innerHTML += str(...msg) + '<br>';
    }

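    // configuration shared by all benchmark runs: every detection module enabled,
    // filtering disabled and cacheSensitivity set to 0 so each detect() call performs full processing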
    const myConfig = {
      modelBasePath: 'https://vladmandic.github.io/human/models',
      // wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.18.0/dist/',
      debug: true,
      async: true,
      cacheSensitivity: 0,
      filter: { enabled: false },
      face: {
        enabled: true,
        detector: { enabled: true, rotation: false },
        mesh: { enabled: true },
        iris: { enabled: true },
        description: { enabled: true },
        emotion: { enabled: true },
        antispoof: { enabled: true },
        liveness: { enabled: true },
      },
      hand: { enabled: true },
      body: { enabled: true },
      object: { enabled: true },
    };

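    // create a Human instance for the requested backend, verify the backend actually activated,
    // then time the first inference, standalone input processing, and an average over `loop` detect() calls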
    async function benchmark(backend) {
      myConfig.backend = backend;
      const human = new Human(myConfig);
      await human.init();
      await human.load();
      const loaded = Object.keys(human.models).filter((a) => human.models[a]);
      if (initial) {
        log('Human: ', human.version);
        log('TFJS: ', human.tf.version.tfjs);
        log('Environment: ', { platform: human.env.platform, agent: human.env.agent });
        log('Backends: ', human.env.backends);
        log('Support: ', { offscreen: human.env.offscreen, webgl: human.env.webgl.supported, webgpu: human.env.webgpu.supported, wasm: human.env.wasm.supported, simd: human.env.wasm.simd, multithread: human.env.wasm.multithread });
        log('Models: ', loaded);
        log('');
        initial = false;
      }
      const element = document.getElementById('image');
      const processed = await human.image(element);
      const t0 = human.now();
      await human.detect(processed.tensor, myConfig);
      const t1 = human.now();
      if (human.tf.getBackend().toLowerCase() === backend.toLowerCase()) {
        log('Backend: ', human.tf.getBackend());
        log('Memory state: ', human.tf.engine().memory());
        log('Initial inference: ', Math.round(t1 - t0));
        for (let i = 0; i < loop; i++) { // measure input processing alone
          const image = await human.image(element);
          human.tf.dispose(image.tensor);
        }
        const t2 = human.now();
        log('Input processing: ', Math.round(10 * (t2 - t1) / loop) / 10);
        for (let i = 0; i < loop; i++) await human.detect(processed.tensor, myConfig); // measure inference alone on the already-processed tensor
        const t3 = human.now();
        log('Average inference: ', Math.round((t3 - t2) / loop)); // average the detect loop only, excluding the input-processing loop above
      } else {
        log('Backend error: ', { desired: backend, current: human.tf.getBackend().toLowerCase() });
      }
      human.tf.dispose(processed.tensor); // release the preprocessed input tensor before moving to the next backend
      log('');
    }

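    // announce the backend list, then benchmark each backend sequentially once the page has loaded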
    log('Attempting backends: ', backends);
    async function main() {
      for (const backend of backends) await benchmark(backend);
    }

    window.onload = main;
  </script>
</body>
</html>