<!-- mirror of https://github.com/vladmandic/human -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Human</title>
<meta name="viewport" content="width=device-width" id="viewport">
<meta name="keywords" content="Human">
<meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
<link rel="manifest" href="../manifest.webmanifest">
<link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
<style>
@font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
body { font-family: 'Lato', 'Segoe UI'; font-size: 16px; font-variant: small-caps; margin: 0; background: black; color: white; overflow: hidden; width: 100vw; height: 100vh; }
</style>
</head>
<body>
<canvas id="canvas" style="margin: 0 auto; width: 100%"></canvas>
<video id="video" playsinline style="display: none"></video>
<pre id="log" style="padding: 8px"></pre>
<script type="module">
import * as H from '../../dist/human.esm.js'; // equivalent of import '@vladmandic/human'
const humanConfig = { // user configuration for human, used to fine-tune behavior
modelBasePath: '../../models',
filter: { enabled: true, equalization: false, flip: false },
face: { enabled: true, detector: { rotation: false }, mesh: { enabled: true }, attention: { enabled: false }, iris: { enabled: true }, description: { enabled: true }, emotion: { enabled: true } },
body: { enabled: true },
hand: { enabled: true },
object: { enabled: false },
gesture: { enabled: true },
};
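// any option omitted above keeps the library default; as a hypothetical example
// (not in the original), a different backend could be selected before the
// instance is created, e.g.:
//   humanConfig.backend = 'wasm'; // assumption: backend values such as 'webgl' or 'wasm'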
const human = new H.Human(humanConfig); // create instance of human with overrides from user configuration
const video = document.getElementById('video');
const canvas = document.getElementById('canvas');
async function webCam() { // initialize webcam
const options = { audio: false, video: { facingMode: 'user', resizeMode: 'none', width: { ideal: document.body.clientWidth }, height: { ideal: document.body.clientHeight } } }; // request ideal webcam properties; 'ideal' constraints let the browser pick the closest supported mode
const stream = await navigator.mediaDevices.getUserMedia(options); // find webcam stream that best matches what we want
const videoReady = new Promise((resolve) => { video.onloadeddata = () => resolve(true); }); // resolves once the first frame of video data is available
video.srcObject = stream; // assign webcam stream to a video element
void video.play(); // start webcam playback; play() returns a promise which is intentionally not awaited here
await videoReady; // wait for video ready
canvas.width = video.videoWidth; // set canvas resolution to input webcam native resolution
canvas.height = video.videoHeight;
canvas.onclick = () => { // pause when clicked on screen and resume on next click
if (video.paused) void video.play();
else video.pause();
};
}
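// note: getUserMedia() rejects if the user denies camera access or no device
// matches the constraints; a production page would wrap the call in try/catch
// and surface the error in the #log element instead of failing silently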
async function drawLoop() { // main screen refresh loop
human.draw.canvas(video, canvas); // draw original video to screen canvas; better than using the processed image since this loop runs faster than the processing loop
const interpolated = human.next(); // get smoothed result interpolated from last-known results
await human.draw.all(canvas, interpolated); // draw labels, boxes, lines, etc.
setTimeout(drawLoop, 30); // throttle refresh from the maximum display rate down to a target of roughly 30 fps
}
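// detection and drawing are deliberately decoupled: human.video() (started in
// main below) runs model inference as fast as the hardware allows, while this
// loop only renders interpolated results, so drawing stays smooth even when
// inference is slower than the display refresh rate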
async function main() { // main entry point
document.getElementById('log').innerHTML = `human version: ${human.version} | tfjs version: ${human.tf.version['tfjs-core']}<br>platform: ${human.env.platform} | agent ${human.env.agent}<br>`;
await webCam(); // start webcam
human.video(video); // instruct human to detect video frames
await drawLoop(); // start draw loop
}
window.onload = main;
</script>
</body>
</html>