mirror of https://github.com/vladmandic/human
add facedetect demo and fix model async load
parent
9f24aad194
commit
55efcafc0f
15
README.md
15
README.md
|
@ -70,6 +70,7 @@
|
|||
- **Full** [[*Live*]](https://vladmandic.github.io/human/demo/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo): Main browser demo app that showcases all Human capabilities
|
||||
- **Simple** [[*Live*]](https://vladmandic.github.io/human/demo/typescript/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/typescript): Simple WebCam processing demo in TypeScript
|
||||
- **Embedded** [[*Live*]](https://vladmandic.github.io/human/demo/video/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/video): Even simpler demo with tiny code embedded in HTML file
|
||||
- **Face Detect** [[*Live*]](https://vladmandic.github.io/human/demo/facedetect/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facedetect): Extracts faces from images and processes details
|
||||
- **Face Match** [[*Live*]](https://vladmandic.github.io/human/demo/facematch/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/facematch): Extract faces from images, calculates face descriptors and similarities and matches them to known database
|
||||
- **Face ID** [[*Live*]](https://vladmandic.github.io/human/demo/faceid/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/faceid): Runs multiple checks to validate webcam input before performing face match to faces in IndexedDB
|
||||
- **Multi-thread** [[*Live*]](https://vladmandic.github.io/human/demo/multithread/index.html) [[*Details*]](https://github.com/vladmandic/human/tree/main/demo/multithread): Runs each Human module in a separate web worker for highest possible performance
|
||||
|
@ -173,7 +174,13 @@ and optionally matches detected face with database of known people to guess thei
|
|||
|
||||
[<img src="assets/screenshot-facematch.jpg" width="640"/>](assets/screenshot-facematch.jpg)
|
||||
|
||||
2. **Face ID:**
|
||||
2. **Face Detect:**
|
||||
Extracts all detected faces from loaded images on-demand and highlights face details on a selected face
|
||||
> [demo/facedetect](demo/facedetect/index.html)
|
||||
|
||||
[<img src="assets/screenshot-facedetect.jpg" width="640"/>](assets/screenshot-facedetect.jpg)
|
||||
|
||||
3. **Face ID:**
|
||||
Performs validation check on a webcam input to detect a real face and matches it to known faces stored in database
|
||||
> [demo/faceid](demo/faceid/index.html)
|
||||
|
||||
|
@ -181,7 +188,7 @@ Performs validation check on a webcam input to detect a real face and matches it
|
|||
|
||||
<br>
|
||||
|
||||
3. **3D Rendering:**
|
||||
4. **3D Rendering:**
|
||||
> [human-motion](https://github.com/vladmandic/human-motion)
|
||||
|
||||
[<img src="https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-face.jpg" width="640"/>](https://github.com/vladmandic/human-motion/raw/main/assets/screenshot-face.jpg)
|
||||
|
@ -190,14 +197,14 @@ Performs validation check on a webcam input to detect a real face and matches it
|
|||
|
||||
<br>
|
||||
|
||||
4. **VR Model Tracking:**
|
||||
5. **VR Model Tracking:**
|
||||
> [human-three-vrm](https://github.com/vladmandic/human-three-vrm)
|
||||
> [human-bjs-vrm](https://github.com/vladmandic/human-bjs-vrm)
|
||||
|
||||
[<img src="https://github.com/vladmandic/human-three-vrm/raw/main/assets/human-vrm-screenshot.jpg" width="640"/>](https://github.com/vladmandic/human-three-vrm/raw/main/assets/human-vrm-screenshot.jpg)
|
||||
|
||||
|
||||
5. **Human as OS native application:**
|
||||
6. **Human as OS native application:**
|
||||
> [human-electron](https://github.com/vladmandic/human-electron)
|
||||
|
||||
<br>
|
||||
|
|
4
TODO.md
4
TODO.md
|
@ -63,6 +63,8 @@ Optimizations:
|
|||
`Human` is now 30% smaller :)
|
||||
As usual, `Human` has **zero** runtime dependencies,
|
||||
all *devDependencies* are only to rebuild `Human` itself
|
||||
- Default hand skeleton model changed from `handlandmark-full` to `handlandmark-lite`
|
||||
Both models are still supported, this reduces default size and increases performance
|
||||
|
||||
Features:
|
||||
- Add [draw label templates](https://github.com/vladmandic/human/wiki/Draw)
|
||||
|
@ -88,6 +90,7 @@ Architecture:
|
|||
Better [TypeDoc specs](https://vladmandic.github.io/human/typedoc/index.html)
|
||||
- Add named export for improved bundler support when using non-default imports
|
||||
- Cleanup Git history for `dist`/`typedef`/`types`
|
||||
- Cleanup `@vladmandic/human-models`
|
||||
- Support for **NodeJS v19**
|
||||
- Upgrade to **TypeScript 4.9**
|
||||
|
||||
|
@ -97,3 +100,4 @@ Breaking changes:
|
|||
- Moved `human.similarity`, `human.distance` and `human.match` to namespace `human.match.*`
|
||||
- Obsolete `human.enhance()`
|
||||
- Obsolete `human.gl`
|
||||
- Renamed model `mb3-centernet` to `centernet`
|
||||
|
|
Binary file not shown.
After Width: | Height: | Size: 74 KiB |
|
@ -0,0 +1,104 @@
|
|||
/**
 * Human demo for browsers
 *
 * Demo for face detection
 */

/** @type {Human} */
import { Human } from '../../dist/human.esm.js';
import { showLoader, hideLoader } from './loader.js';

const humanConfig = { // user configuration for human, used to fine-tune behavior
  debug: true, // verbose logging to console
  modelBasePath: 'https://vladmandic.github.io/human-models/models/', // models are fetched from the hosted human-models repo, not bundled locally
  filter: { enabled: true, equalization: false, flip: false }, // input pre-processing: no histogram equalization, no mirroring
  face: {
    enabled: true,
    // rotation correction + high maxDetected for group photos; low minConfidence to keep marginal faces;
    // return: true makes detect() include the cropped face tensor used to paint thumbnails
    detector: { rotation: true, maxDetected: 100, minConfidence: 0.2, return: true },
    iris: { enabled: true },
    description: { enabled: true }, // age/gender/embedding
    emotion: { enabled: true },
    antispoof: { enabled: true },
    liveness: { enabled: true },
  },
  // face-only demo: all other modules disabled to reduce load/compile time
  body: { enabled: false },
  hand: { enabled: false },
  object: { enabled: false },
  gesture: { enabled: false },
  segmentation: { enabled: false },
};

const human = new Human(humanConfig); // new instance of human
||||
/**
 * Runs face detection on an image element and appends one cropped canvas per detected face.
 * Clicking a face canvas shows its details in the #description element.
 * @param {HTMLImageElement} imgEl image to analyze
 */
async function addFaces(imgEl) {
  showLoader('human: busy');
  const faceEl = document.getElementById('faces');
  if (!faceEl) { // robustness: bail out cleanly if container is missing instead of throwing mid-loader
    hideLoader();
    return;
  }
  faceEl.innerHTML = '';
  const res = await human.detect(imgEl);
  for (const face of res.face) {
    const canvas = document.createElement('canvas');
    const emotion = face.emotion?.map((e) => `${Math.round(100 * e.score)}% ${e.emotion}`) || [];
    // NOTE(review): replacement target/char below reproduced as-is from source — confirm upstream intent (&nbsp;?)
    canvas.title = `
      source: ${imgEl.src.substring(0, 64)}
      score: ${Math.round(100 * face.boxScore)}% detection ${Math.round(100 * face.faceScore)}% analysis
      age: ${face.age} years
      gender: ${face.gender} score ${Math.round(100 * face.genderScore)}%
      emotion: ${emotion.join(' | ')}
      check: ${Math.round(100 * face.real)}% real ${Math.round(100 * face.live)}% live
    `.replace(/ /g, ' ');
    canvas.onclick = (e) => {
      e.preventDefault();
      document.getElementById('description').innerHTML = canvas.title;
    };
    // fix: toPixels is async — await the render before disposing the tensor,
    // otherwise dispose() can free the tensor while toPixels is still reading it
    await human.tf.browser.toPixels(face.tensor, canvas);
    human.tf.dispose(face.tensor);
    faceEl.appendChild(canvas);
  }
  hideLoader();
}
||||
/**
 * Creates a 256x256 thumbnail for a sample image and wires it up:
 * on load it is appended to the #images strip (scrolled to the end),
 * on click it triggers face analysis via addFaces().
 * @param {string} imageUri image location (relative path or data: uri)
 */
function addImage(imageUri) {
  const thumb = new Image(256, 256);
  thumb.title = imageUri.substring(0, 64); // tooltip shows (truncated) source
  thumb.onerror = () => console.error('addImage', { imageUri }); // eslint-disable-line no-console
  thumb.onclick = () => addFaces(thumb);
  thumb.onload = () => {
    const strip = document.getElementById('images');
    strip.appendChild(thumb); // add image only once it loaded ok
    strip.scroll(strip?.offsetWidth, 0); // keep newest image in view
  };
  thumb.src = encodeURI(imageUri); // assigning src last so handlers are registered first
}
||||
/**
 * Enables drag-and-drop of image files onto the page; each dropped image file
 * is read as a data-uri and added to the image strip via addImage().
 * Also clears the #description panel when clicking outside any face canvas.
 */
async function initDragAndDrop() {
  document.body.addEventListener('dragenter', (evt) => evt.preventDefault());
  document.body.addEventListener('dragleave', (evt) => evt.preventDefault());
  document.body.addEventListener('dragover', (evt) => evt.preventDefault());
  document.body.addEventListener('drop', async (evt) => {
    evt.preventDefault();
    evt.dataTransfer.dropEffect = 'copy';
    for (const f of evt.dataTransfer.files) {
      // fix: create one FileReader per file — calling readAsDataURL on a reader
      // whose previous read is still in progress throws InvalidStateError,
      // so a single shared reader silently broke multi-file drops
      const reader = new FileReader();
      reader.onload = async (e) => {
        if (e.target.result.startsWith('data:image')) await addImage(e.target.result); // accept image files only
      };
      reader.readAsDataURL(f);
    }
  });
  document.body.onclick = (e) => {
    if (e.target.localName !== 'canvas') document.getElementById('description').innerHTML = '';
  };
}
|
||||
/**
 * Demo entry point: loads and warms up human models, populates the
 * image strip with bundled sample images, and enables drag-and-drop.
 */
async function main() {
  showLoader('loading models');
  await human.load();
  showLoader('compiling models');
  await human.warmup();
  showLoader('loading images');
  const samples = ['group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg', 'solvay1927.jpg', 'stock-group-1.jpg', 'stock-group-2.jpg'];
  for (const uri of samples.map((name) => `../../samples/in/${name}`)) addImage(uri);
  initDragAndDrop();
  hideLoader();
}

window.onload = main;
|
@ -0,0 +1,42 @@
|
|||
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="utf-8">
  <title>Human</title>
  <!-- <meta http-equiv="content-type" content="text/html; charset=utf-8"> -->
  <meta name="viewport" content="width=device-width, shrink-to-fit=yes">
  <meta name="keywords" content="Human">
  <meta name="application-name" content="Human">
  <meta name="description" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
  <meta name="msapplication-tooltip" content="Human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition; Author: Vladimir Mandic <https://github.com/vladmandic>">
  <meta name="theme-color" content="#000000">
  <link rel="manifest" href="../manifest.webmanifest">
  <link rel="shortcut icon" href="../../favicon.ico" type="image/x-icon">
  <link rel="apple-touch-icon" href="../../assets/icon.png">
  <!-- demo logic: populates #images, #faces and #description below -->
  <script src="./facedetect.js" type="module"></script>
  <style>
    img { object-fit: contain; }
    @font-face { font-family: 'Lato'; font-display: swap; font-style: normal; font-weight: 100; src: local('Lato'), url('../../assets/lato-light.woff2') }
    html { font-family: 'Lato', 'Segoe UI'; font-size: 24px; font-variant: small-caps; }
    body { margin: 24px; background: black; color: white; overflow: hidden; text-align: -webkit-center; width: 100vw; height: 100vh; }
    ::-webkit-scrollbar { height: 8px; border: 0; border-radius: 0; }
    ::-webkit-scrollbar-thumb { background: grey }
    ::-webkit-scrollbar-track { margin: 3px; }
    /* face thumbnails zoom slightly on hover */
    canvas { width: 192px; height: 192px; margin: 2px; padding: 2px; cursor: grab; transform: scale(1.00); transition : all 0.3s ease; }
    canvas:hover { filter: grayscale(1); transform: scale(1.08); transition : all 0.3s ease; }
  </style>
</head>
<body>
  <!-- custom element defined in loader.js; shown/hidden by showLoader/hideLoader -->
  <component-loader></component-loader>
  <div style="display: flex">
    <div>
      <div style="margin: 24px">select image to show detected faces<br>drag & drop to add your images</div>
      <!-- horizontal scrolling strip of source images -->
      <div id="images" style="display: flex; width: 98vw; overflow-x: auto; overflow-y: hidden; scroll-behavior: smooth"></div>
    </div>
  </div>
  <div id="list" style="height: 10px"></div>
  <div style="margin: 24px">hover or click on face to show details</div>
  <!-- cropped per-face canvases are appended here by facedetect.js -->
  <div id="faces" style="overflow-y: auto"></div>
  <div id="description" style="white-space: pre;"></div>
</body>
</html>
|
@ -0,0 +1,43 @@
|
|||
// module-level handle to the <component-loader> element; assigned in its connectedCallback
let loader;

/** Shows the spinner overlay with the given message text */
export function showLoader(msg) {
  loader.setAttribute('msg', msg); // 'msg' is observed by the component and rendered as its message
  loader.style.display = 'block';
}

/** Hides the spinner overlay */
export function hideLoader() {
  return (loader.style.display = 'none'); // returns 'none', matching the original arrow-expression form
}
||||
// Custom element rendering a triple-ring CSS spinner plus a message line inside a shadow root.
class ComponentLoader extends HTMLElement { // watch for attributes
  // message line element; lives outside the shadow styles' spinner rings
  message = document.createElement('div');

  // only the 'msg' attribute is observed; changing it updates the message text
  static get observedAttributes() { return ['msg']; }

  attributeChangedCallback(_name, _prevVal, currVal) {
    this.message.innerHTML = currVal;
  }

  connectedCallback() { // triggered on insert
    this.attachShadow({ mode: 'open' });
    const css = document.createElement('style');
    css.innerHTML = `
      .loader-container { top: 450px; justify-content: center; position: fixed; width: 100%; }
      .loader-message { font-size: 1.5rem; padding: 1rem; }
      .loader { width: 300px; height: 300px; border: 3px solid transparent; border-radius: 50%; border-top: 4px solid #f15e41; animation: spin 4s linear infinite; position: relative; }
      .loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: 4px solid transparent; }
      .loader::before { border-top-color: #bad375; animation: 3s spin linear infinite; }
      .loader::after { border-top-color: #26a9e0; animation: spin 1.5s linear infinite; }
      @keyframes spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } }
    `;
    const container = document.createElement('div');
    container.id = 'loader-container';
    container.className = 'loader-container';
    // 'loader' temporarily points at the inner spinner div while it is being built...
    loader = document.createElement('div');
    loader.id = 'loader';
    loader.className = 'loader';
    this.message.id = 'loader-message';
    this.message.className = 'loader-message';
    this.message.innerHTML = '';
    container.appendChild(this.message);
    container.appendChild(loader);
    this.shadowRoot?.append(css, container);
    // ...then is deliberately re-pointed at the component itself, so that
    // showLoader/hideLoader toggle the whole element and set its 'msg' attribute
    loader = this;
  }
}

customElements.define('component-loader', ComponentLoader);
|
|
@ -64,7 +64,7 @@ async function loadFaceMatchDB() {
|
|||
}
|
||||
}
|
||||
|
||||
async function SelectFaceCanvas(face) {
|
||||
async function selectFaceCanvas(face) {
|
||||
// if we have face image tensor, enhance it and display it
|
||||
let embedding;
|
||||
document.getElementById('orig').style.filter = 'blur(16px)';
|
||||
|
@ -72,19 +72,6 @@ async function SelectFaceCanvas(face) {
|
|||
title('Sorting Faces by Similarity');
|
||||
const c = document.getElementById('orig');
|
||||
await human.tf.browser.toPixels(face.tensor, c);
|
||||
/*
|
||||
const enhanced = human.enhance(face);
|
||||
if (enhanced) {
|
||||
const c = document.getElementById('orig');
|
||||
const squeeze = human.tf.squeeze(enhanced);
|
||||
const normalize = human.tf.div(squeeze, 255);
|
||||
await human.tf.browser.toPixels(normalize, c);
|
||||
human.tf.dispose([enhanced, squeeze, normalize]);
|
||||
const ctx = c.getContext('2d');
|
||||
ctx.font = 'small-caps 0.4rem "Lato"';
|
||||
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
|
||||
}
|
||||
*/
|
||||
const arr = db.map((rec) => rec.embedding);
|
||||
const res = await human.match.find(face.embedding, arr);
|
||||
log('Match:', db[res.index].name);
|
||||
|
@ -139,7 +126,7 @@ async function SelectFaceCanvas(face) {
|
|||
title('Selected Face');
|
||||
}
|
||||
|
||||
async function AddFaceCanvas(index, res, fileName) {
|
||||
async function addFaceCanvas(index, res, fileName) {
|
||||
all[index] = res.face;
|
||||
for (const i in res.face) {
|
||||
if (!res.face[i].tensor) continue; // did not get valid results
|
||||
|
@ -160,7 +147,7 @@ async function AddFaceCanvas(index, res, fileName) {
|
|||
`.replace(/ /g, ' ');
|
||||
await human.tf.browser.toPixels(res.face[i].tensor, canvas);
|
||||
const ctx = canvas.getContext('2d');
|
||||
if (!ctx) return false;
|
||||
if (!ctx) return;
|
||||
ctx.font = 'small-caps 0.8rem "Lato"';
|
||||
ctx.fillStyle = 'rgba(255, 255, 255, 1)';
|
||||
ctx.fillText(`${res.face[i].age}y ${(100 * (res.face[i].genderScore || 0)).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6);
|
||||
|
@ -171,12 +158,12 @@ async function AddFaceCanvas(index, res, fileName) {
|
|||
document.getElementById('faces').appendChild(canvas);
|
||||
canvas.addEventListener('click', (evt) => {
|
||||
log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, 'Source:', evt.target.tag.source, all[evt.target.tag.sample][evt.target.tag.face]);
|
||||
SelectFaceCanvas(all[evt.target.tag.sample][evt.target.tag.face]);
|
||||
selectFaceCanvas(all[evt.target.tag.sample][evt.target.tag.face]);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
async function AddImageElement(index, image, length) {
|
||||
async function addImageElement(index, image, length) {
|
||||
const faces = all.reduce((prev, curr) => prev += curr.length, 0);
|
||||
title(`Analyzing Input Images<br> ${Math.round(100 * index / length)}% [${index} / ${length}]<br>Found ${faces} Faces`);
|
||||
return new Promise((resolve) => {
|
||||
|
@ -185,7 +172,7 @@ async function AddImageElement(index, image, length) {
|
|||
document.getElementById('images').appendChild(img); // and finally we can add it
|
||||
human.detect(img, userConfig)
|
||||
.then((res) => { // eslint-disable-line promise/always-return
|
||||
AddFaceCanvas(index, res, image); // then wait until image is analyzed
|
||||
addFaceCanvas(index, res, image); // then wait until image is analyzed
|
||||
resolve(true);
|
||||
})
|
||||
.catch(() => log('human detect error'));
|
||||
|
@ -226,18 +213,23 @@ async function main() {
|
|||
// could not dynamically enumerate images so using static list
|
||||
if (images.length === 0) {
|
||||
images = [
|
||||
'ai-body.jpg', 'solvay1927.jpg', 'ai-upper.jpg',
|
||||
'person-carolina.jpg', 'person-celeste.jpg', 'person-leila1.jpg', 'person-leila2.jpg', 'person-lexi.jpg', 'person-linda.jpg', 'person-nicole.jpg', 'person-tasia.jpg',
|
||||
'person-tetiana.jpg', 'person-vlado1.jpg', 'person-vlado5.jpg', 'person-vlado.jpg', 'person-christina.jpg', 'person-lauren.jpg',
|
||||
'ai-face.jpg', 'ai-upper.jpg', 'ai-body.jpg', 'solvay1927.jpg',
|
||||
'group-1.jpg', 'group-2.jpg', 'group-3.jpg', 'group-4.jpg', 'group-5.jpg', 'group-6.jpg', 'group-7.jpg',
|
||||
'daz3d-brianna.jpg', 'daz3d-chiyo.jpg', 'daz3d-cody.jpg', 'daz3d-drew-01.jpg', 'daz3d-drew-02.jpg', 'daz3d-ella-01.jpg', 'daz3d-ella-02.jpg', 'daz3d-gillian.jpg',
|
||||
'daz3d-hye-01.jpg', 'daz3d-hye-02.jpg', 'daz3d-kaia.jpg', 'daz3d-karen.jpg', 'daz3d-kiaria-01.jpg', 'daz3d-kiaria-02.jpg', 'daz3d-lilah-01.jpg', 'daz3d-lilah-02.jpg',
|
||||
'daz3d-lilah-03.jpg', 'daz3d-lila.jpg', 'daz3d-lindsey.jpg', 'daz3d-megah.jpg', 'daz3d-selina-01.jpg', 'daz3d-selina-02.jpg', 'daz3d-snow.jpg',
|
||||
'daz3d-sunshine.jpg', 'daz3d-taia.jpg', 'daz3d-tuesday-01.jpg', 'daz3d-tuesday-02.jpg', 'daz3d-tuesday-03.jpg', 'daz3d-zoe.jpg', 'daz3d-ginnifer.jpg',
|
||||
'daz3d-_emotions01.jpg', 'daz3d-_emotions02.jpg', 'daz3d-_emotions03.jpg', 'daz3d-_emotions04.jpg', 'daz3d-_emotions05.jpg',
|
||||
'person-celeste.jpg', 'person-christina.jpg', 'person-lauren.jpg', 'person-lexi.jpg', 'person-linda.jpg', 'person-nicole.jpg', 'person-tasia.jpg', 'person-tetiana.jpg', 'person-vlado.jpg', 'person-vlado1.jpg', 'person-vlado5.jpg',
|
||||
'stock-group-1.jpg', 'stock-group-2.jpg',
|
||||
'stock-models-1.jpg', 'stock-models-2.jpg', 'stock-models-3.jpg', 'stock-models-4.jpg', 'stock-models-5.jpg', 'stock-models-6.jpg', 'stock-models-7.jpg', 'stock-models-8.jpg', 'stock-models-9.jpg',
|
||||
'stock-teen-1.jpg', 'stock-teen-2.jpg', 'stock-teen-3.jpg', 'stock-teen-4.jpg', 'stock-teen-5.jpg', 'stock-teen-6.jpg', 'stock-teen-7.jpg', 'stock-teen-8.jpg',
|
||||
'stock-models-10.jpg', 'stock-models-11.jpg', 'stock-models-12.jpg', 'stock-models-13.jpg', 'stock-models-14.jpg', 'stock-models-15.jpg', 'stock-models-16.jpg',
|
||||
'cgi-model-1.jpg', 'cgi-model-2.jpg', 'cgi-model-3.jpg', 'cgi-model-4.jpg', 'cgi-model-5.jpg', 'cgi-model-6.jpg', 'cgi-model-7.jpg', 'cgi-model-8.jpg', 'cgi-model-9.jpg',
|
||||
'cgi-model-10.jpg', 'cgi-model-11.jpg', 'cgi-model-12.jpg', 'cgi-model-13.jpg', 'cgi-model-14.jpg', 'cgi-model-15.jpg', 'cgi-model-18.jpg', 'cgi-model-19.jpg',
|
||||
'cgi-model-20.jpg', 'cgi-model-21.jpg', 'cgi-model-22.jpg', 'cgi-model-23.jpg', 'cgi-model-24.jpg', 'cgi-model-25.jpg', 'cgi-model-26.jpg', 'cgi-model-27.jpg', 'cgi-model-28.jpg', 'cgi-model-29.jpg',
|
||||
'cgi-model-30.jpg', 'cgi-model-31.jpg', 'cgi-model-33.jpg', 'cgi-model-34.jpg',
|
||||
'cgi-multiangle-1.jpg', 'cgi-multiangle-2.jpg', 'cgi-multiangle-3.jpg', 'cgi-multiangle-4.jpg', 'cgi-multiangle-6.jpg', 'cgi-multiangle-7.jpg', 'cgi-multiangle-8.jpg', 'cgi-multiangle-9.jpg', 'cgi-multiangle-10.jpg', 'cgi-multiangle-11.jpg',
|
||||
'stock-emotions-a-1.jpg', 'stock-emotions-a-2.jpg', 'stock-emotions-a-3.jpg', 'stock-emotions-a-4.jpg', 'stock-emotions-a-5.jpg', 'stock-emotions-a-6.jpg', 'stock-emotions-a-7.jpg', 'stock-emotions-a-8.jpg',
|
||||
'stock-emotions-b-1.jpg', 'stock-emotions-b-2.jpg', 'stock-emotions-b-3.jpg', 'stock-emotions-b-4.jpg', 'stock-emotions-b-5.jpg', 'stock-emotions-b-6.jpg', 'stock-emotions-b-7.jpg', 'stock-emotions-b-8.jpg',
|
||||
];
|
||||
// add prefix for gitpages
|
||||
images = images.map((a) => `/human/samples/in/${a}`);
|
||||
images = images.map((a) => `../../samples/in/${a}`);
|
||||
log('Adding static image list:', images);
|
||||
} else {
|
||||
log('Discovered images:', images);
|
||||
|
@ -246,7 +238,7 @@ async function main() {
|
|||
// images = ['/samples/in/person-lexi.jpg', '/samples/in/person-carolina.jpg', '/samples/in/solvay1927.jpg'];
|
||||
|
||||
const t0 = human.now();
|
||||
for (let i = 0; i < images.length; i++) await AddImageElement(i, images[i], images.length);
|
||||
for (let i = 0; i < images.length; i++) await addImageElement(i, images[i], images.length);
|
||||
const t1 = human.now();
|
||||
|
||||
// print stats
|
||||
|
|
|
@ -247,7 +247,7 @@ var config = {
|
|||
modelPath: "handtrack.json"
|
||||
},
|
||||
skeleton: {
|
||||
modelPath: "handlandmark-full.json"
|
||||
modelPath: "handlandmark-lite.json"
|
||||
}
|
||||
},
|
||||
object: {
|
||||
|
@ -13761,9 +13761,11 @@ var Models = class {
|
|||
m.selfie = this.instance.config.segmentation.enabled && !this.models.selfie && ((_y = this.instance.config.segmentation.modelPath) == null ? void 0 : _y.includes("selfie")) ? load21(this.instance.config) : null;
|
||||
m.meet = this.instance.config.segmentation.enabled && !this.models.meet && ((_z = this.instance.config.segmentation.modelPath) == null ? void 0 : _z.includes("meet")) ? load16(this.instance.config) : null;
|
||||
m.rvm = this.instance.config.segmentation.enabled && !this.models.rvm && ((_A = this.instance.config.segmentation.modelPath) == null ? void 0 : _A.includes("rvm")) ? load20(this.instance.config) : null;
|
||||
await Promise.all([...Object.values(m)]);
|
||||
for (const model23 of Object.keys(m))
|
||||
this.models[model23] = m[model23] || this.models[model23] || null;
|
||||
for (const [model23, promise] of Object.entries(m)) {
|
||||
if (promise == null ? void 0 : promise["then"])
|
||||
promise["then"]((val) => this.models[model23] = val);
|
||||
}
|
||||
await Promise.all(Object.values(m));
|
||||
}
|
||||
list() {
|
||||
const models3 = Object.keys(this.models).map((model23) => {
|
||||
|
@ -14685,7 +14687,9 @@ async function runCompile(instance) {
|
|||
tfjs_esm_exports.env().set("ENGINE_COMPILE_ONLY", true);
|
||||
const numTensorsStart = tfjs_esm_exports.engine().state.numTensors;
|
||||
const compiledModels = [];
|
||||
for (const [modelName, model23] of Object.entries(instance.models).filter(([key, val]) => key !== null && val !== null)) {
|
||||
for (const [modelName, model23] of Object.entries(instance.models.models)) {
|
||||
if (!model23)
|
||||
continue;
|
||||
const shape = (model23 == null ? void 0 : model23.modelSignature) && ((_b = (_a = model23 == null ? void 0 : model23.inputs) == null ? void 0 : _a[0]) == null ? void 0 : _b.shape) ? [...model23.inputs[0].shape] : [1, 64, 64, 3];
|
||||
const dtype = (model23 == null ? void 0 : model23.modelSignature) && ((_d = (_c = model23 == null ? void 0 : model23.inputs) == null ? void 0 : _c[0]) == null ? void 0 : _d.dtype) ? model23.inputs[0].dtype : "float32";
|
||||
for (let dim = 0; dim < shape.length; dim++) {
|
||||
|
|
|
@ -30888,7 +30888,7 @@ var config = {
|
|||
modelPath: "handtrack.json"
|
||||
},
|
||||
skeleton: {
|
||||
modelPath: "handlandmark-full.json"
|
||||
modelPath: "handlandmark-lite.json"
|
||||
}
|
||||
},
|
||||
object: {
|
||||
|
@ -44402,9 +44402,11 @@ var Models = class {
|
|||
m.selfie = this.instance.config.segmentation.enabled && !this.models.selfie && ((_y = this.instance.config.segmentation.modelPath) == null ? void 0 : _y.includes("selfie")) ? load21(this.instance.config) : null;
|
||||
m.meet = this.instance.config.segmentation.enabled && !this.models.meet && ((_z2 = this.instance.config.segmentation.modelPath) == null ? void 0 : _z2.includes("meet")) ? load16(this.instance.config) : null;
|
||||
m.rvm = this.instance.config.segmentation.enabled && !this.models.rvm && ((_A2 = this.instance.config.segmentation.modelPath) == null ? void 0 : _A2.includes("rvm")) ? load20(this.instance.config) : null;
|
||||
await Promise.all([...Object.values(m)]);
|
||||
for (const model23 of Object.keys(m))
|
||||
this.models[model23] = m[model23] || this.models[model23] || null;
|
||||
for (const [model23, promise] of Object.entries(m)) {
|
||||
if (promise == null ? void 0 : promise["then"])
|
||||
promise["then"]((val) => this.models[model23] = val);
|
||||
}
|
||||
await Promise.all(Object.values(m));
|
||||
}
|
||||
list() {
|
||||
const models3 = Object.keys(this.models).map((model23) => {
|
||||
|
@ -45326,7 +45328,9 @@ async function runCompile(instance) {
|
|||
O().set("ENGINE_COMPILE_ONLY", true);
|
||||
const numTensorsStart = cr().state.numTensors;
|
||||
const compiledModels = [];
|
||||
for (const [modelName, model23] of Object.entries(instance.models).filter(([key, val]) => key !== null && val !== null)) {
|
||||
for (const [modelName, model23] of Object.entries(instance.models.models)) {
|
||||
if (!model23)
|
||||
continue;
|
||||
const shape = (model23 == null ? void 0 : model23.modelSignature) && ((_b2 = (_a2 = model23 == null ? void 0 : model23.inputs) == null ? void 0 : _a2[0]) == null ? void 0 : _b2.shape) ? [...model23.inputs[0].shape] : [1, 64, 64, 3];
|
||||
const dtype = (model23 == null ? void 0 : model23.modelSignature) && ((_d2 = (_c2 = model23 == null ? void 0 : model23.inputs) == null ? void 0 : _c2[0]) == null ? void 0 : _d2.dtype) ? model23.inputs[0].dtype : "float32";
|
||||
for (let dim = 0; dim < shape.length; dim++) {
|
||||
|
|
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
|
@ -286,7 +286,7 @@ var config = {
|
|||
modelPath: "handtrack.json"
|
||||
},
|
||||
skeleton: {
|
||||
modelPath: "handlandmark-full.json"
|
||||
modelPath: "handlandmark-lite.json"
|
||||
}
|
||||
},
|
||||
object: {
|
||||
|
@ -13862,9 +13862,11 @@ var Models = class {
|
|||
m.selfie = this.instance.config.segmentation.enabled && !this.models.selfie && ((_y = this.instance.config.segmentation.modelPath) == null ? void 0 : _y.includes("selfie")) ? load21(this.instance.config) : null;
|
||||
m.meet = this.instance.config.segmentation.enabled && !this.models.meet && ((_z = this.instance.config.segmentation.modelPath) == null ? void 0 : _z.includes("meet")) ? load16(this.instance.config) : null;
|
||||
m.rvm = this.instance.config.segmentation.enabled && !this.models.rvm && ((_A = this.instance.config.segmentation.modelPath) == null ? void 0 : _A.includes("rvm")) ? load20(this.instance.config) : null;
|
||||
await Promise.all([...Object.values(m)]);
|
||||
for (const model23 of Object.keys(m))
|
||||
this.models[model23] = m[model23] || this.models[model23] || null;
|
||||
for (const [model23, promise] of Object.entries(m)) {
|
||||
if (promise == null ? void 0 : promise["then"])
|
||||
promise["then"]((val) => this.models[model23] = val);
|
||||
}
|
||||
await Promise.all(Object.values(m));
|
||||
}
|
||||
list() {
|
||||
const models3 = Object.keys(this.models).map((model23) => {
|
||||
|
@ -14789,7 +14791,9 @@ async function runCompile(instance) {
|
|||
tf37.env().set("ENGINE_COMPILE_ONLY", true);
|
||||
const numTensorsStart = tf37.engine().state.numTensors;
|
||||
const compiledModels = [];
|
||||
for (const [modelName, model23] of Object.entries(instance.models).filter(([key, val]) => key !== null && val !== null)) {
|
||||
for (const [modelName, model23] of Object.entries(instance.models.models)) {
|
||||
if (!model23)
|
||||
continue;
|
||||
const shape = (model23 == null ? void 0 : model23.modelSignature) && ((_b = (_a = model23 == null ? void 0 : model23.inputs) == null ? void 0 : _a[0]) == null ? void 0 : _b.shape) ? [...model23.inputs[0].shape] : [1, 64, 64, 3];
|
||||
const dtype = (model23 == null ? void 0 : model23.modelSignature) && ((_d = (_c = model23 == null ? void 0 : model23.inputs) == null ? void 0 : _c[0]) == null ? void 0 : _d.dtype) ? model23.inputs[0].dtype : "float32";
|
||||
for (let dim = 0; dim < shape.length; dim++) {
|
||||
|
|
|
@ -288,7 +288,7 @@ var config = {
|
|||
modelPath: "handtrack.json"
|
||||
},
|
||||
skeleton: {
|
||||
modelPath: "handlandmark-full.json"
|
||||
modelPath: "handlandmark-lite.json"
|
||||
}
|
||||
},
|
||||
object: {
|
||||
|
@ -13864,9 +13864,11 @@ var Models = class {
|
|||
m.selfie = this.instance.config.segmentation.enabled && !this.models.selfie && ((_y = this.instance.config.segmentation.modelPath) == null ? void 0 : _y.includes("selfie")) ? load21(this.instance.config) : null;
|
||||
m.meet = this.instance.config.segmentation.enabled && !this.models.meet && ((_z = this.instance.config.segmentation.modelPath) == null ? void 0 : _z.includes("meet")) ? load16(this.instance.config) : null;
|
||||
m.rvm = this.instance.config.segmentation.enabled && !this.models.rvm && ((_A = this.instance.config.segmentation.modelPath) == null ? void 0 : _A.includes("rvm")) ? load20(this.instance.config) : null;
|
||||
await Promise.all([...Object.values(m)]);
|
||||
for (const model23 of Object.keys(m))
|
||||
this.models[model23] = m[model23] || this.models[model23] || null;
|
||||
for (const [model23, promise] of Object.entries(m)) {
|
||||
if (promise == null ? void 0 : promise["then"])
|
||||
promise["then"]((val) => this.models[model23] = val);
|
||||
}
|
||||
await Promise.all(Object.values(m));
|
||||
}
|
||||
list() {
|
||||
const models3 = Object.keys(this.models).map((model23) => {
|
||||
|
@ -14791,7 +14793,9 @@ async function runCompile(instance) {
|
|||
tf37.env().set("ENGINE_COMPILE_ONLY", true);
|
||||
const numTensorsStart = tf37.engine().state.numTensors;
|
||||
const compiledModels = [];
|
||||
for (const [modelName, model23] of Object.entries(instance.models).filter(([key, val]) => key !== null && val !== null)) {
|
||||
for (const [modelName, model23] of Object.entries(instance.models.models)) {
|
||||
if (!model23)
|
||||
continue;
|
||||
const shape = (model23 == null ? void 0 : model23.modelSignature) && ((_b = (_a = model23 == null ? void 0 : model23.inputs) == null ? void 0 : _a[0]) == null ? void 0 : _b.shape) ? [...model23.inputs[0].shape] : [1, 64, 64, 3];
|
||||
const dtype = (model23 == null ? void 0 : model23.modelSignature) && ((_d = (_c = model23 == null ? void 0 : model23.inputs) == null ? void 0 : _c[0]) == null ? void 0 : _d.dtype) ? model23.inputs[0].dtype : "float32";
|
||||
for (let dim = 0; dim < shape.length; dim++) {
|
||||
|
|
|
@ -286,7 +286,7 @@ var config = {
|
|||
modelPath: "handtrack.json"
|
||||
},
|
||||
skeleton: {
|
||||
modelPath: "handlandmark-full.json"
|
||||
modelPath: "handlandmark-lite.json"
|
||||
}
|
||||
},
|
||||
object: {
|
||||
|
@ -13862,9 +13862,11 @@ var Models = class {
|
|||
m.selfie = this.instance.config.segmentation.enabled && !this.models.selfie && ((_y = this.instance.config.segmentation.modelPath) == null ? void 0 : _y.includes("selfie")) ? load21(this.instance.config) : null;
|
||||
m.meet = this.instance.config.segmentation.enabled && !this.models.meet && ((_z = this.instance.config.segmentation.modelPath) == null ? void 0 : _z.includes("meet")) ? load16(this.instance.config) : null;
|
||||
m.rvm = this.instance.config.segmentation.enabled && !this.models.rvm && ((_A = this.instance.config.segmentation.modelPath) == null ? void 0 : _A.includes("rvm")) ? load20(this.instance.config) : null;
|
||||
await Promise.all([...Object.values(m)]);
|
||||
for (const model23 of Object.keys(m))
|
||||
this.models[model23] = m[model23] || this.models[model23] || null;
|
||||
for (const [model23, promise] of Object.entries(m)) {
|
||||
if (promise == null ? void 0 : promise["then"])
|
||||
promise["then"]((val) => this.models[model23] = val);
|
||||
}
|
||||
await Promise.all(Object.values(m));
|
||||
}
|
||||
list() {
|
||||
const models3 = Object.keys(this.models).map((model23) => {
|
||||
|
@ -14789,7 +14791,9 @@ async function runCompile(instance) {
|
|||
tf37.env().set("ENGINE_COMPILE_ONLY", true);
|
||||
const numTensorsStart = tf37.engine().state.numTensors;
|
||||
const compiledModels = [];
|
||||
for (const [modelName, model23] of Object.entries(instance.models).filter(([key, val]) => key !== null && val !== null)) {
|
||||
for (const [modelName, model23] of Object.entries(instance.models.models)) {
|
||||
if (!model23)
|
||||
continue;
|
||||
const shape = (model23 == null ? void 0 : model23.modelSignature) && ((_b = (_a = model23 == null ? void 0 : model23.inputs) == null ? void 0 : _a[0]) == null ? void 0 : _b.shape) ? [...model23.inputs[0].shape] : [1, 64, 64, 3];
|
||||
const dtype = (model23 == null ? void 0 : model23.modelSignature) && ((_d = (_c = model23 == null ? void 0 : model23.inputs) == null ? void 0 : _c[0]) == null ? void 0 : _d.dtype) ? model23.inputs[0].dtype : "float32";
|
||||
for (let dim = 0; dim < shape.length; dim++) {
|
||||
|
|
Binary file not shown.
Binary file not shown.
File diff suppressed because one or more lines are too long
|
@ -85,13 +85,13 @@
|
|||
"@tensorflow/tfjs-node-gpu": "^4.1.0",
|
||||
"@types/node": "^18.11.9",
|
||||
"@types/offscreencanvas": "^2019.7.0",
|
||||
"@typescript-eslint/eslint-plugin": "^5.43.0",
|
||||
"@typescript-eslint/parser": "^5.43.0",
|
||||
"@typescript-eslint/eslint-plugin": "^5.44.0",
|
||||
"@typescript-eslint/parser": "^5.44.0",
|
||||
"@vladmandic/build": "^0.7.14",
|
||||
"@vladmandic/pilogger": "^0.4.6",
|
||||
"@vladmandic/tfjs": "github:vladmandic/tfjs",
|
||||
"canvas": "^2.10.2",
|
||||
"esbuild": "^0.15.14",
|
||||
"esbuild": "^0.15.15",
|
||||
"eslint": "8.28.0",
|
||||
"eslint-config-airbnb-base": "^15.0.0",
|
||||
"eslint-plugin-html": "^7.1.0",
|
||||
|
|
|
@ -444,7 +444,7 @@ const config: Config = {
|
|||
modelPath: 'handtrack.json',
|
||||
},
|
||||
skeleton: {
|
||||
modelPath: 'handlandmark-full.json',
|
||||
modelPath: 'handlandmark-lite.json',
|
||||
},
|
||||
},
|
||||
object: {
|
||||
|
|
|
@ -157,8 +157,10 @@ export class Models {
|
|||
m.rvm = (this.instance.config.segmentation.enabled && !this.models.rvm && this.instance.config.segmentation.modelPath?.includes('rvm')) ? rvm.load(this.instance.config) : null;
|
||||
|
||||
// models are loaded in parallel asynchronously so lets wait until they are actually loaded
|
||||
await Promise.all([...Object.values(m)]);
|
||||
for (const model of Object.keys(m)) this.models[model] = m[model] as GraphModel || this.models[model] || null; // only update actually loaded models
|
||||
for (const [model, promise] of Object.entries(m)) {
|
||||
if (promise?.['then']) promise['then']((val) => this.models[model] = val);
|
||||
}
|
||||
await Promise.all(Object.values(m)); // wait so this function does not resolve prematurely
|
||||
}
|
||||
|
||||
list() {
|
||||
|
|
|
@ -122,7 +122,8 @@ export async function runCompile(instance: Human) {
|
|||
tf.env().set('ENGINE_COMPILE_ONLY', true);
|
||||
const numTensorsStart = tf.engine().state.numTensors;
|
||||
const compiledModels: string[] = [];
|
||||
for (const [modelName, model] of Object.entries(instance.models).filter(([key, val]) => (key !== null && val !== null))) {
|
||||
for (const [modelName, model] of Object.entries(instance.models.models)) {
|
||||
if (!model) continue;
|
||||
const shape = (model?.modelSignature && model?.inputs?.[0]?.shape) ? [...model.inputs[0].shape] : [1, 64, 64, 3];
|
||||
const dtype: DataType = (model?.modelSignature && model?.inputs?.[0]?.dtype) ? model.inputs[0].dtype : 'float32';
|
||||
for (let dim = 0; dim < shape.length; dim++) {
|
||||
|
|
2240
test/build.log
2240
test/build.log
File diff suppressed because it is too large
Load Diff
|
@ -52,7 +52,7 @@
|
|||
"tabSize": 2
|
||||
},
|
||||
"exclude": ["node_modules/", "types/", "dist/**/*.js"],
|
||||
"include": ["src", "tfjs/*.ts", "types/human.d.ts", "test/**/*.ts", "demo/**/*.ts"],
|
||||
"include": ["src", "tfjs/*.ts", "types/human.d.ts", "test/**/*.ts", "demo/**/*.ts", "demo/facedetect/loader.js"],
|
||||
"typedocOptions": {
|
||||
"excludeExternals": true,
|
||||
"externalPattern": ["**/node_modules/**", "tfjs/"]
|
||||
|
|
2
wiki
2
wiki
|
@ -1 +1 @@
|
|||
Subproject commit 41b5880bf084217dc6ba76f0eb3a93f26f3969a7
|
||||
Subproject commit 185d129a178776c150defdc125334bb1221bec14
|
Loading…
Reference in New Issue