update embedding and strong typings

pull/280/head
Vladimir Mandic 2021-03-13 11:26:53 -05:00
parent 092873adaa
commit 896cb0aac0
8 changed files with 43 additions and 36 deletions

CHANGELOG.md

@@ -1,6 +1,6 @@
# @vladmandic/human
Version: **1.1.1**
Version: **1.1.2**
Description: **Human: AI-powered 3D Face Detection, Face Embedding & Recognition, Body Pose Tracking, Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction & Gesture Recognition**
Author: **Vladimir Mandic <mandic00@live.com>**
@@ -9,8 +9,10 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
## Changelog
### **HEAD -> main** 2021/03/12 mandic00@live.com
### **1.1.2** 2021/03/12 mandic00@live.com
- distance based on minkowski space and limited euclidean space
- guard against invalid input images
### **1.1.1** 2021/03/12 mandic00@live.com

TODO.md

@@ -4,21 +4,26 @@
- Strong(er) typing
- Automated testing framework
- API Documentation
- API Extractor: generate dts rollup and docs
  <https://api-extractor.com/>
- TypeDoc: generate docs
  <http://typedoc.org/>
## Explore Models
- EfficientPose
  <https://github.com/daniegr/EfficientPose>
  <https://github.com/PINTO0309/PINTO_model_zoo/tree/main/084_EfficientPose>
- ArcFace
- RetinaFace
- CenterFace
- InsightFace
  RetinaFace detector and ArcFace recognition
  <https://github.com/deepinsight/insightface>
- NanoDet
  <https://github.com/RangiLyu/nanodet>
## WiP Items
- Embedding:
  - Try average of flipped image
  - Try with variable aspect ratio
- face.tensor should return image in correct aspect ratio
## Issues

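The WiP item "Try average of flipped image" above suggests making the face descriptor more pose-tolerant by also embedding a mirrored copy of the face crop and averaging the two vectors. A minimal sketch of that idea, assuming a hypothetical `getEmbedding` helper standing in for the library's internal embedding step:

```ts
import * as tf from '@tensorflow/tfjs';

// Embed a face crop and its horizontal mirror, then return the element-wise
// mean of the two vectors. `getEmbedding` is a hypothetical stand-in for the
// embedding model; `face` is a [1, height, width, 3] tensor.
async function embedWithFlip(face: tf.Tensor4D, getEmbedding: (t: tf.Tensor4D) => Promise<number[]>): Promise<number[]> {
  const flipped = tf.image.flipLeftRight(face); // mirror along the vertical axis
  const [e1, e2] = await Promise.all([getEmbedding(face), getEmbedding(flipped)]);
  flipped.dispose(); // release the intermediate tensor
  return e1.map((v, i) => (v + e2[i]) / 2); // average the two embeddings
}
```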

@@ -82,7 +82,7 @@ let original;
async function calcSimmilariry(result) {
document.getElementById('compare-container').style.display = human.config.face.embedding.enabled ? 'block' : 'none';
if (!human.config.face.embedding.enabled) return;
if (!(result?.face?.length > 0) || (result?.face[0]?.embedding?.length !== 256)) return;
if (!(result?.face?.length > 0) || (result?.face[0]?.embedding?.length >= 64)) return;
if (!original) {
original = result;
if (result.face[0].tensor) {


@@ -20,7 +20,7 @@
body { margin: 0; background: black; color: white; overflow-x: hidden; scrollbar-width: none; }
body::-webkit-scrollbar { display: none; }
img { object-fit: contain; }
.face { width: 150px; height: 150px; }
.face { width: 128px; height: 128px; }
</style>
</head>
<body>


@@ -5,7 +5,6 @@ const userConfig = {
async: false,
warmup: 'none',
debug: true,
filter: false,
videoOptimized: false,
face: {
enabled: true,
@@ -20,15 +19,13 @@ const userConfig = {
hand: { enabled: false },
gesture: { enabled: false },
body: { enabled: false },
filter: {
enabled: false,
},
};
const human = new Human(userConfig); // new instance of human
// const samples = ['../assets/sample-me.jpg', '../assets/sample6.jpg', '../assets/sample1.jpg', '../assets/sample4.jpg', '../assets/sample5.jpg', '../assets/sample3.jpg', '../assets/sample2.jpg'];
// const samples = ['../assets/sample-me.jpg', '../assets/sample6.jpg', '../assets/sample1.jpg', '../assets/sample4.jpg', '../assets/sample5.jpg', '../assets/sample3.jpg', '../assets/sample2.jpg',
// '../private/me (1).jpg', '../private/me (2).jpg', '../private/me (3).jpg', '../private/me (4).jpg', '../private/me (5).jpg', '../private/me (6).jpg', '../private/me (7).jpg', '../private/me (8).jpg',
// '../private/me (9).jpg', '../private/me (10).jpg', '../private/me (11).jpg', '../private/me (12).jpg', '../private/me (13).jpg'];
const all = []; // array that will hold all detected faces
function log(...msg) {
@@ -39,8 +36,6 @@ function log(...msg) {
}
async function analyze(face) {
log('Face:', face);
// if we have face image tensor, enhance it and display it
if (face.tensor) {
const enhanced = human.enhance(face);
@@ -87,7 +82,7 @@ async function faces(index, res) {
canvas.className = 'face';
// mouse click on any face canvas triggers analysis
canvas.addEventListener('click', (evt) => {
log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face);
log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, all[evt.target.tag.sample][evt.target.tag.face]);
analyze(all[evt.target.tag.sample][evt.target.tag.face]);
});
// if we actually got face image tensor, draw canvas with that face
@@ -98,14 +93,16 @@ async function faces(index, res) {
}
}
async function add(index, image) {
log('Add image:', index + 1, image);
async function process(index, image) {
return new Promise((resolve) => {
const img = new Image(100, 100);
const img = new Image(128, 128);
img.onload = () => { // must wait until image is loaded
human.detect(img).then((res) => faces(index, res)); // then wait until image is analyzed
document.getElementById('images').appendChild(img); // and finally we can add it
resolve(true);
human.detect(img).then((res) => {
faces(index, res); // then wait until image is analyzed
log('Add image:', index + 1, image, 'faces:', res.face.length);
document.getElementById('images').appendChild(img); // and finally we can add it
resolve(true);
});
};
img.title = image;
img.src = encodeURI(image);
@@ -126,8 +123,11 @@ async function main() {
// download and analyze all images
log('Enumerated:', images.length, 'images');
for (const i in images) await add(i, images[i]);
for (let i = 0; i < images.length; i++) await process(i, images[i]);
const num = all.reduce((prev, cur) => prev += cur.length, 0);
log('Extracted faces:', num, 'from images:', all.length);
log(human.tf.engine().memory());
log('Ready');
}

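The reworked `process` above chains image load, detection, and DOM append inside a hand-rolled promise. For reference, a hedged async/await equivalent using the standard `HTMLImageElement.decode()` promise; `human`, `log`, `faces`, and the `images` element mirror the demo above:

```ts
// Same load-then-detect flow as the demo's process(), but with await and
// img.decode() instead of wrapping the onload callback by hand.
async function processAlt(index: number, image: string) {
  const img = new Image(128, 128);
  img.title = image;
  img.src = encodeURI(image);
  await img.decode(); // resolves once the image is loaded and decoded
  const res = await human.detect(img); // analyze before attaching to the page
  faces(index, res); // render the detected face crops
  log('Add image:', index + 1, image, 'faces:', res.face.length);
  document.getElementById('images')?.appendChild(img);
}
```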
package.json

@@ -44,6 +44,7 @@
"blazepose"
],
"devDependencies": {
"@microsoft/api-extractor": "^7.13.2",
"@tensorflow/tfjs": "^3.3.0",
"@tensorflow/tfjs-backend-cpu": "^3.3.0",
"@tensorflow/tfjs-backend-wasm": "^3.3.0",
@@ -61,7 +62,7 @@
"chokidar": "^3.5.1",
"dayjs": "^1.10.4",
"esbuild": "^0.9.2",
"eslint": "^7.21.0",
"eslint": "^7.22.0",
"eslint-config-airbnb-base": "^14.2.1",
"eslint-plugin-import": "^2.22.1",
"eslint-plugin-json": "^2.1.2",

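The new `@microsoft/api-extractor` devDependency lines up with the TODO entry about generating a dts rollup and docs. A minimal sketch of driving it from a Node build script, assuming an `api-extractor.json` at the project root (the path is a placeholder):

```ts
import { Extractor, ExtractorConfig } from '@microsoft/api-extractor';

// Load the config and run the extractor; `localBuild` relaxes validations
// intended for CI runs. Exit non-zero so a build pipeline can fail fast.
const config = ExtractorConfig.loadFileAndPrepare('./api-extractor.json');
const result = Extractor.invoke(config, { localBuild: true, showVerboseMessages: true });
if (!result.succeeded) process.exit(1);
```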
src/human.ts

@@ -43,7 +43,7 @@ class Human {
version: string;
config: typeof config.default;
state: string;
image: { tensor, canvas };
image: { tensor: any, canvas: OffscreenCanvas | HTMLCanvasElement };
// classes
tf: typeof tf;
draw: typeof draw;
@@ -67,7 +67,7 @@
body: typeof posenet | typeof blazepose;
hand: typeof handpose;
};
sysinfo: { platform, agent };
sysinfo: { platform: string, agent: string };
#package: any;
#perf: any;
#numTensors: number;
@@ -146,7 +146,7 @@
return null;
}
simmilarity(embedding1, embedding2): number {
simmilarity(embedding1: Array<number>, embedding2: Array<number>): number {
if (this.config.face.embedding.enabled) return embedding.simmilarity(embedding1, embedding2);
return 0;
}
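The `simmilarity` signature above now takes plain number arrays, and the changelog notes the distance is "based on minkowski space and limited euclidean space". A minimal sketch of a Minkowski-distance similarity, where `order = 2` reduces to the Euclidean case; this illustrates the idea, not the library's exact formula:

```ts
// Minkowski distance between two embeddings, mapped into a 0..1 similarity.
// order = 2 is the Euclidean special case; order = 1 is Manhattan distance.
function minkowskiSimilarity(e1: Array<number>, e2: Array<number>, order = 2): number {
  if (e1.length === 0 || e1.length !== e2.length) return 0; // guard mismatched vectors
  const distance = e1.reduce((sum, v, i) => sum + (Math.abs(v - e2[i]) ** order), 0) ** (1 / order);
  return Math.max(0, 1 - distance); // clamp so very distant embeddings score 0
}
```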
@@ -309,7 +309,6 @@
embedding: any,
iris: number,
angle: any,
tensor: any,
}> = [];
this.state = 'run:face';
@@ -401,11 +400,11 @@
embedding: embeddingRes,
iris: (irisSize !== 0) ? Math.trunc(irisSize) / 100 : 0,
angle,
tensor: this.config.face.detector.return ? face.image.squeeze() : null,
tensor: this.config.face.detector.return ? face.image?.squeeze() : null,
});
// dont need face anymore
// dispose original face tensor
face.image?.dispose();
this.#analyze('End Face');
}
this.#analyze('End FaceMesh:');
@@ -419,7 +418,7 @@
}
// main detect function
async detect(input, userConfig = {}): Promise<{ face, body, hand, gesture, performance, canvas } | { error }> {
async detect(input, userConfig = {}): Promise<{ face: Array<{ any }>, body: Array<{ any }>, hand: Array<{ any }>, gesture: Array<{ any }>, performance: object, canvas: OffscreenCanvas | HTMLCanvasElement } | { error: string }> {
// detection happens inside a promise
return new Promise(async (resolve) => {
this.state = 'config';

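With the stronger `detect` typing above, the promise resolves to either a result object or `{ error }`, so callers can narrow the union before reading `face`, `body`, and the rest. A hedged usage sketch, assuming the class is imported as in the demos; the input element and logging are examples:

```ts
async function run(human: Human, input: HTMLVideoElement) {
  const result = await human.detect(input);
  if ('error' in result) {
    console.error('detect failed:', result.error); // narrowed to the error branch
  } else {
    console.log('faces:', result.face.length, 'gestures:', result.gesture.length);
  }
}
```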
wiki

@@ -1 +1 @@
Subproject commit ac5d01255f2f02de6b308b82976c5866c458f149
Subproject commit cdd53841d5adf0542379c87b52919ea12a02335d