mirror of <https://github.com/vladmandic/human>

commit 896cb0aac0 (parent 092873adaa)

update embedding and strong typings
**CHANGELOG.md** (file name inferred; not shown in capture)

```diff
@@ -1,6 +1,6 @@
 # @vladmandic/human

-Version: **1.1.1**
+Version: **1.1.2**

 Description: **Human: AI-powered 3D Face Detection, Face Embedding & Recognition, Body Pose Tracking, Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction & Gesture Recognition**

 Author: **Vladimir Mandic <mandic00@live.com>**
@@ -9,8 +9,10 @@ Repository: **<git+https://github.com/vladmandic/human.git>**

 ## Changelog

-### **HEAD -> main** 2021/03/12 mandic00@live.com
+### **1.1.2** 2021/03/12 mandic00@live.com
+
+- distance based on minkowski space and limited euclidean space
+- guard against invalid input images

 ### **1.1.1** 2021/03/12 mandic00@live.com
```
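For reference, the "minkowski space" bullet refers to the family of distances a Minkowski order parameter spans. Below is a minimal sketch of a Minkowski-p distance over two embedding vectors; it is illustrative only, not the library's actual implementation:

```ts
// Minkowski distance of order p between two equal-length embeddings.
// p = 2 gives Euclidean distance, p = 1 gives Manhattan distance.
function minkowskiDistance(a: number[], b: number[], p = 2): number {
  if (a.length !== b.length) throw new Error('embedding length mismatch');
  const sum = a.reduce((acc, v, i) => acc + Math.abs(v - b[i]) ** p, 0);
  return sum ** (1 / p); // p-th root of the summed p-th powers
}

// example: minkowskiDistance([0, 1, 0, 1], [1, 0, 0, 1], 2) === Math.SQRT2
```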
**TODO.md** (17 changes)

```diff
@@ -4,21 +4,26 @@

 - Strong(er) typing
 - Automated testing framework
+- API Documentation
+  - API Extractor: generate dts rollup and docs
+    <https://api-extractor.com/>
+  - TypeDoc: generate docs
+    <http://typedoc.org/>

 ## Explore Models

 - EfficientPose
   <https://github.com/daniegr/EfficientPose>
   <https://github.com/PINTO0309/PINTO_model_zoo/tree/main/084_EfficientPose>
-- ArcFace
-- RetinaFace
-- CenterFace
+- InsightFace
+  RetinaFace detector and ArcFace recognition
+  <https://github.com/deepinsight/insightface>
+- NanoDet
+  <https://github.com/RangiLyu/nanodet>

 ## WiP Items

-- Embedding:
-- Try average of flipped image
-- Try with variable aspect ratio
+- face.tensor should return image in correct aspect ratio

 ## Issues
```
**demo similarity script** (file name not shown in capture)

```diff
@@ -82,7 +82,7 @@ let original;
 async function calcSimmilariry(result) {
   document.getElementById('compare-container').style.display = human.config.face.embedding.enabled ? 'block' : 'none';
   if (!human.config.face.embedding.enabled) return;
-  if (!(result?.face?.length > 0) || (result?.face[0]?.embedding?.length !== 256)) return;
+  if (!(result?.face?.length > 0) || (result?.face[0]?.embedding?.length >= 64)) return;
   if (!original) {
     original = result;
     if (result.face[0].tensor) {
```
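The change drops the hard-coded 256-element embedding requirement in favor of a 64-element threshold. For orientation, here is a sketch of the comparison flow this guard feeds: the first result is kept as the reference and later results are scored against it. `compareToOriginal` is an illustrative helper, not the demo's actual code:

```ts
declare const human: any; // initialized elsewhere: new Human(userConfig)

// Illustrative sketch: keep the first detection result as the reference,
// then score subsequent results against it with human.simmilarity().
let original: any = null;

function compareToOriginal(result: any): number {
  const current = result?.face?.[0]?.embedding;
  if (!Array.isArray(current) || current.length < 64) return 0; // no usable embedding
  if (!original) { original = result; return 1; }               // first face becomes the reference
  return human.simmilarity(original.face[0].embedding, current); // 0 when embedding is disabled
}
```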
**demo page stylesheet** (file name not shown in capture)

```diff
@@ -20,7 +20,7 @@
   body { margin: 0; background: black; color: white; overflow-x: hidden; scrollbar-width: none; }
   body::-webkit-scrollbar { display: none; }
   img { object-fit: contain; }
-  .face { width: 150px; height: 150px; }
+  .face { width: 128px; height: 128px; }
  </style>
 </head>
 <body>
```
**demo embedding script** (file name not shown in capture)

```diff
@@ -5,7 +5,6 @@ const userConfig = {
   async: false,
   warmup: 'none',
   debug: true,
-  filter: false,
   videoOptimized: false,
   face: {
     enabled: true,
@@ -20,15 +19,13 @@ const userConfig = {
   hand: { enabled: false },
   gesture: { enabled: false },
   body: { enabled: false },
+  filter: {
+    enabled: false,
+  },
 };

 const human = new Human(userConfig); // new instance of human

-// const samples = ['../assets/sample-me.jpg', '../assets/sample6.jpg', '../assets/sample1.jpg', '../assets/sample4.jpg', '../assets/sample5.jpg', '../assets/sample3.jpg', '../assets/sample2.jpg'];
+// const samples = ['../assets/sample-me.jpg', '../assets/sample6.jpg', '../assets/sample1.jpg', '../assets/sample4.jpg', '../assets/sample5.jpg', '../assets/sample3.jpg', '../assets/sample2.jpg',
+//   '../private/me (1).jpg', '../private/me (2).jpg', '../private/me (3).jpg', '../private/me (4).jpg', '../private/me (5).jpg', '../private/me (6).jpg', '../private/me (7).jpg', '../private/me (8).jpg',
+//   '../private/me (9).jpg', '../private/me (10).jpg', '../private/me (11).jpg', '../private/me (12).jpg', '../private/me (13).jpg'];

 const all = []; // array that will hold all detected faces

 function log(...msg) {
@@ -39,8 +36,6 @@ function log(...msg) {
 }

 async function analyze(face) {
-  log('Face:', face);
-
   // if we have face image tensor, enhance it and display it
   if (face.tensor) {
     const enhanced = human.enhance(face);
@@ -87,7 +82,7 @@ async function faces(index, res) {
     canvas.className = 'face';
     // mouse click on any face canvas triggers analysis
     canvas.addEventListener('click', (evt) => {
-      log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face);
+      log('Select:', 'Image:', evt.target.tag.sample, 'Face:', evt.target.tag.face, all[evt.target.tag.sample][evt.target.tag.face]);
       analyze(all[evt.target.tag.sample][evt.target.tag.face]);
     });
     // if we actually got face image tensor, draw canvas with that face
@@ -98,14 +93,16 @@ async function faces(index, res) {
   }
 }

-async function add(index, image) {
-  log('Add image:', index + 1, image);
+async function process(index, image) {
   return new Promise((resolve) => {
-    const img = new Image(100, 100);
+    const img = new Image(128, 128);
     img.onload = () => { // must wait until image is loaded
-      human.detect(img).then((res) => faces(index, res)); // then wait until image is analyzed
-      document.getElementById('images').appendChild(img); // and finally we can add it
-      resolve(true);
+      human.detect(img).then((res) => {
+        faces(index, res); // then wait until image is analyzed
+        log('Add image:', index + 1, image, 'faces:', res.face.length);
+        document.getElementById('images').appendChild(img); // and finally we can add it
+        resolve(true);
+      });
     };
     img.title = image;
     img.src = encodeURI(image);
@@ -126,8 +123,11 @@ async function main() {

   // download and analyze all images
   log('Enumerated:', images.length, 'images');
-  for (const i in images) await add(i, images[i]);
+  for (let i = 0; i < images.length; i++) await process(i, images[i]);
+
+  const num = all.reduce((prev, cur) => prev += cur.length, 0);
+  log('Extracted faces:', num, 'from images:', all.length);
+  log(human.tf.engine().memory());
   log('Ready');
 }
```
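The reworked `process()` now resolves its promise only after `human.detect()` has completed, so `main()` can safely tally faces once the loop finishes. The same sequencing can be written with async/await; the sketch below is an equivalent shape, not the demo's actual code, and assumes the demo's `human`, `faces()`, and `log()`:

```ts
declare const human: any;                               // new Human(userConfig), as in the demo
declare function faces(index: number, res: any): void;  // demo's face-extraction renderer
declare function log(...msg: any[]): void;              // demo's on-page logger

// Sketch of the same load -> detect -> append sequence using async/await.
async function processImage(index: number, image: string): Promise<void> {
  const img = new Image(128, 128);
  img.title = image;
  // wait for the image to finish loading before running detection
  await new Promise<void>((resolve, reject) => {
    img.onload = () => resolve();
    img.onerror = () => reject(new Error(`failed to load ${image}`));
    img.src = encodeURI(image);
  });
  const res = await human.detect(img);                  // run detection on the loaded image
  faces(index, res);                                    // extract and render detected faces
  log('Add image:', index + 1, image, 'faces:', res.face.length);
  document.getElementById('images')?.appendChild(img);  // append only after analysis
}
```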
**package.json** (file name inferred; not shown in capture)

```diff
@@ -44,6 +44,7 @@
     "blazepose"
   ],
   "devDependencies": {
+    "@microsoft/api-extractor": "^7.13.2",
     "@tensorflow/tfjs": "^3.3.0",
     "@tensorflow/tfjs-backend-cpu": "^3.3.0",
     "@tensorflow/tfjs-backend-wasm": "^3.3.0",
@@ -61,7 +62,7 @@
     "chokidar": "^3.5.1",
     "dayjs": "^1.10.4",
     "esbuild": "^0.9.2",
-    "eslint": "^7.21.0",
+    "eslint": "^7.22.0",
     "eslint-config-airbnb-base": "^14.2.1",
     "eslint-plugin-import": "^2.22.1",
     "eslint-plugin-json": "^2.1.2",
```
**src/human.ts** (15 changes)

```diff
@@ -43,7 +43,7 @@ class Human {
   version: string;
   config: typeof config.default;
   state: string;
-  image: { tensor, canvas };
+  image: { tensor: any, canvas: OffscreenCanvas | HTMLCanvasElement };
   // classes
   tf: typeof tf;
   draw: typeof draw;
@@ -67,7 +67,7 @@ class Human {
     body: typeof posenet | typeof blazepose;
     hand: typeof handpose;
   };
-  sysinfo: { platform, agent };
+  sysinfo: { platform: string, agent: string };
   #package: any;
   #perf: any;
   #numTensors: number;
@@ -146,7 +146,7 @@ class Human {
     return null;
   }

-  simmilarity(embedding1, embedding2): number {
+  simmilarity(embedding1: Array<number>, embedding2: Array<number>): number {
     if (this.config.face.embedding.enabled) return embedding.simmilarity(embedding1, embedding2);
     return 0;
   }
@@ -309,7 +309,6 @@ class Human {
       embedding: any,
       iris: number,
       angle: any,
-      tensor: any,
     }> = [];

     this.state = 'run:face';
@@ -401,11 +400,11 @@ class Human {
         embedding: embeddingRes,
         iris: (irisSize !== 0) ? Math.trunc(irisSize) / 100 : 0,
         angle,
-        tensor: this.config.face.detector.return ? face.image.squeeze() : null,
+        tensor: this.config.face.detector.return ? face.image?.squeeze() : null,
       });
-      // dont need face anymore
+      // dispose original face tensor
       face.image?.dispose();

       this.#analyze('End Face');
     }
     this.#analyze('End FaceMesh:');
```
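The optional chaining on `face.image?.squeeze()` and the explicit `face.image?.dispose()` are both about tensor lifetime: every tensor that is not disposed stays resident in the tfjs backend. A quick way to check for leaks uses the standard `tf.engine().memory()` API (the same call the demo's `main()` logs); the helper below is a sketch, not part of the library:

```ts
import * as tf from '@tensorflow/tfjs';

declare const human: any; // initialized Human instance

// Sketch: confirm a detect pass releases its intermediate tensors.
// With face.detector.return disabled, numTensors should stay stable across calls.
async function checkForLeaks(input: HTMLCanvasElement): Promise<void> {
  const before = tf.engine().memory().numTensors; // live tensors before the pass
  await human.detect(input);
  const after = tf.engine().memory().numTensors;  // live tensors after the pass
  console.log('tensor delta:', after - before);
}
```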
```diff
@@ -419,7 +418,7 @@ class Human {
   }

   // main detect function
-  async detect(input, userConfig = {}): Promise<{ face, body, hand, gesture, performance, canvas } | { error }> {
+  async detect(input, userConfig = {}): Promise<{ face: Array<{ any }>, body: Array<{ any }>, hand: Array<{ any }>, gesture: Array<{ any }>, performance: object, canvas: OffscreenCanvas | HTMLCanvasElement } | { error: string }> {
     // detection happens inside a promise
     return new Promise(async (resolve) => {
       this.state = 'config';
```
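With the widened return type, callers can branch on the error case of the union. A minimal usage sketch, assuming an initialized `human` instance and a canvas input:

```ts
declare const human: any; // initialized Human instance

// Sketch: consume the typed detect() result and handle the error branch.
async function run(input: HTMLCanvasElement): Promise<void> {
  const result = await human.detect(input);
  if ('error' in result) {
    console.error('detection failed:', result.error); // error branch of the union type
    return;
  }
  // face/body/hand/gesture are arrays; canvas is OffscreenCanvas or HTMLCanvasElement
  console.log('faces:', result.face.length, 'gestures:', result.gesture.length);
}
```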
**wiki** (submodule, 2 changes)

```diff
@@ -1 +1 @@
-Subproject commit ac5d01255f2f02de6b308b82976c5866c458f149
+Subproject commit cdd53841d5adf0542379c87b52919ea12a02335d
```