mirror of https://github.com/vladmandic/human
update tests and demos
parent f2c937d2cd
commit 9d7a3148ec

CHANGELOG.md
@@ -9,14 +9,16 @@ Repository: **<git+https://github.com/vladmandic/human.git>**

 ## Changelog

-### **HEAD -> main** 2021/06/01 mandic00@live.com
+### **HEAD -> main** 2021/06/02 mandic00@live.com

+- implemented drag & drop for image processing

+### **origin/main** 2021/06/01 mandic00@live.com

+- breaking changes to results.face output properties
 - breaking changes to results.object output properties
 - breaking changes to results.hand output properties
 - breaking changes to results.body output properties

 ### **origin/main** 2021/05/31 mandic00@live.com

 - implemented human.next global interpolation method
 - finished draw buffering and smoothing and enabled by default
 - implemented service worker
|
@@ -30,6 +30,7 @@ Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) ap
 - To start video detection, simply press *Play*
 - To process images, simply drag & drop in your Browser window
 - Note: For optimal performance, select only models you'd like to use
+- Note: If you have a modern GPU, the WebGL (default) backend is preferred, otherwise select the WASM backend

 <br>
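As a rough illustration of those notes, here is a minimal sketch of a browser setup that keeps only the face model and selects the WASM backend; the exact config shape and the `@vladmandic/human` import are assumptions, not taken from this commit:

```js
// sketch: enable only the models you need and select a backend explicitly
import Human from '@vladmandic/human';

const human = new Human({
  backend: 'wasm',          // 'webgl' (default) is preferred on systems with a modern GPU
  face: { enabled: true },  // keep only face detection
  body: { enabled: false }, // disable unused models for better performance
  hand: { enabled: false },
  object: { enabled: false },
});

await human.load(); // pre-load the selected models, as the demo code below also does
```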
File diff suppressed because one or more lines are too long
@@ -183,44 +183,36 @@ async function main() {
   // pre-load human models
   await human.load();

-  let res;
   let images = [];
   let dir = [];
   // load face descriptor database
   await getFaceDB();

   // enumerate all sample images in /assets
-  res = await fetch('/assets');
+  const res = await fetch('/samples/groups');
   dir = (res && res.ok) ? await res.json() : [];
   images = images.concat(dir.filter((img) => (img.endsWith('.jpg') && img.includes('sample'))));
-  // enumerate additional private test images in /private, not included in git repository
-  res = await fetch('/private/me');
-  dir = (res && res.ok) ? await res.json() : [];
-  images = images.concat(dir.filter((img) => (img.endsWith('.jpg'))));

-  // enumerate additional error images, not included in git repository
-  res = await fetch('/private/err');
-  dir = (res && res.ok) ? await res.json() : [];
-  images = images.concat(dir.filter((img) => (img.endsWith('.jpg'))));
-  log('Enumerated:', images.length, 'images');

   // could not dynamically enumerate images so using static list
   if (images.length === 0) {
     images = [
-      'sample1.jpg',
-      'sample2.jpg',
-      'sample3.jpg',
-      'sample4.jpg',
-      'sample5.jpg',
-      'sample6.jpg',
-      'sample6.jpg',
-      'sample-me.jpg',
-      'human-sample-face.jpg',
-      'human-sample-upper.jpg',
-      'human-sample-body.jpg',
+      'groups/group1.jpg',
+      'groups/group2.jpg',
+      'groups/group3.jpg',
+      'groups/group4.jpg',
+      'groups/group5.jpg',
+      'groups/group6.jpg',
+      'groups/group7.jpg',
+      'groups/group8.jpg',
+      'groups/group9.jpg',
+      'groups/group10.jpg',
+      'groups/group11.jpg',
+      'groups/group12.jpg',
+      'groups/group13.jpg',
+      'groups/group14.jpg',
     ];
     // add prefix for gitpages
-    images = images.map((a) => `/human/assets/${a}`);
+    images = images.map((a) => `/samples/${a}`);
     log('Adding static image list:', images.length, 'images');
   }
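For orientation, a hedged sketch of how one of the enumerated image URLs could then be run through detection in the browser; `processImage` is a hypothetical helper, not part of the demo code above:

```js
// sketch: run detection on one enumerated image URL in the browser
async function processImage(human, url) {
  const img = new Image();
  await new Promise((resolve, reject) => {
    img.onload = () => resolve(img);
    img.onerror = reject;
    img.src = url;
  });
  const result = await human.detect(img); // Human accepts Image/Canvas/Video elements as input
  console.log('processed', url, 'faces found:', result.face.length);
  return result;
}
```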
@@ -0,0 +1,4 @@
+# Human Library: Sample Images
+
+Sample Images used by `Human` library demos and automated tests
+Not required for normal functioning of library
@@ -244,7 +244,7 @@ const config: Config = {
     rotation: true, // use best-guess rotated face image or just box with rotation as-is
     // false means higher performance, but incorrect mesh mapping if face angle is above 20 degrees
     // this parameter is not valid in nodejs
-    maxDetected: 5, // maximum number of faces detected in the input
+    maxDetected: 15, // maximum number of faces detected in the input
     // should be set to the minimum number for performance
     skipFrames: 15, // how many max frames to go without re-running the face bounding box detector
     // only used when cacheSensitivity is not zero
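A hedged sketch of overriding these two defaults per instance instead of editing `config.ts`; the `face.detector` nesting is an assumption based on the option names shown in the hunk:

```js
// sketch: per-instance override of the defaults changed above
import Human from '@vladmandic/human';

const human = new Human({
  face: {
    detector: {
      rotation: true,  // use best-guess rotated face image
      maxDetected: 15, // allow more faces, e.g. for the group sample images
      skipFrames: 15,  // re-run the bounding box detector at most every 15 frames
    },
  },
});
```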
@@ -211,7 +211,6 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
       : 0;

     // combine results
-    if (faces[i].image) delete faces[i].image;
     faceRes.push({
       ...faces[i],
       id: i,

@@ -226,6 +225,8 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
     });
     // dispose original face tensor
     tf.dispose(faces[i].image);
+    // delete temp face image
+    if (faces[i].image) delete faces[i].image;

     parent.analyze('End Face');
   }
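The reordering above (dispose the tensor first, then delete the property) follows the usual tfjs memory-management pattern; a standalone sketch of the same idea, independent of Human internals:

```js
// sketch: dispose a temporary tensor before deleting the property that holds it
import * as tf from '@tensorflow/tfjs';

function releaseTempImage(item) {
  if (item.image instanceof tf.Tensor) tf.dispose(item.image); // free the tensor's backing memory
  if (item.image) delete item.image;                           // then drop the stale reference
}
```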
@@ -2,6 +2,8 @@

 ## Automatic Tests

+Not required for normal functioning of library
+
 ### NodeJS using TensorFlow library

 - Image filters are disabled due to lack of Canvas and WebGL access
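A minimal sketch of a NodeJS-side configuration that makes the disabled filters explicit; the `filter.enabled` key and the `'tensorflow'` backend value are assumptions about the library's config schema:

```js
// sketch: NodeJS-oriented configuration with image filters turned off
const nodeConfig = {
  backend: 'tensorflow',      // assumed: tfjs-node backend used for NodeJS tests
  filter: { enabled: false }, // no Canvas/WebGL under NodeJS, so skip image filters
};
```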
@@ -140,22 +140,22 @@ async function test(Human, inputConfig) {

   log('info', 'test body variants');
   config.body = { modelPath: 'posenet.json', enabled: true };
-  await testDetect(human, 'assets/human-sample-body.jpg', 'posenet');
+  await testDetect(human, 'samples/ai-body.jpg', 'posenet');
   config.body = { modelPath: 'movenet-lightning.json', enabled: true };
-  await testDetect(human, 'assets/human-sample-body.jpg', 'movenet');
+  await testDetect(human, 'samples/ai-body.jpg', 'movenet');

   await testDetect(human, null, 'default');
   log('info', 'test: first instance');
-  await testDetect(human, 'assets/sample-me.jpg', 'default');
+  await testDetect(human, 'samples/ai-upper.jpg', 'default');
   log('info', 'test: second instance');
   const second = new Human(config);
-  await testDetect(second, 'assets/sample-me.jpg', 'default');
+  await testDetect(second, 'samples/ai-upper.jpg', 'default');
   log('info', 'test: concurrent');
   await Promise.all([
-    testDetect(human, 'assets/human-sample-face.jpg', 'default'),
-    testDetect(second, 'assets/human-sample-face.jpg', 'default'),
-    testDetect(human, 'assets/human-sample-body.jpg', 'default'),
-    testDetect(second, 'assets/human-sample-body.jpg', 'default'),
+    testDetect(human, 'samples/ai-face.jpg', 'default'),
+    testDetect(second, 'samples/ai-face.jpg', 'default'),
+    testDetect(human, 'samples/ai-body.jpg', 'default'),
+    testDetect(second, 'samples/ai-body.jpg', 'default'),
   ]);
   const t1 = process.hrtime.bigint();
   log('info', 'test complete:', Math.trunc(Number(t1 - t0) / 1000 / 1000), 'ms');
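For completeness, a hedged sketch of what a `testDetect`-style helper might do under NodeJS, reading a sample file from disk and decoding it with tfjs-node; every name here is illustrative rather than the actual test harness:

```js
// sketch: decode a sample image from disk and run detection under NodeJS
const fs = require('fs');
const tf = require('@tensorflow/tfjs-node');

async function detectFromFile(human, fileName) {
  const buffer = fs.readFileSync(fileName);
  const tensor = tf.node.decodeImage(buffer, 3); // 3-channel uint8 tensor
  const result = await human.detect(tensor);
  tf.dispose(tensor); // release the input tensor once detection is done
  return result;
}
```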