update tests and demos

pull/280/head
Vladimir Mandic 2021-06-02 13:35:33 -04:00
parent f2c937d2cd
commit 9d7a3148ec
9 changed files with 41 additions and 39 deletions

View File

@ -9,14 +9,16 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
## Changelog
### **HEAD -> main** 2021/06/01 mandic00@live.com
### **HEAD -> main** 2021/06/02 mandic00@live.com
- implemented drag & drop for image processing
### **origin/main** 2021/06/01 mandic00@live.com
- breaking changes to results.face output properties
- breaking changes to results.object output properties
- breaking changes to results.hand output properties
- breaking changes to results.body output properties
### **origin/main** 2021/05/31 mandic00@live.com
- implemented human.next global interpolation method
- finished draw buffering and smoothing and enabled by default
- implemented service worker

View File

@ -30,6 +30,7 @@ Check out [**Live Demo**](https://vladmandic.github.io/human/demo/index.html) ap
- To start video detection, simply press *Play*
- To process images, simply drag & drop in your Browser window
- Note: For optimal performance, select only models you'd like to use
- Note: If you have a modern GPU, the WebGL (default) backend is preferred; otherwise select the WASM backend
<br>

File diff suppressed because one or more lines are too long

View File

@ -183,44 +183,36 @@ async function main() {
// pre-load human models
await human.load();
let res;
let images = [];
let dir = [];
// load face descriptor database
await getFaceDB();
// enumerate all sample images in /assets
res = await fetch('/assets');
const res = await fetch('/samples/groups');
dir = (res && res.ok) ? await res.json() : [];
images = images.concat(dir.filter((img) => (img.endsWith('.jpg') && img.includes('sample'))));
// enumerate additional private test images in /private, not included in git repository
res = await fetch('/private/me');
dir = (res && res.ok) ? await res.json() : [];
images = images.concat(dir.filter((img) => (img.endsWith('.jpg'))));
// enumerate additional error images, not included in git repository
res = await fetch('/private/err');
dir = (res && res.ok) ? await res.json() : [];
images = images.concat(dir.filter((img) => (img.endsWith('.jpg'))));
log('Enumerated:', images.length, 'images');
// could not dynamically enumerate images so using static list
if (images.length === 0) {
images = [
'sample1.jpg',
'sample2.jpg',
'sample3.jpg',
'sample4.jpg',
'sample5.jpg',
'sample6.jpg',
'sample6.jpg',
'sample-me.jpg',
'human-sample-face.jpg',
'human-sample-upper.jpg',
'human-sample-body.jpg',
'groups/group1.jpg',
'groups/group2.jpg',
'groups/group3.jpg',
'groups/group4.jpg',
'groups/group5.jpg',
'groups/group6.jpg',
'groups/group7.jpg',
'groups/group8.jpg',
'groups/group9.jpg',
'groups/group10.jpg',
'groups/group11.jpg',
'groups/group12.jpg',
'groups/group13.jpg',
'groups/group14.jpg',
];
// add prefix for gitpages
images = images.map((a) => `/human/assets/${a}`);
images = images.map((a) => `/samples/${a}`);
log('Adding static image list:', images.length, 'images');
}

4
samples/README.md Normal file
View File

@ -0,0 +1,4 @@
# Human Library: Sample Images
Sample Images used by `Human` library demos and automated tests
Not required for normal functioning of the library

View File

@ -244,7 +244,7 @@ const config: Config = {
rotation: true, // use best-guess rotated face image or just box with rotation as-is
// false means higher performance, but incorrect mesh mapping if face angle is above 20 degrees
// this parameter is not valid in nodejs
maxDetected: 5, // maximum number of faces detected in the input
maxDetected: 15, // maximum number of faces detected in the input
// should be set to the minimum number for performance
skipFrames: 15, // how many max frames to go without re-running the face bounding box detector
// only used when cacheSensitivity is not zero

View File

@ -211,7 +211,6 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
: 0;
// combine results
if (faces[i].image) delete faces[i].image;
faceRes.push({
...faces[i],
id: i,
@ -226,6 +225,8 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
});
// dispose original face tensor
tf.dispose(faces[i].image);
// delete temp face image
if (faces[i].image) delete faces[i].image;
parent.analyze('End Face');
}

View File

@ -2,6 +2,8 @@
## Automatic Tests
Not required for normal functioning of the library
### NodeJS using TensorFlow library
- Image filters are disabled due to lack of Canvas and WebGL access

View File

@ -140,22 +140,22 @@ async function test(Human, inputConfig) {
log('info', 'test body variants');
config.body = { modelPath: 'posenet.json', enabled: true };
await testDetect(human, 'assets/human-sample-body.jpg', 'posenet');
await testDetect(human, 'samples/ai-body.jpg', 'posenet');
config.body = { modelPath: 'movenet-lightning.json', enabled: true };
await testDetect(human, 'assets/human-sample-body.jpg', 'movenet');
await testDetect(human, 'samples/ai-body.jpg', 'movenet');
await testDetect(human, null, 'default');
log('info', 'test: first instance');
await testDetect(human, 'assets/sample-me.jpg', 'default');
await testDetect(human, 'samples/ai-upper.jpg', 'default');
log('info', 'test: second instance');
const second = new Human(config);
await testDetect(second, 'assets/sample-me.jpg', 'default');
await testDetect(second, 'samples/ai-upper.jpg', 'default');
log('info', 'test: concurrent');
await Promise.all([
testDetect(human, 'assets/human-sample-face.jpg', 'default'),
testDetect(second, 'assets/human-sample-face.jpg', 'default'),
testDetect(human, 'assets/human-sample-body.jpg', 'default'),
testDetect(second, 'assets/human-sample-body.jpg', 'default'),
testDetect(human, 'samples/ai-face.jpg', 'default'),
testDetect(second, 'samples/ai-face.jpg', 'default'),
testDetect(human, 'samples/ai-body.jpg', 'default'),
testDetect(second, 'samples/ai-body.jpg', 'default'),
]);
const t1 = process.hrtime.bigint();
log('info', 'test complete:', Math.trunc(Number(t1 - t0) / 1000 / 1000), 'ms');