enhanced automated test coverage

pull/356/head
Vladimir Mandic 2021-09-20 09:42:34 -04:00
parent 57f5fd391f
commit 384d94c0cb
8 changed files with 91 additions and 14 deletions

View File

@@ -9,8 +9,9 @@
## Changelog
### **HEAD -> main** 2021/09/18 mandic00@live.com
### **HEAD -> main** 2021/09/19 mandic00@live.com
- added configuration validation
- prevent validation failure on some model combinations
- webgl exception handling

File diff suppressed because one or more lines are too long

View File

@@ -66,7 +66,7 @@
"@tensorflow/tfjs-layers": "^3.9.0",
"@tensorflow/tfjs-node": "^3.9.0",
"@tensorflow/tfjs-node-gpu": "^3.9.0",
"@types/node": "^16.9.3",
"@types/node": "^16.9.4",
"@typescript-eslint/eslint-plugin": "^4.31.1",
"@typescript-eslint/parser": "^4.31.1",
"@vladmandic/build": "^0.5.2",

View File

@@ -442,7 +442,7 @@ export class Human {
}
if (!img.tensor) {
log('could not convert input to tensor');
if (this.config.debug) log('could not convert input to tensor');
resolve({ error: 'could not convert input to tensor' });
return;
}
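With the log gated on config.debug, a failed conversion now surfaces only through the resolved result; a minimal caller sketch (badInput is hypothetical, and the call assumes an async context):

const human = new Human({ debug: false });
const result = await human.detect(badInput); // resolves with an error property rather than throwing
if (result.error) console.warn('detection skipped:', result.error);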

View File

@@ -39,9 +39,13 @@ export function canvas(width, height): HTMLCanvasElement | OffscreenCanvas {
// process input image and return tensor
// input can be tensor, imagedata, htmlimageelement, htmlvideoelement
// input is resized and run through imagefx filter
export function process(input: Input, config: Config): { tensor: Tensor | null, canvas: OffscreenCanvas | HTMLCanvasElement } {
export function process(input: Input, config: Config): { tensor: Tensor | null, canvas: OffscreenCanvas | HTMLCanvasElement | null } {
let tensor;
if (!input) throw new Error('input is missing');
if (!input) {
// throw new Error('input is missing');
if (config.debug) log('input is missing');
return { tensor: null, canvas: null }; // video may become temporarily unavailable due to onresize
}
// sanity checks since different browsers do not implement all dom elements
if (
!(input instanceof tf.Tensor)
@@ -64,13 +68,13 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
} else {
// check if resizing will be needed
if (typeof input['readyState'] !== 'undefined' && input['readyState'] <= 2) {
log('input stream is not ready');
if (config.debug) log('input stream is not ready');
return { tensor: null, canvas: inCanvas }; // video may become temporarily unavailable due to onresize
}
const originalWidth = input['naturalWidth'] || input['videoWidth'] || input['width'] || (input['shape'] && (input['shape'][1] > 0));
const originalHeight = input['naturalHeight'] || input['videoHeight'] || input['height'] || (input['shape'] && (input['shape'][2] > 0));
if (!originalWidth || !originalHeight) {
log('cannot determine input dimensions');
if (config.debug) log('cannot determine input dimensions');
return { tensor: null, canvas: inCanvas }; // video may become temporarily unavailable due to onresize
}
let targetWidth = originalWidth;
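Because process() now degrades gracefully instead of throwing on missing input, callers should null-check the returned tensor before running inference; a minimal sketch assuming the signature above (videoElement is hypothetical):

const { tensor, canvas } = process(videoElement, config);
if (!tensor) {
  // input missing or stream not ready (e.g. during onresize): skip this frame
} else {
  // run inference on tensor here, then release its memory
  tensor.dispose();
}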

View File

@@ -1,3 +1,4 @@
const fs = require('fs');
const process = require('process');
const canvasJS = require('canvas');
@@ -165,6 +166,13 @@ async function test(Human, inputConfig) {
else log('error', 'failed: configuration default validation', invalid);
delete config.invalid;
// test model loading
await human.load();
const models = Object.keys(human.models).map((model) => ({ name: model, loaded: (human.models[model] !== null) }));
const loaded = models.filter((model) => model.loaded);
if (models.length === 14 && loaded.length === 7) log('state', 'passed: models loaded', models.length, loaded.length);
else log('error', 'failed: models loaded', models.length, loaded.length);
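// note: the expected counts (14 defined, 7 loaded) are pinned to this commit's default configuration and will need updating as models are added or removed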
// test warmup sequences
await testInstance(human);
config.warmup = 'none';
@@ -175,28 +183,73 @@ async function test(Human, inputConfig) {
res = await testWarmup(human, 'default');
if (!res || res?.face?.length !== 1 || res?.body?.length !== 1 || res?.hand?.length !== 0 || res?.gesture?.length !== 3) log('error', 'failed: warmup face result mismatch', res?.face?.length, res?.body?.length, res?.hand?.length, res?.gesture?.length);
else log('state', 'passed: warmup face result match');
config.warmup = 'body';
res = await testWarmup(human, 'default');
if (!res || res?.face?.length !== 1 || res?.body?.length !== 1 || res?.hand?.length !== 0 || res?.gesture?.length !== 3) log('error', 'failed: warmup body result mismatch', res?.face?.length, res?.body?.length, res?.hand?.length, res?.gesture?.length);
else log('state', 'passed: warmup body result match');
// test default config
// test default config async
log('info', 'test default');
human.reset();
config.async = true;
config.cacheSensitivity = 0;
res = await testDetect(human, 'samples/ai-body.jpg', 'default');
if (!res || res?.face?.length !== 1 || res?.face[0].gender !== 'female') log('error', 'failed: default result face mismatch', res?.face?.length, res?.body?.length, res?.hand?.length, res?.gesture?.length);
else log('state', 'passed: default result face match');
// test default config
// test default config sync
log('info', 'test sync');
human.reset();
config.async = false;
config.cacheSensitivity = 0;
res = await testDetect(human, 'samples/ai-body.jpg', 'default');
if (!res || res?.face?.length !== 1 || res?.face[0].gender !== 'female') log('error', 'failed: default sync', res?.face?.length, res?.body?.length, res?.hand?.length, res?.gesture?.length);
else log('state', 'passed: default sync');
// test image processing
const img1 = await human.image(null);
const img2 = await human.image(await getImage(human, 'samples/ai-face.jpg'));
if (!img1 || !img2 || img1.tensor !== null || img2.tensor?.shape?.length !== 4) log('error', 'failed: image input', img1?.tensor?.shape, img2?.tensor?.shape);
else log('state', 'passed: image input', img1?.tensor?.shape, img2?.tensor?.shape);
// test null input
res = await human.detect(null);
if (!res || !res.error) log('error', 'failed: invalid input', res);
else log('state', 'passed: invalid input', res);
// test face similarity
log('info', 'test face similarity');
human.reset();
config.async = false;
config.cacheSensitivity = 0;
let res1 = await testDetect(human, 'samples/ai-face.jpg', 'default');
let res2 = await testDetect(human, 'samples/ai-body.jpg', 'default');
let res3 = await testDetect(human, 'samples/ai-upper.jpg', 'default');
const desc1 = res1 && res1.face && res1.face[0] && res1.face[0].embedding ? [...res1.face[0].embedding] : null;
const desc2 = res2 && res2.face && res2.face[0] && res2.face[0].embedding ? [...res2.face[0].embedding] : null;
const desc3 = res3 && res3.face && res3.face[0] && res3.face[0].embedding ? [...res3.face[0].embedding] : null;
if (!desc1 || !desc2 || !desc3 || desc1.length !== 1024 || desc2.length !== 1024 || desc3.length !== 1024) log('error', 'failed: face descriptor', desc1?.length, desc2?.length, desc3?.length);
else log('state', 'passed: face descriptor');
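// similarity() returns a normalized 0..1 score, so scaling by 100 yields a percentage; the exact values asserted below are tied to the current model weights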
res1 = Math.round(100 * human.similarity(desc1, desc2));
res2 = Math.round(100 * human.similarity(desc1, desc3));
res3 = Math.round(100 * human.similarity(desc2, desc3));
if (res1 !== 51 || res2 !== 49 || res3 !== 53) log('error', 'failed: face similarity', res1, res2, res3);
else log('state', 'passed: face similarity');
// test face matching
log('info', 'test face matching');
let db = [];
try {
db = JSON.parse(fs.readFileSync('demo/facematch/faces.json').toString());
} catch { /***/ }
if (db.length < 100) log('error', 'failed: face database', db.length);
else log('state', 'passed: face database', db.length);
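// match() scans the database for the embedding closest to the descriptor and returns that entry along with its similarity score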
res1 = human.match(desc1, db);
res2 = human.match(desc2, db);
res3 = human.match(desc3, db);
if (!res1 || !res1['name'] || !res2 || !res2['name'] || !res3 || !res3['name']) log('error', 'failed: face match', res1);
else log('state', 'passed: face match', { first: { name: res1.name, similarity: res1.similarity } }, { second: { name: res2.name, similarity: res2.similarity } }, { third: { name: res3.name, similarity: res3.similarity } });
// test object detection
log('info', 'test object');
human.reset();
@@ -273,6 +326,22 @@ async function test(Human, inputConfig) {
testDetect(second, 'samples/ai-upper.jpg', 'default'),
]);
// test monkey-patch
human.env.Canvas = canvasJS.Canvas; // monkey-patch human to use external canvas library
const inputImage = await canvasJS.loadImage('samples/ai-face.jpg'); // load image using canvas library
const inputCanvas = new canvasJS.Canvas(inputImage.width, inputImage.height); // create canvas
const ctx = inputCanvas.getContext('2d');
ctx.drawImage(inputImage, 0, 0); // draw input image onto canvas
res = await human.detect(inputCanvas);
if (!res || res?.face?.length !== 1) log('error', 'failed: monkey patch');
else log('state', 'passed: monkey patch');
// test segmentation
res = await human.segmentation(inputCanvas, inputCanvas);
if (!res || !res.width || !res.height) log('error', 'failed: segmentation', res);
else log('state', 'passed: segmentation', [res.width, res.height]);
human.env.Canvas = undefined;
// tests end
const t1 = process.hrtime.bigint();
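The monkey-patch block above doubles as a recipe for running Human in Node with the node-canvas package; the same pattern condensed (the sample path is taken from the test itself):

const canvasJS = require('canvas');
human.env.Canvas = canvasJS.Canvas; // let Human create canvases via node-canvas
const image = await canvasJS.loadImage('samples/ai-face.jpg');
const input = new canvasJS.Canvas(image.width, image.height);
input.getContext('2d').drawImage(image, 0, 0); // rasterize the image onto the canvas
const result = await human.detect(input);
human.env.Canvas = undefined; // restore default canvas handling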

View File

@@ -27,7 +27,7 @@ const config = {
},
hand: { enabled: true, rotation: false },
body: { enabled: true },
object: { enabled: false },
object: { enabled: true },
segmentation: { enabled: true },
filter: { enabled: false },
};

View File

@@ -60,6 +60,7 @@ function logStdIO(ok, test, buffer) {
}
}
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
async function runTest(test) {
log.info();
log.info(test, 'start');
@@ -73,6 +74,7 @@ async function runTest(test) {
});
}
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
async function runDemo(demo) {
log.info();
log.info(demo, 'start');
@@ -94,7 +96,8 @@ async function testAll() {
process.on('unhandledRejection', (data) => log.error('nodejs unhandled rejection', data));
process.on('uncaughtException', (data) => log.error('nodejs unhandled exception', data));
log.info('tests:', tests);
for (const demo of demos) await runDemo(demo);
log.info('demos:', demos);
// for (const demo of demos) await runDemo(demo);
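// demos are enumerated above for visibility but not executed in this run; runDemo stays defined (see the eslint-disable annotation) for when they are re-enabled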
for (const test of tests) await runTest(test);
log.info();
log.info('status:', status);