From 4c3e9818c80d9af7b6cf8347b021c0791a23888b Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Tue, 23 Mar 2021 15:24:58 -0400 Subject: [PATCH] improve face matching --- demo/embedding.html | 25 ++++++++++++++++++------- demo/embedding.js | 17 +++++++++-------- src/faceres/faceres.ts | 6 ++---- 3 files changed, 29 insertions(+), 19 deletions(-) diff --git a/demo/embedding.html b/demo/embedding.html index c0f4f608..b74cd5fa 100644 --- a/demo/embedding.html +++ b/demo/embedding.html @@ -23,12 +23,23 @@ -
Sample Images: -
-
Selected Face (Enhanced):
- - -

Extracted Faces - click on a face to sort by similarity and get a known face match:
-
+
+
+
+ Selected Face
+ +
+
+
+ Sample Images
+
+
+ +
+
+
+ Extracted Faces - click on a face to sort by similarity and get a known face match:
+
+
diff --git a/demo/embedding.js b/demo/embedding.js index 97748bc6..98a85b41 100644 --- a/demo/embedding.js +++ b/demo/embedding.js @@ -61,10 +61,9 @@ async function analyze(face) { for (const canvas of canvases) { // calculate similarity from selected face to current one in the loop const current = all[canvas.tag.sample][canvas.tag.face]; - const similarity = human.similarity(face.embedding, current.embedding, 2); + const similarity = human.similarity(face.embedding, current.embedding, 3); // get best match - const person = await human.match(current.embedding, db); - // draw the canvas and similarity score + // draw the canvas canvas.title = similarity; await human.tf.browser.toPixels(current.tensor, canvas); const ctx = canvas.getContext('2d'); @@ -75,8 +74,10 @@ async function analyze(face) { ctx.fillText(`${(100 * similarity).toFixed(1)}%`, 4, 24); ctx.font = 'small-caps 0.8rem "Lato"'; ctx.fillText(`${current.age}y ${(100 * current.genderConfidence).toFixed(1)}% ${current.gender}`, 4, canvas.height - 6); - ctx.font = 'small-caps 1rem "Lato"'; - if (person.similarity) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30); + // identify person + // ctx.font = 'small-caps 1rem "Lato"'; + // const person = await human.match(current.embedding, db); + // if (person.similarity) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30); } // sort all faces by similarity @@ -109,9 +110,9 @@ async function faces(index, res, fileName) { ctx.font = 'small-caps 0.8rem "Lato"'; ctx.fillStyle = 'rgba(255, 255, 255, 1)'; ctx.fillText(`${res.face[i].age}y ${(100 * res.face[i].genderConfidence).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6); - const person = await human.match(res.face[i].embedding, db); - ctx.font = 'small-caps 1rem "Lato"'; - if (person.similarity && person.similarity > 0.60) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30); + 
// const person = await human.match(res.face[i].embedding, db); + // ctx.font = 'small-caps 1rem "Lato"'; + // if (person.similarity && person.similarity > 0.60) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30); } } } diff --git a/src/faceres/faceres.ts b/src/faceres/faceres.ts index a3fed331..4b6c83a6 100644 --- a/src/faceres/faceres.ts +++ b/src/faceres/faceres.ts @@ -47,15 +47,13 @@ export function enhance(input): Tensor { // input received from detector is already normalized to 0..1 // input is also assumed to be straightened const tensor = input.image || input.tensor || input; - /* + if (!(tensor instanceof tf.Tensor)) return null; // do a tight crop of image and resize it to fit the model const box = [[0.05, 0.15, 0.85, 0.85]]; // empyrical values for top, left, bottom, right - if (!(tensor instanceof tf.Tensor)) return null; const crop = (tensor.shape.length === 3) ? tf.image.cropAndResize(tf.expandDims(tensor, 0), box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) // add batch dimension if missing : tf.image.cropAndResize(tensor, box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]); - */ - const crop = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false); // just resize to fit the embedding model + // const crop = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false); // just resize to fit the embedding model /* // convert to black&white to avoid colorization impact