improve face matching

pull/293/head
Vladimir Mandic 2021-03-23 15:24:58 -04:00
parent 870ac26f5d
commit 4c3e9818c8
3 changed files with 29 additions and 19 deletions

View File

@ -23,12 +23,23 @@
</style> </style>
</head> </head>
<body> <body>
<br>Sample Images: <div style="display: block">
<div id="images"></div> <div style="display: flex">
<br>Selected Face (Enhanced):<br> <div>
<canvas id="orig" style="width: 200px; height: 200px;"></canvas> Selected Face<br>
<span id="desc" style="visibility: hidden; font-size: 0.4rem;"></span> <canvas id="orig" style="width: 200px; height: 200px;"></canvas>
<br><br>Extracted Faces - click on a face to sort by similarity and get a known face match:<br> </div>
<div id="faces"></div> <div style="width: 20px"></div>
<div>
Sample Images<br>
<div id="images"></div>
</div>
</span>
<span id="desc" style="visibility: hidden; font-size: 0.4rem;"></span><br>
</div>
<div style="height: 10px"></div>
Extracted Faces - click on a face to sort by similarity and get a known face match:<br>
<div id="faces"></div>
</div>
</body> </body>
</html> </html>

View File

@ -61,10 +61,9 @@ async function analyze(face) {
for (const canvas of canvases) { for (const canvas of canvases) {
// calculate similarity from selected face to current one in the loop // calculate similarity from selected face to current one in the loop
const current = all[canvas.tag.sample][canvas.tag.face]; const current = all[canvas.tag.sample][canvas.tag.face];
const similarity = human.similarity(face.embedding, current.embedding, 2); const similarity = human.similarity(face.embedding, current.embedding, 3);
// get best match // get best match
const person = await human.match(current.embedding, db); // draw the canvas
// draw the canvas and similarity score
canvas.title = similarity; canvas.title = similarity;
await human.tf.browser.toPixels(current.tensor, canvas); await human.tf.browser.toPixels(current.tensor, canvas);
const ctx = canvas.getContext('2d'); const ctx = canvas.getContext('2d');
@ -75,8 +74,10 @@ async function analyze(face) {
ctx.fillText(`${(100 * similarity).toFixed(1)}%`, 4, 24); ctx.fillText(`${(100 * similarity).toFixed(1)}%`, 4, 24);
ctx.font = 'small-caps 0.8rem "Lato"'; ctx.font = 'small-caps 0.8rem "Lato"';
ctx.fillText(`${current.age}y ${(100 * current.genderConfidence).toFixed(1)}% ${current.gender}`, 4, canvas.height - 6); ctx.fillText(`${current.age}y ${(100 * current.genderConfidence).toFixed(1)}% ${current.gender}`, 4, canvas.height - 6);
ctx.font = 'small-caps 1rem "Lato"'; // identify person
if (person.similarity) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30); // ctx.font = 'small-caps 1rem "Lato"';
// const person = await human.match(current.embedding, db);
// if (person.similarity) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30);
} }
// sort all faces by similarity // sort all faces by similarity
@ -109,9 +110,9 @@ async function faces(index, res, fileName) {
ctx.font = 'small-caps 0.8rem "Lato"'; ctx.font = 'small-caps 0.8rem "Lato"';
ctx.fillStyle = 'rgba(255, 255, 255, 1)'; ctx.fillStyle = 'rgba(255, 255, 255, 1)';
ctx.fillText(`${res.face[i].age}y ${(100 * res.face[i].genderConfidence).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6); ctx.fillText(`${res.face[i].age}y ${(100 * res.face[i].genderConfidence).toFixed(1)}% ${res.face[i].gender}`, 4, canvas.height - 6);
const person = await human.match(res.face[i].embedding, db); // const person = await human.match(res.face[i].embedding, db);
ctx.font = 'small-caps 1rem "Lato"'; // ctx.font = 'small-caps 1rem "Lato"';
if (person.similarity && person.similarity > 0.60) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30); // if (person.similarity && person.similarity > 0.60) ctx.fillText(`${(100 * person.similarity).toFixed(1)}% ${person.name}`, 4, canvas.height - 30);
} }
} }
} }

View File

@ -47,15 +47,13 @@ export function enhance(input): Tensor {
// input received from detector is already normalized to 0..1 // input received from detector is already normalized to 0..1
// input is also assumed to be straightened // input is also assumed to be straightened
const tensor = input.image || input.tensor || input; const tensor = input.image || input.tensor || input;
/* if (!(tensor instanceof tf.Tensor)) return null;
// do a tight crop of image and resize it to fit the model // do a tight crop of image and resize it to fit the model
const box = [[0.05, 0.15, 0.85, 0.85]]; // empirical values for top, left, bottom, right const box = [[0.05, 0.15, 0.85, 0.85]]; // empirical values for top, left, bottom, right
if (!(tensor instanceof tf.Tensor)) return null;
const crop = (tensor.shape.length === 3) const crop = (tensor.shape.length === 3)
? tf.image.cropAndResize(tf.expandDims(tensor, 0), box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) // add batch dimension if missing ? tf.image.cropAndResize(tf.expandDims(tensor, 0), box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) // add batch dimension if missing
: tf.image.cropAndResize(tensor, box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]); : tf.image.cropAndResize(tensor, box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
*/ // const crop = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false); // just resize to fit the embedding model
const crop = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false); // just resize to fit the embedding model
/* /*
// convert to black&white to avoid colorization impact // convert to black&white to avoid colorization impact