mirror of https://github.com/vladmandic/human
fine tuning age and face models
parent 4eb6fa709c
commit b77b98e8d4
new file: markdownlint configuration

@@ -0,0 +1,6 @@
+{
+  "MD012": false,
+  "MD013": false,
+  "MD033": false,
+  "MD036": false
+}
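The four disabled rules are markdownlint's MD012 (no multiple consecutive blank lines), MD013 (line length), MD033 (no inline HTML) and MD036 (no emphasis used in place of a heading), so the project's markdown docs can keep long lines, inline HTML and extra spacing without lint warnings.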
config.js (10 lines changed)
@@ -82,10 +82,10 @@ export default {
   // in short time (10 * 1/25 = 0.25 sec)
   skipInitial: false, // if previous detection resulted in no faces detected,
   // should skipFrames be reset immediately
-  minConfidence: 0.2, // threshold for discarding a prediction
-  iouThreshold: 0.2, // threshold for deciding whether boxes overlap too much in
+  minConfidence: 0.1, // threshold for discarding a prediction
+  iouThreshold: 0.1, // threshold for deciding whether boxes overlap too much in
   // non-maximum suppression (0.1 means drop if overlap 10%)
-  scoreThreshold: 0.2, // threshold for deciding when to remove boxes based on score
+  scoreThreshold: 0.1, // threshold for deciding when to remove boxes based on score
   // in non-maximum suppression,
   // this is applied on detection objects only and before minConfidence
 },
@@ -114,7 +114,7 @@ export default {

   gender: {
     enabled: true,
-    minConfidence: 0.4, // threshold for discarding a prediction
+    minConfidence: 0.1, // threshold for discarding a prediction
     modelPath: '../models/gender.json', // can be 'gender', 'gender-ssrnet-imdb' or 'gender-ssrnet-wiki'
     inputSize: 64, // fixed value
     skipFrames: 41, // how many frames to go without re-running the detector
@@ -124,7 +124,7 @@ export default {
   emotion: {
     enabled: true,
     inputSize: 64, // fixed value
-    minConfidence: 0.2, // threshold for discarding a prediction
+    minConfidence: 0.1, // threshold for discarding a prediction
     skipFrames: 21, // how many frames to go without re-running the detector
     modelPath: '../models/emotion-large.json', // can be 'mini', 'large'
   },
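Taken together, the config.js changes loosen every face-related threshold to 0.1 (gender drops from 0.4), so more low-confidence detections survive. A minimal sketch of how a consumer could pin the previous, stricter values, assuming the usual pattern of a partial user config merged over these defaults; the nesting under face mirrors config.js, while the overrides name and the detect(input, overrides) call are illustrative assumptions, not something this diff shows:

    // hypothetical partial override -- only the keys listed here change,
    // everything omitted falls back to the defaults in config.js
    const overrides = {
      face: {
        detector: { minConfidence: 0.2, iouThreshold: 0.2, scoreThreshold: 0.2 },
        gender: { minConfidence: 0.4 },
        emotion: { minConfidence: 0.2 },
      },
    };
    // assumed usage: merged over defaults when running detection, e.g.
    // const result = await human.detect(input, overrides);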
demo script (ui defaults and sample image processing)

@@ -31,7 +31,7 @@ const ui = {
   baseFontProto: 'small-caps {size} "Segoe UI"',
   baseLineWidth: 12,
   crop: true,
-  columns: 2,
+  columns: 4,
   busy: false,
   facing: true,
   useWorker: false,
@@ -339,7 +339,7 @@ async function processImage(input) {
   return new Promise((resolve) => {
     const image = new Image();
     image.onload = async () => {
-      log('Processing image:', image.src);
+      log('Processing image:', encodeURI(image.src));
       const canvas = document.getElementById('canvas');
       image.width = image.naturalWidth;
       image.height = image.naturalHeight;
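Wrapping the logged src in encodeURI() percent-escapes spaces and non-ASCII characters, presumably so sample paths containing such characters are logged as well-formed URLs.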
@@ -351,7 +351,12 @@ async function processImage(input) {
       const thumb = document.createElement('canvas');
       thumb.className = 'thumbnail';
       thumb.width = window.innerWidth / (ui.columns + 0.1);
-      thumb.height = canvas.height / (window.innerWidth / thumb.width);
+      thumb.height = thumb.width * canvas.height / canvas.width;
+      if (result.face && result.face.length > 0) {
+        thumb.title = result.face.map((a, i) => `#${i} face: ${Math.trunc(100 * a.faceConfidence)}% box: ${Math.trunc(100 * a.boxConfidence)}% age: ${Math.trunc(a.age)} gender: ${Math.trunc(100 * a.genderConfidence)}% ${a.gender}`).join(' | ');
+      } else {
+        thumb.title = 'no face detected';
+      }
       const ctx = thumb.getContext('2d');
       ctx.drawImage(canvas, 0, 0, canvas.width, canvas.height, 0, 0, thumb.width, thumb.height);
       document.getElementById('samples-container').appendChild(thumb);
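The new height formula keeps each thumbnail at the canvas's own aspect ratio rather than the window's. With illustrative numbers (a 1280x720 canvas, window.innerWidth of 1200, and the new ui.columns of 4): thumb.width = 1200 / 4.1 ≈ 293; the old formula gave thumb.height = 720 / (1200 / 293) ≈ 176, while the new one gives 293 * 720 / 1280 ≈ 165, matching the 16:9 source. The two only agree when canvas.width happens to equal window.innerWidth.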
@@ -403,6 +408,7 @@ async function detectSampleImages() {
   log('Running detection of sample images');
   status('processing images');
   document.getElementById('samples-container').innerHTML = '';
+  for (const m of Object.values(menu)) m.hide();
   for (const image of ui.samples) await processImage(image);
   status('');
 }
library source (class Human)

@@ -235,7 +235,7 @@ class Human {
     let genderRes;
     let emotionRes;
     let embeddingRes;
-    const faceRes: Array<{ confidence: number, box: any, mesh: any, meshRaw: any, boxRaw: any, annotations: any, age: number, gender: string, genderConfidence: number, emotion: string, embedding: any, iris: number }> = [];
+    const faceRes: Array<{ confidence: number, boxConfidence: number, faceConfidence: number, box: any, mesh: any, meshRaw: any, boxRaw: any, annotations: any, age: number, gender: string, genderConfidence: number, emotion: string, embedding: any, iris: number }> = [];
     this.state = 'run:face';
     timeStamp = now();
     const faces = await this.models.face?.estimateFaces(input, this.config);
@@ -316,6 +316,8 @@ class Human {
       // combine results
       faceRes.push({
         confidence: face.confidence,
+        faceConfidence: face.faceConfidence,
+        boxConfidence: face.boxConfidence,
         box: face.box,
         mesh: face.mesh,
         boxRaw: face.boxRaw,
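Each entry in faceRes (surfaced to callers as result.face, which is how the demo's thumbnail titles read it) now carries both scores separately. A minimal consumer-side sketch; the loop and console output are illustrative assumptions:

    // illustrative only: result.face is the faceRes array assembled above
    for (const [i, face] of result.face.entries()) {
      const box = Math.trunc(100 * face.boxConfidence);       // confidence reported for the detected box
      const refined = Math.trunc(100 * face.faceConfidence);  // confidence reported for the face itself
      console.log(`face #${i}: box ${box}% face ${refined}% age ${Math.trunc(face.age)} gender ${face.gender}`);
    }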