mirror of https://github.com/vladmandic/human
fix canvas size on different orientation
parent ceccda54cf
commit 8cc256bc93

@@ -53,7 +53,7 @@ export default {
     // 'front' is optimized for large faces such as front-facing camera and 'back' is optimized for distanct faces.
     inputSize: 256, // fixed value: 128 for front and 256 for 'back'
     maxFaces: 10, // maximum number of faces detected in the input, should be set to the minimum number for performance
-    skipFrames: 10, // how many frames to go without re-running the face bounding box detector, only used for video inputs
+    skipFrames: 15, // how many frames to go without re-running the face bounding box detector, only used for video inputs
     // if model is running st 25 FPS, we can re-use existing bounding box for updated face mesh analysis
     // as face probably hasn't moved much in short time (10 * 1/25 = 0.25 sec)
     minConfidence: 0.3, // threshold for discarding a prediction
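
The skipFrames values above drive a simple frame cache: the heavy bounding-box detector runs only every N frames, and the cached boxes are reused in between. A minimal sketch of that pattern, with a hypothetical runDetector placeholder standing in for the library's actual detector pass:

async function runDetector(input) { return []; } // placeholder for the full detector pass

let cachedBoxes = [];
let framesSinceDetect = Number.MAX_SAFE_INTEGER; // force a detector run on the first frame

async function detectFaces(input, config) {
  if (framesSinceDetect >= config.face.detector.skipFrames) {
    cachedBoxes = await runDetector(input); // expensive full pass
    framesSinceDetect = 0;
  } else {
    framesSinceDetect += 1; // reuse cached boxes: at 25 FPS, 15 frames is only 0.6 sec
  }
  return cachedBoxes;
}
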
@@ -76,7 +76,7 @@ export default {
     modelPath: '../models/ssrnet-age-imdb.json', // can be 'imdb' or 'wiki'
     // which determines training set for model
     inputSize: 64, // fixed value
-    skipFrames: 10, // how many frames to go without re-running the detector, only used for video inputs
+    skipFrames: 15, // how many frames to go without re-running the detector, only used for video inputs
   },
   gender: {
     enabled: true,
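
Per the comment above, the age model path encodes which dataset the model was trained on. A trivial sketch of switching between the two published variants, assuming both files sit in the same ../models directory:

// 'imdb' and 'wiki' name the training set of the ssrnet age model
function ageModelPath(dataset = 'imdb') { // 'imdb' or 'wiki'
  return `../models/ssrnet-age-${dataset}.json`;
}
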
@@ -87,7 +87,7 @@ export default {
     enabled: true,
     inputSize: 64, // fixed value, 64 for 'mini' and 'lage', 48 for 'cnn'
     minConfidence: 0.3, // threshold for discarding a prediction
-    skipFrames: 10, // how many frames to go without re-running the detector
+    skipFrames: 15, // how many frames to go without re-running the detector
     modelPath: '../models/emotion-large.json', // can be 'mini', 'large' or 'cnn'
   },
 },
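
Per the comments above, the emotion model variant and its fixed input size go together: 64px for 'mini' and 'large', 48px for 'cnn'. A hedged sketch that derives both settings from a single variant choice:

// build the emotion sub-config from one variant name ('mini' | 'large' | 'cnn')
function emotionConfig(variant) {
  return {
    enabled: true,
    inputSize: variant === 'cnn' ? 48 : 64, // 64 for 'mini' and 'large', 48 for 'cnn'
    minConfidence: 0.3,
    skipFrames: 15,
    modelPath: `../models/emotion-${variant}.json`,
  };
}
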
@@ -103,7 +103,7 @@ export default {
   hand: {
     enabled: true,
     inputSize: 256, // fixed value
-    skipFrames: 10, // how many frames to go without re-running the hand bounding box detector, only used for video inputs
+    skipFrames: 15, // how many frames to go without re-running the hand bounding box detector, only used for video inputs
     // if model is running st 25 FPS, we can re-use existing bounding box for updated hand skeleton analysis
     // as the hand probably hasn't moved much in short time (10 * 1/25 = 0.25 sec)
     minConfidence: 0.3, // threshold for discarding a prediction

@@ -55,15 +55,15 @@ const config = {
   videoOptimized: true,
   face: {
     enabled: true,
-    detector: { maxFaces: 10, skipFrames: 10, minConfidence: 0.3, iouThreshold: 0.3, scoreThreshold: 0.5 },
+    detector: { maxFaces: 10, skipFrames: 15, minConfidence: 0.3, iouThreshold: 0.3, scoreThreshold: 0.5 },
     mesh: { enabled: true },
     iris: { enabled: true },
-    age: { enabled: true, skipFrames: 10 },
+    age: { enabled: true, skipFrames: 15 },
     gender: { enabled: true },
     emotion: { enabled: true, minConfidence: 0.3, useGrayscale: true },
   },
   body: { enabled: true, maxDetections: 10, scoreThreshold: 0.5, nmsRadius: 20 },
-  hand: { enabled: true, skipFrames: 10, minConfidence: 0.3, iouThreshold: 0.3, scoreThreshold: 0.5 },
+  hand: { enabled: true, skipFrames: 15, minConfidence: 0.3, iouThreshold: 0.3, scoreThreshold: 0.5 },
   gesture: { enabled: true },
 };
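
This demo config is ultimately handed to the library on every detect call. A usage sketch, assuming the default-export API and bundle path the demo of this era used:

// hedged sketch: run detection on a video element with the config above
import human from '../dist/human.esm.js'; // assumed bundle path

async function runDetection(video, config) {
  const result = await human.detect(video, config);
  console.log(result.face, result.body, result.hand, result.gesture);
}
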
@@ -187,6 +187,8 @@ async function setupCamera() {
   video.height = video.videoHeight;
   canvas.width = video.width;
   canvas.height = video.height;
+  canvas.style.width = canvas.width > canvas.height ? '100vw' : '';
+  canvas.style.height = canvas.width > canvas.height ? '' : '100vh';
   if (live) video.play();
   ui.busy = false;
   // do once more because onresize events can be delayed or skipped
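
The two added lines are the actual orientation fix: a landscape canvas is pinned to the viewport width, a portrait one to the viewport height, and the other dimension follows from the aspect ratio. Since orientation can change after setup, a sketch of re-applying the same rule from a resize handler (the element lookup is an assumption, for illustration only):

// re-apply the landscape/portrait sizing rule whenever the window resizes
function fitCanvas() {
  const canvas = document.getElementById('canvas'); // assumed element id
  const landscape = canvas.width > canvas.height;
  canvas.style.width = landscape ? '100vw' : '';
  canvas.style.height = landscape ? '' : '100vh';
}
window.addEventListener('resize', fitCanvas); // also fires on orientation change
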
@@ -35,7 +35,7 @@
   .log { position: fixed; bottom: 0; margin: 0.4rem; font-size: 0.9rem; }
   .samples-container { display: flex; flex-wrap: wrap; }
   .video { display: none; }
-  .canvas { margin: 0 auto; height: 100vh; }
+  .canvas { margin: 0 auto; }
   .loader { width: 300px; height: 300px; border: 3px solid transparent; border-radius: 50%; border-top: 4px solid #f15e41; animation: spin 4s linear infinite; position: absolute; top: 30%; left: 50%; margin-left: -150px; z-index: 15; }
   .loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: 4px solid transparent; }
   .loader::before { border-top-color: #bad375; animation: 3s spin linear infinite; }

@@ -1,7 +1,7 @@
 const tf = require('@tensorflow/tfjs');
 const profile = require('../profile.js');

-const annotations = ['angry', 'discust', 'fear', 'happy', 'sad', 'surpise', 'neutral'];
+const annotations = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surpise', 'neutral'];
 const models = {};
 let last = [];
 let frame = Number.MAX_SAFE_INTEGER;
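
The annotations array indexes the model's 7-class output vector, which is why a label typo like 'discust' showed up verbatim in results. A sketch of how scores might map to labels and get filtered by minConfidence (the score values are illustrative, not model output):

const annotations = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surpise', 'neutral'];

function labelEmotions(scores, minConfidence = 0.3) {
  return scores
    .map((score, i) => ({ score, emotion: annotations[i] }))
    .filter((e) => e.score > minConfidence)
    .sort((a, b) => b.score - a.score); // highest confidence first
}

console.log(labelEmotions([0.05, 0.1, 0.02, 0.7, 0.4, 0.01, 0.2]));
// → [ { score: 0.7, emotion: 'happy' }, { score: 0.4, emotion: 'sad' } ]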