diff --git a/config.js b/config.js index f7d87a4a..d26f0ee3 100644 --- a/config.js +++ b/config.js @@ -53,7 +53,7 @@ export default { // 'front' is optimized for large faces such as front-facing camera and 'back' is optimized for distanct faces. inputSize: 256, // fixed value: 128 for front and 256 for 'back' maxFaces: 10, // maximum number of faces detected in the input, should be set to the minimum number for performance - skipFrames: 10, // how many frames to go without re-running the face bounding box detector, only used for video inputs + skipFrames: 15, // how many frames to go without re-running the face bounding box detector, only used for video inputs // if model is running st 25 FPS, we can re-use existing bounding box for updated face mesh analysis // as face probably hasn't moved much in short time (10 * 1/25 = 0.25 sec) minConfidence: 0.3, // threshold for discarding a prediction @@ -76,7 +76,7 @@ export default { modelPath: '../models/ssrnet-age-imdb.json', // can be 'imdb' or 'wiki' // which determines training set for model inputSize: 64, // fixed value - skipFrames: 10, // how many frames to go without re-running the detector, only used for video inputs + skipFrames: 15, // how many frames to go without re-running the detector, only used for video inputs }, gender: { enabled: true, @@ -87,7 +87,7 @@ export default { enabled: true, inputSize: 64, // fixed value, 64 for 'mini' and 'lage', 48 for 'cnn' minConfidence: 0.3, // threshold for discarding a prediction - skipFrames: 10, // how many frames to go without re-running the detector + skipFrames: 15, // how many frames to go without re-running the detector modelPath: '../models/emotion-large.json', // can be 'mini', 'large' or 'cnn' }, }, @@ -103,7 +103,7 @@ export default { hand: { enabled: true, inputSize: 256, // fixed value - skipFrames: 10, // how many frames to go without re-running the hand bounding box detector, only used for video inputs + skipFrames: 15, // how many frames to go without 
re-running the hand bounding box detector, only used for video inputs // if model is running st 25 FPS, we can re-use existing bounding box for updated hand skeleton analysis // as the hand probably hasn't moved much in short time (10 * 1/25 = 0.25 sec) minConfidence: 0.3, // threshold for discarding a prediction diff --git a/demo/browser.js b/demo/browser.js index 3d9636bb..c2ebb234 100644 --- a/demo/browser.js +++ b/demo/browser.js @@ -55,15 +55,15 @@ const config = { videoOptimized: true, face: { enabled: true, - detector: { maxFaces: 10, skipFrames: 10, minConfidence: 0.3, iouThreshold: 0.3, scoreThreshold: 0.5 }, + detector: { maxFaces: 10, skipFrames: 15, minConfidence: 0.3, iouThreshold: 0.3, scoreThreshold: 0.5 }, mesh: { enabled: true }, iris: { enabled: true }, - age: { enabled: true, skipFrames: 10 }, + age: { enabled: true, skipFrames: 15 }, gender: { enabled: true }, emotion: { enabled: true, minConfidence: 0.3, useGrayscale: true }, }, body: { enabled: true, maxDetections: 10, scoreThreshold: 0.5, nmsRadius: 20 }, - hand: { enabled: true, skipFrames: 10, minConfidence: 0.3, iouThreshold: 0.3, scoreThreshold: 0.5 }, + hand: { enabled: true, skipFrames: 15, minConfidence: 0.3, iouThreshold: 0.3, scoreThreshold: 0.5 }, gesture: { enabled: true }, }; @@ -187,6 +187,8 @@ async function setupCamera() { video.height = video.videoHeight; canvas.width = video.width; canvas.height = video.height; + canvas.style.width = canvas.width > canvas.height ? '100vw' : ''; + canvas.style.height = canvas.width > canvas.height ? 
'' : '100vh'; if (live) video.play(); ui.busy = false; // do once more because onresize events can be delayed or skipped diff --git a/demo/index.html b/demo/index.html index e1a3b8e7..048fd8ab 100644 --- a/demo/index.html +++ b/demo/index.html @@ -35,7 +35,7 @@ .log { position: fixed; bottom: 0; margin: 0.4rem; font-size: 0.9rem; } .samples-container { display: flex; flex-wrap: wrap; } .video { display: none; } - .canvas { margin: 0 auto; height: 100vh; } + .canvas { margin: 0 auto; } .loader { width: 300px; height: 300px; border: 3px solid transparent; border-radius: 50%; border-top: 4px solid #f15e41; animation: spin 4s linear infinite; position: absolute; top: 30%; left: 50%; margin-left: -150px; z-index: 15; } .loader::before, .loader::after { content: ""; position: absolute; top: 6px; bottom: 6px; left: 6px; right: 6px; border-radius: 50%; border: 4px solid transparent; } .loader::before { border-top-color: #bad375; animation: 3s spin linear infinite; } diff --git a/src/emotion/emotion.js b/src/emotion/emotion.js index 342aee15..e4902c77 100644 --- a/src/emotion/emotion.js +++ b/src/emotion/emotion.js @@ -1,7 +1,7 @@ const tf = require('@tensorflow/tfjs'); const profile = require('../profile.js'); -const annotations = ['angry', 'discust', 'fear', 'happy', 'sad', 'surpise', 'neutral']; +const annotations = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']; const models = {}; let last = []; let frame = Number.MAX_SAFE_INTEGER;