input type validation

pull/94/head
Vladimir Mandic 2021-04-02 08:37:35 -04:00
parent 1cd492b9d5
commit 4a2a69fc18
17 changed files with 75 additions and 43 deletions


@@ -9,6 +9,10 @@ Repository: **<git+https://github.com/vladmandic/human.git>**
 ## Changelog
+### **HEAD -> main** 2021/04/02 mandic00@live.com
+- normalize all scores
 ### **1.3.1** 2021/03/30 mandic00@live.com
 - added face3d demo


@@ -28,6 +28,7 @@
 <div id="media">
 <canvas id="canvas" class="canvas"></canvas>
 <video id="video" playsinline class="video"></video>
+<div id="log"></div>
 </div>
 </body>
 </html>


@@ -134,6 +134,19 @@ async function setupCamera() {
   const stream = await navigator.mediaDevices.getUserMedia(constraints);
   if (stream) video.srcObject = stream;
   else return null;
+  // get information data
+  const track = stream.getVideoTracks()[0];
+  const settings = track.getSettings();
+  // log('camera constraints:', constraints, 'window:', { width: window.innerWidth, height: window.innerHeight }, 'settings:', settings, 'track:', track);
+  const engineData = human.tf.engine();
+  const gpuData = (engineData.backendInstance && engineData.backendInstance.numBytesInGPU > 0) ? `gpu: ${(engineData.backendInstance.numBytesInGPU ? engineData.backendInstance.numBytesInGPU : 0).toLocaleString()} bytes` : '';
+  const cameraData = { name: track.label?.toLowerCase(), width: settings.width, height: settings.height, facing: settings.facingMode === 'user' ? 'front' : 'back' };
+  const memoryData = `system: ${engineData.state.numBytes.toLocaleString()} bytes ${gpuData} | tensors: ${engineData.state.numTensors.toLocaleString()}`;
+  document.getElementById('log').innerHTML = `
+    video: ${cameraData.name} | facing: ${cameraData.facing} | screen: ${window.innerWidth} x ${window.innerHeight} camera: ${cameraData.width} x ${cameraData.height}<br>
+    backend: ${human.tf.getBackend()} | ${memoryData}<br>
+  `;
+  // return when camera is ready
   return new Promise((resolve) => {
     video.onloadeddata = async () => {
       video.width = video.videoWidth;
@@ -147,6 +160,7 @@ async function setupCamera() {
 }

 async function main() {
+  await human.load();
   const video = await setupCamera();
   if (video) {
     const videoTexture = new VideoTexture(video); // now load textures from video
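
For context on the new logging block: `MediaStreamTrack.getSettings()` reports the resolution the camera actually negotiated, which can differ from the requested constraints, and `tf.engine().state` exposes tensor and byte counters for the active backend. Below is a minimal standalone sketch of the same idea; it imports `@tensorflow/tfjs` directly instead of using the demo's bundled `human.tf` instance, and the function name `describeCamera` is illustrative, not part of the demo.

```ts
import * as tf from '@tensorflow/tfjs';

// sketch: open the default camera and summarize what was actually granted,
// plus the current TFJS backend and memory state
async function describeCamera(): Promise<string> {
  const stream = await navigator.mediaDevices.getUserMedia({ video: { width: 640, height: 480 } });
  const track = stream.getVideoTracks()[0];
  const settings = track.getSettings(); // actual resolution, not necessarily the requested one
  const engine = tf.engine();
  return `camera: ${track.label} ${settings.width} x ${settings.height}`
    + ` | backend: ${tf.getBackend()}`
    + ` | tensors: ${engine.state.numTensors} | bytes: ${engine.state.numBytes}`;
}
```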


@@ -43,7 +43,8 @@ function log(...msg) {
 async function getFaceDB() {
   // download db with known faces
   try {
-    const res = await fetch('/demo/facematch-faces.json');
+    let res = await fetch('/demo/facematch-faces.json');
+    if (!res || !res.ok) res = await fetch('/human/demo/facematch-faces.json');
     db = (res && res.ok) ? await res.json() : [];
     for (const rec of db) {
       rec.embedding = rec.embedding.map((a) => parseFloat(a.toFixed(4)));
@@ -171,7 +172,9 @@ async function main() {
   let res;
   let images = [];
   let dir = [];
+  // load face descriptor database
   await getFaceDB();
   // enumerate all sample images in /assets
   res = await fetch('/assets');
   dir = (res && res.ok) ? await res.json() : [];
@@ -185,24 +188,25 @@
   res = await fetch('/private/err');
   dir = (res && res.ok) ? await res.json() : [];
   images = images.concat(dir.filter((img) => (img.endsWith('.jpg'))));
   log('Enumerated:', images.length, 'images');
   // could not dynamically enumerate images so using static list
   if (images.length === 0) {
     images = [
-      '/human/assets/sample1.jpg',
-      '/human/assets/sample2.jpg',
-      '/human/assets/sample3.jpg',
-      '/human/assets/sample4.jpg',
-      '/human/assets/sample5.jpg',
-      '/human/assets/sample6.jpg',
-      '/human/assets/sample6.jpg',
-      '/human/assets/sample-me.jpg',
-      '/human/assets/human-sample-face.jpg',
-      '/human/assets/human-sample-upper.jpg',
-      '/human/assets/human-sample-body.jpg',
+      'sample1.jpg',
+      'sample2.jpg',
+      'sample3.jpg',
+      'sample4.jpg',
+      'sample5.jpg',
+      'sample6.jpg',
+      'sample6.jpg',
+      'sample-me.jpg',
+      'human-sample-face.jpg',
+      'human-sample-upper.jpg',
+      'human-sample-body.jpg',
     ];
+    // add prefix for gitpages
+    images = images.map((a) => `/human/assets/${a}`);
     log('Adding static image list:', images.length, 'images');
   }
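
Both demo changes above follow the same pattern: try the server-relative path first, then fall back to (or prepend) the `/human/...` prefix used when the demos are served from GitHub Pages. A generic helper along these lines, hypothetical and not part of the repo, might look like:

```ts
// hypothetical helper: try candidate URLs in order, return the first JSON payload that loads
async function fetchJsonWithFallback<T>(urls: string[], fallback: T): Promise<T> {
  for (const url of urls) {
    try {
      const res = await fetch(url);
      if (res && res.ok) return await res.json() as T;
    } catch {
      // network error: try the next candidate
    }
  }
  return fallback; // nothing loaded
}

// usage mirroring the facematch change above:
// const db = await fetchJsonWithFallback(['/demo/facematch-faces.json', '/human/demo/facematch-faces.json'], []);
```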

Diffs for the vendored build output are suppressed because one or more lines are too long. The suppressed files include:

- dist/human.esm.js (vendored, 2 changes)
- dist/human.js (vendored, 2 changes)
- dist/human.js.map (vendored, 4 changes)
- dist/human.node.js (vendored, 8 changes)
- several additional suppressed files whose names are not shown


@@ -28,7 +28,7 @@ export type Tensor = typeof tf.Tensor;
 export type { Config } from './config';
 export type { Result } from './result';
 /** Defines all possible input types for **Human** detection */
-export type Input = Tensor | ImageData | ImageBitmap | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;
+export type Input = Tensor | ImageData | ImageBitmap | HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;
 /** Error message */
 export type Error = { error: string };
 /** Instance of TensorFlow/JS */
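
With `HTMLImageElement` added to the `Input` union, an `<img>` element now type-checks as a detection input alongside canvases, videos, `ImageData`, `ImageBitmap` and tensors. A minimal usage sketch, assuming the package import and `detect()` call as documented in the repo README (the element id `sample` is illustrative):

```ts
import Human from '@vladmandic/human';

const human = new Human();

async function detectFromImage(): Promise<void> {
  // an <img> element is now a valid Input value
  const img = document.getElementById('sample') as HTMLImageElement;
  const result = await human.detect(img);
  console.log('detection result:', result);
}
```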


@@ -16,7 +16,16 @@ let fx = null;
 export function process(input, config): { tensor: tf.Tensor, canvas: OffscreenCanvas | HTMLCanvasElement } {
   let tensor;
   if (!input) throw new Error('Human: Input is missing');
-  if (!(input instanceof tf.Tensor) && !(input instanceof ImageData) && !(input instanceof ImageBitmap) && !(input instanceof HTMLVideoElement) && !(input instanceof HTMLCanvasElement) && !(input instanceof OffscreenCanvas)) {
+  if (
+    !(input instanceof tf.Tensor)
+    && !(input instanceof Image)
+    && !(input instanceof ImageData)
+    && !(input instanceof ImageBitmap)
+    && !(input instanceof HTMLImageElement)
+    && !(input instanceof HTMLVideoElement)
+    && !(input instanceof HTMLCanvasElement)
+    && !(input instanceof OffscreenCanvas)
+  ) {
     throw new Error('Human: Input type is not recognized');
   }
   if (input instanceof tf.Tensor) {
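
Note that in browsers `Image` is simply the constructor for `HTMLImageElement`, so the `instanceof Image` and `instanceof HTMLImageElement` checks accept the same objects; listing both is harmless. The same validation can also be expressed as a caller-side type guard. The sketch below only mirrors the DOM checks added above (tensors are left out to avoid a tfjs import), and `isDomInput` is not an API exported by the library:

```ts
// caller-side sketch mirroring the instanceof checks in process()
type DomInput = ImageData | ImageBitmap | HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;

function isDomInput(input: unknown): input is DomInput {
  return input instanceof ImageData
    || input instanceof ImageBitmap
    || input instanceof HTMLImageElement
    || input instanceof HTMLVideoElement
    || input instanceof HTMLCanvasElement
    || (typeof OffscreenCanvas !== 'undefined' && input instanceof OffscreenCanvas);
}
```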

types/human.d.ts (vendored, 2 changes)

@@ -16,7 +16,7 @@ export declare type Tensor = typeof tf.Tensor;
 export type { Config } from './config';
 export type { Result } from './result';
 /** Defines all possible input types for **Human** detection */
-export declare type Input = Tensor | ImageData | ImageBitmap | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;
+export declare type Input = Tensor | ImageData | ImageBitmap | HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;
 /** Error message */
 export declare type Error = {
   error: string;