mirror of https://github.com/vladmandic/human

input type validation

parent 1cd492b9d5
commit 4a2a69fc18
@@ -9,6 +9,10 @@ Repository: **<git+https://github.com/vladmandic/human.git>**

 ## Changelog

+### **HEAD -> main** 2021/04/02 mandic00@live.com
+
+- normalize all scores
+
 ### **1.3.1** 2021/03/30 mandic00@live.com

 - added face3d demo
@@ -28,6 +28,7 @@
 <div id="media">
 <canvas id="canvas" class="canvas"></canvas>
 <video id="video" playsinline class="video"></video>
+<div id="log"></div>
 </div>
 </body>
 </html>
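For orientation: the markup above stacks a drawing canvas over the live video element and reserves a `#log` div for status text, which the script change below fills in. A minimal sketch of that overlay pattern, using the same element ids; the draw loop is illustrative, not the demo's actual renderer:

```ts
// Sketch: keep the overlay canvas in sync with the video and draw onto it.
const video = document.getElementById('video') as HTMLVideoElement;
const canvas = document.getElementById('canvas') as HTMLCanvasElement;

function drawLoop(): void {
  // match the canvas resolution to the actual video stream
  canvas.width = video.videoWidth;
  canvas.height = video.videoHeight;
  const ctx = canvas.getContext('2d');
  if (ctx) ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
  requestAnimationFrame(drawLoop); // repeat every display frame
}
drawLoop();
```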
@@ -134,6 +134,19 @@ async function setupCamera() {
   const stream = await navigator.mediaDevices.getUserMedia(constraints);
   if (stream) video.srcObject = stream;
   else return null;
+  // get camera and engine information for the log overlay
+  const track = stream.getVideoTracks()[0];
+  const settings = track.getSettings();
+  // log('camera constraints:', constraints, 'window:', { width: window.innerWidth, height: window.innerHeight }, 'settings:', settings, 'track:', track);
+  const engineData = human.tf.engine();
+  const gpuData = (engineData.backendInstance && engineData.backendInstance.numBytesInGPU > 0) ? `gpu: ${(engineData.backendInstance.numBytesInGPU ? engineData.backendInstance.numBytesInGPU : 0).toLocaleString()} bytes` : '';
+  const cameraData = { name: track.label?.toLowerCase(), width: settings.width, height: settings.height, facing: settings.facingMode === 'user' ? 'front' : 'back' };
+  const memoryData = `system: ${engineData.state.numBytes.toLocaleString()} bytes ${gpuData} | tensors: ${engineData.state.numTensors.toLocaleString()}`;
+  document.getElementById('log').innerHTML = `
+    video: ${cameraData.name} | facing: ${cameraData.facing} | screen: ${window.innerWidth} x ${window.innerHeight} camera: ${cameraData.width} x ${cameraData.height}<br>
+    backend: ${human.tf.getBackend()} | ${memoryData}<br>
+  `;
+  // return a promise that resolves when the camera is ready
   return new Promise((resolve) => {
     video.onloadeddata = async () => {
       video.width = video.videoWidth;
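The information block added above relies only on standard browser APIs plus the TF.js engine that Human exposes: `getUserMedia()` resolves to a `MediaStream`, and the video track's `getSettings()` reports the resolution and facing mode the browser actually applied, which may differ from what was requested. A standalone, hedged sketch of that introspection, with no Human-specific parts:

```ts
// Sketch: open a camera and read back the settings the browser actually applied.
async function cameraInfo(): Promise<MediaTrackSettings | null> {
  const stream = await navigator.mediaDevices.getUserMedia({ video: { width: 1280, height: 720 } });
  const track = stream.getVideoTracks()[0];
  if (!track) return null;
  const settings = track.getSettings(); // actual width/height/facingMode, not the requested ones
  console.log('camera:', track.label, settings.width, settings.height, settings.facingMode);
  return settings;
}
```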
@@ -147,6 +160,7 @@ async function setupCamera() {
}

async function main() {
  await human.load();
  const video = await setupCamera();
  if (video) {
    const videoTexture = new VideoTexture(video); // now load textures from video
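The `VideoTexture` line above is where the face3d demo hands the camera feed to three.js. A minimal sketch of how such a texture is typically used (assuming three.js is importable; the plane material and geometry are illustrative, not the demo's scene):

```ts
import { Mesh, MeshBasicMaterial, PlaneGeometry, VideoTexture } from 'three';

// Sketch: wrap a playing <video> element in a texture and map it onto a plane.
function videoPlane(video: HTMLVideoElement): Mesh {
  const texture = new VideoTexture(video); // three.js refreshes this texture each frame
  const material = new MeshBasicMaterial({ map: texture });
  const aspect = video.videoWidth / video.videoHeight;
  return new Mesh(new PlaneGeometry(aspect, 1), material);
}
```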
@@ -43,7 +43,8 @@ function log(...msg) {
 async function getFaceDB() {
   // download db with known faces
   try {
-    const res = await fetch('/demo/facematch-faces.json');
+    let res = await fetch('/demo/facematch-faces.json');
+    if (!res || !res.ok) res = await fetch('/human/demo/facematch-faces.json');
     db = (res && res.ok) ? await res.json() : [];
     for (const rec of db) {
       rec.embedding = rec.embedding.map((a) => parseFloat(a.toFixed(4)));
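The added retry turns a hard-coded path into a two-step probe: first the path that works when the demo is served from the repository root, then the `/human`-prefixed path used on GitHub Pages. The same pattern, generalized into a hedged sketch (the helper name is mine, not the demo's):

```ts
// Sketch: fetch a JSON resource, trying candidate paths until one responds OK.
async function fetchJsonWithFallback<T>(paths: string[], fallback: T): Promise<T> {
  for (const path of paths) {
    try {
      const res = await fetch(path);
      if (res && res.ok) return await res.json() as T;
    } catch {
      // network error: fall through to the next candidate path
    }
  }
  return fallback;
}

// usage mirroring the demo:
// const db = await fetchJsonWithFallback(['/demo/facematch-faces.json', '/human/demo/facematch-faces.json'], []);
```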
@@ -171,7 +172,9 @@ async function main() {
  let res;
  let images = [];
  let dir = [];
  // load face descriptor database
  await getFaceDB();

  // enumerate all sample images in /assets
  res = await fetch('/assets');
  dir = (res && res.ok) ? await res.json() : [];
@@ -185,24 +188,25 @@ async function main() {
   res = await fetch('/private/err');
   dir = (res && res.ok) ? await res.json() : [];
   images = images.concat(dir.filter((img) => (img.endsWith('.jpg'))));

   log('Enumerated:', images.length, 'images');

   // could not dynamically enumerate images, so use a static list
   if (images.length === 0) {
     images = [
-      '/human/assets/sample1.jpg',
-      '/human/assets/sample2.jpg',
-      '/human/assets/sample3.jpg',
-      '/human/assets/sample4.jpg',
-      '/human/assets/sample5.jpg',
-      '/human/assets/sample6.jpg',
-      '/human/assets/sample6.jpg',
-      '/human/assets/sample-me.jpg',
-      '/human/assets/human-sample-face.jpg',
-      '/human/assets/human-sample-upper.jpg',
-      '/human/assets/human-sample-body.jpg',
+      'sample1.jpg',
+      'sample2.jpg',
+      'sample3.jpg',
+      'sample4.jpg',
+      'sample5.jpg',
+      'sample6.jpg',
+      'sample6.jpg',
+      'sample-me.jpg',
+      'human-sample-face.jpg',
+      'human-sample-upper.jpg',
+      'human-sample-body.jpg',
     ];
+    // add prefix for gitpages
+    images = images.map((a) => `/human/assets/${a}`);
     log('Adding static image list:', images.length, 'images');
   }
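Taken together, this hunk implements enumerate-or-fallback: try a dynamic directory listing, and when the server does not provide one, fall back to a known static list, prefixing the bare file names so they resolve on GitHub Pages. Condensed into one hedged sketch (the function name is mine):

```ts
// Sketch: dynamic listing with a static fallback, as in the demo hunk above.
async function listImages(staticNames: string[]): Promise<string[]> {
  let images: string[] = [];
  try {
    const res = await fetch('/assets');
    const dir: string[] = (res && res.ok) ? await res.json() : [];
    images = dir.filter((img) => img.endsWith('.jpg'));
  } catch {
    // server does not expose a directory listing
  }
  // add prefix for gitpages when falling back to the static list
  if (images.length === 0) images = staticNames.map((a) => `/human/assets/${a}`);
  return images;
}
```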
File diff suppressed because one or more lines are too long (repeated for 10 files)
@@ -28,7 +28,7 @@ export type Tensor = typeof tf.Tensor;
 export type { Config } from './config';
 export type { Result } from './result';
 /** Defines all possible input types for **Human** detection */
-export type Input = Tensor | ImageData | ImageBitmap | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;
+export type Input = Tensor | ImageData | ImageBitmap | HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;
 /** Error message */
 export type Error = { error: string };
 /** Instance of TensorFlow/JS */
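The practical effect of the widened union is that an `HTMLImageElement` now type-checks as `Input`, so TypeScript callers can pass an `<img>` directly to detection. A hedged sketch (assuming the published `@vladmandic/human` package with its async `detect()` method):

```ts
import Human from '@vladmandic/human';

const human = new Human();

// Sketch: with HTMLImageElement in the Input union, this compiles cleanly.
async function detectFromImage(src: string) {
  const img = new Image();
  img.src = src;
  await img.decode(); // wait until the pixel data is available
  return human.detect(img); // img is a valid Input as of this commit
}
```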
@@ -16,7 +16,16 @@ let fx = null;
 export function process(input, config): { tensor: tf.Tensor, canvas: OffscreenCanvas | HTMLCanvasElement } {
   let tensor;
   if (!input) throw new Error('Human: Input is missing');
-  if (!(input instanceof tf.Tensor) && !(input instanceof ImageData) && !(input instanceof ImageBitmap) && !(input instanceof HTMLVideoElement) && !(input instanceof HTMLCanvasElement) && !(input instanceof OffscreenCanvas)) {
+  if (
+    !(input instanceof tf.Tensor)
+    && !(input instanceof Image)
+    && !(input instanceof ImageData)
+    && !(input instanceof ImageBitmap)
+    && !(input instanceof HTMLImageElement)
+    && !(input instanceof HTMLVideoElement)
+    && !(input instanceof HTMLCanvasElement)
+    && !(input instanceof OffscreenCanvas)
+  ) {
     throw new Error('Human: Input type is not recognized');
   }
   if (input instanceof tf.Tensor) {
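The runtime guard mirrors the `Input` type union one `instanceof` at a time. Note that in browsers `Image.prototype` is `HTMLImageElement.prototype`, so the `Image` and `HTMLImageElement` tests are equivalent; listing both is redundant but harmless. The same guard-then-narrow pattern as a self-contained sketch (the `tf.Tensor` branch is left out to keep it dependency-free):

```ts
// Sketch: validate an untyped value against an allow-list of constructors,
// throwing the same errors process() uses above.
type AllowedInput = ImageData | ImageBitmap | HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;

function validateInput(input: unknown): AllowedInput {
  if (!input) throw new Error('Human: Input is missing');
  const allowed = [ImageData, ImageBitmap, HTMLImageElement, HTMLVideoElement, HTMLCanvasElement, OffscreenCanvas];
  if (!allowed.some((ctor) => input instanceof ctor)) throw new Error('Human: Input type is not recognized');
  return input as AllowedInput;
}
```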
@@ -16,7 +16,7 @@ export declare type Tensor = typeof tf.Tensor;
 export type { Config } from './config';
 export type { Result } from './result';
 /** Defines all possible input types for **Human** detection */
-export declare type Input = Tensor | ImageData | ImageBitmap | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;
+export declare type Input = Tensor | ImageData | ImageBitmap | HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | OffscreenCanvas;
 /** Error message */
 export declare type Error = {
   error: string;