/**
 * Human demo for browsers
 * @default Human Library
 * @summary <https://github.com/vladmandic/human>
 * @author <https://github.com/vladmandic>
 * @copyright <https://github.com/vladmandic>
 * @license MIT
 */
2021-11-11 17:30:55 +01:00
import { Human , TensorLike , FaceResult } from '../../dist/human.esm.js' ; // equivalent of @vladmandic/Human
import * as indexDb from './indexdb' ; // methods to deal with indexdb
2021-11-09 16:39:23 +01:00
const humanConfig = { // user configuration for human, used to fine-tune behavior
modelBasePath : '../../models' ,
filter : { equalization : true } , // lets run with histogram equilizer
face : {
enabled : true ,
2021-11-12 21:07:23 +01:00
detector : { rotation : true , return : true , cropFactor : 1.6 , mask : false } , // return tensor is used to get detected face image
2021-11-13 23:26:19 +01:00
description : { enabled : true } , // default model for face descriptor extraction is faceres
mobilefacenet : { enabled : false , modelPath : 'https://vladmandic.github.io/human-models/models/mobilefacenet.json' } , // alternative model
2021-11-09 16:39:23 +01:00
iris : { enabled : true } , // needed to determine gaze direction
emotion : { enabled : false } , // not needed
2021-11-09 20:37:50 +01:00
antispoof : { enabled : true } , // enable optional antispoof module
liveness : { enabled : true } , // enable optional liveness module
2021-11-09 16:39:23 +01:00
} ,
body : { enabled : false } ,
hand : { enabled : false } ,
object : { enabled : false } ,
2021-11-11 17:30:55 +01:00
gesture : { enabled : true } , // parses face and iris gestures
2021-11-09 16:39:23 +01:00
} ;
2021-11-13 23:26:19 +01:00
// const matchOptions = { order: 2, multiplier: 1000, min: 0.0, max: 1.0 }; // for embedding model
const matchOptions = { order : 2 , multiplier : 25 , min : 0.2 , max : 0.8 } ; // for faceres model
2021-11-09 16:39:23 +01:00
const options = {
2021-11-11 17:30:55 +01:00
minConfidence : 0.6 , // overal face confidence for box, face, gender, real, live
2021-11-09 16:39:23 +01:00
minSize : 224 , // min input to face descriptor model before degradation
maxTime : 10000 , // max time before giving up
2021-11-09 20:37:50 +01:00
blinkMin : 10 , // minimum duration of a valid blink
blinkMax : 800 , // maximum duration of a valid blink
2021-11-11 17:30:55 +01:00
threshold : 0.5 , // minimum similarity
2021-11-12 21:07:23 +01:00
mask : humanConfig.face.detector.mask ,
rotation : humanConfig.face.detector.rotation ,
cropFactor : humanConfig.face.detector.cropFactor ,
2021-11-13 23:26:19 +01:00
. . . matchOptions ,
2021-11-09 16:39:23 +01:00
} ;
2021-11-09 20:37:50 +01:00
const ok = { // must meet all rules
faceCount : false ,
faceConfidence : false ,
facingCenter : false ,
2021-11-12 21:07:23 +01:00
lookingCenter : false ,
2021-11-09 20:37:50 +01:00
blinkDetected : false ,
faceSize : false ,
antispoofCheck : false ,
livenessCheck : false ,
elapsedMs : 0 , // total time while waiting for valid face
} ;
2021-11-12 21:07:23 +01:00
const allOk = ( ) = > ok . faceCount && ok . faceSize && ok . blinkDetected && ok . facingCenter && ok . lookingCenter && ok . faceConfidence && ok . antispoofCheck && ok . livenessCheck ;
const current : { face : FaceResult | null , record : indexDb.FaceRecord | null } = { face : null , record : null } ; // current face record and matched database record
2021-11-09 20:37:50 +01:00
const blink = { // internal timers for blink start/end/duration
start : 0 ,
end : 0 ,
time : 0 ,
} ;
2021-11-11 17:30:55 +01:00
// let db: Array<{ name: string, source: string, embedding: number[] }> = []; // holds loaded face descriptor database
2021-11-09 16:39:23 +01:00
const human = new Human ( humanConfig ) ; // create instance of human with overrides from user configuration
human . env [ 'perfadd' ] = false ; // is performance data showing instant or total values
human . draw . options . font = 'small-caps 18px "Lato"' ; // set font used to draw labels when using draw methods
human . draw . options . lineHeight = 20 ;
const dom = { // grab instances of dom objects so we dont have to look them up later
video : document.getElementById ( 'video' ) as HTMLVideoElement ,
canvas : document.getElementById ( 'canvas' ) as HTMLCanvasElement ,
log : document.getElementById ( 'log' ) as HTMLPreElement ,
fps : document.getElementById ( 'fps' ) as HTMLPreElement ,
2021-11-11 17:30:55 +01:00
match : document.getElementById ( 'match' ) as HTMLDivElement ,
name : document.getElementById ( 'name' ) as HTMLInputElement ,
save : document.getElementById ( 'save' ) as HTMLSpanElement ,
delete : document . getElementById ( 'delete' ) as HTMLSpanElement ,
retry : document.getElementById ( 'retry' ) as HTMLDivElement ,
source : document.getElementById ( 'source' ) as HTMLCanvasElement ,
2021-11-12 21:07:23 +01:00
ok : document.getElementById ( 'ok' ) as HTMLDivElement ,
2021-11-09 16:39:23 +01:00
} ;
const timestamp = { detect : 0 , draw : 0 } ; // holds information used to calculate performance and possible memory leaks
const fps = { detect : 0 , draw : 0 } ; // holds calculated fps information for both detect and screen refresh
let startTime = 0 ;
const log = ( . . . msg ) = > { // helper method to output messages
dom . log . innerText += msg . join ( ' ' ) + '\n' ;
// eslint-disable-next-line no-console
console . log ( . . . msg ) ;
} ;
const printFPS = ( msg ) = > dom . fps . innerText = msg ; // print status element
async function webCam() { // initialize webcam
printFPS ( 'starting webcam...' ) ;
// @ts-ignore resizeMode is not yet defined in tslib
const cameraOptions : MediaStreamConstraints = { audio : false , video : { facingMode : 'user' , resizeMode : 'none' , width : { ideal : document.body.clientWidth } } } ;
const stream : MediaStream = await navigator . mediaDevices . getUserMedia ( cameraOptions ) ;
const ready = new Promise ( ( resolve ) = > { dom . video . onloadeddata = ( ) = > resolve ( true ) ; } ) ;
dom . video . srcObject = stream ;
dom . video . play ( ) ;
await ready ;
dom . canvas . width = dom . video . videoWidth ;
dom . canvas . height = dom . video . videoHeight ;
2021-11-11 17:30:55 +01:00
if ( human . env . initial ) log ( 'video:' , dom . video . videoWidth , dom . video . videoHeight , '|' , stream . getVideoTracks ( ) [ 0 ] . label ) ;
2021-11-09 16:39:23 +01:00
dom . canvas . onclick = ( ) = > { // pause when clicked on screen and resume on next click
if ( dom . video . paused ) dom . video . play ( ) ;
else dom . video . pause ( ) ;
} ;
}
async function detectionLoop() { // main detection loop
if ( ! dom . video . paused ) {
2021-11-12 21:07:23 +01:00
if ( current . face && current . face . tensor ) human . tf . dispose ( current . face . tensor ) ; // dispose previous tensor
2021-11-09 16:39:23 +01:00
await human . detect ( dom . video ) ; // actual detection; were not capturing output in a local variable as it can also be reached via human.result
const now = human . now ( ) ;
fps . detect = 1000 / ( now - timestamp . detect ) ;
timestamp . detect = now ;
requestAnimationFrame ( detectionLoop ) ; // start new frame immediately
}
}
2021-11-11 17:30:55 +01:00
async function validationLoop ( ) : Promise < FaceResult > { // main screen refresh loop
2021-11-09 16:39:23 +01:00
const interpolated = await human . next ( human . result ) ; // smoothen result using last-known results
await human . draw . canvas ( dom . video , dom . canvas ) ; // draw canvas to screen
await human . draw . all ( dom . canvas , interpolated ) ; // draw labels, boxes, lines, etc.
const now = human . now ( ) ;
fps . draw = 1000 / ( now - timestamp . draw ) ;
timestamp . draw = now ;
printFPS ( ` fps: ${ fps . detect . toFixed ( 1 ) . padStart ( 5 , ' ' ) } detect | ${ fps . draw . toFixed ( 1 ) . padStart ( 5 , ' ' ) } draw ` ) ; // write status
ok . faceCount = human . result . face . length === 1 ; // must be exactly detected face
2021-11-09 20:37:50 +01:00
if ( ok . faceCount ) { // skip the rest if no face
const gestures : string [ ] = Object . values ( human . result . gesture ) . map ( ( gesture ) = > gesture . gesture ) ; // flatten all gestures
if ( gestures . includes ( 'blink left eye' ) || gestures . includes ( 'blink right eye' ) ) blink . start = human . now ( ) ; // blink starts when eyes get closed
if ( blink . start > 0 && ! gestures . includes ( 'blink left eye' ) && ! gestures . includes ( 'blink right eye' ) ) blink . end = human . now ( ) ; // if blink started how long until eyes are back open
2021-11-12 21:07:23 +01:00
ok . blinkDetected = ok . blinkDetected || ( Math . abs ( blink . end - blink . start ) > options . blinkMin && Math . abs ( blink . end - blink . start ) < options . blinkMax ) ;
2021-11-09 20:37:50 +01:00
if ( ok . blinkDetected && blink . time === 0 ) blink . time = Math . trunc ( blink . end - blink . start ) ;
2021-11-12 21:07:23 +01:00
ok . facingCenter = gestures . includes ( 'facing center' ) ;
ok . lookingCenter = gestures . includes ( 'looking center' ) ; // must face camera and look at camera
2021-11-09 20:37:50 +01:00
ok . faceConfidence = ( human . result . face [ 0 ] . boxScore || 0 ) > options . minConfidence && ( human . result . face [ 0 ] . faceScore || 0 ) > options . minConfidence && ( human . result . face [ 0 ] . genderScore || 0 ) > options . minConfidence ;
ok . antispoofCheck = ( human . result . face [ 0 ] . real || 0 ) > options . minConfidence ;
ok . livenessCheck = ( human . result . face [ 0 ] . live || 0 ) > options . minConfidence ;
ok . faceSize = human . result . face [ 0 ] . box [ 2 ] >= options . minSize && human . result . face [ 0 ] . box [ 3 ] >= options . minSize ;
}
2021-11-12 21:07:23 +01:00
let y = 32 ;
for ( const [ key , val ] of Object . entries ( ok ) ) {
let el = document . getElementById ( ` ok- ${ key } ` ) ;
if ( ! el ) {
el = document . createElement ( 'div' ) ;
el . innerText = key ;
el . className = 'ok' ;
el . style . top = ` ${ y } px ` ;
dom . ok . appendChild ( el ) ;
}
if ( typeof val === 'boolean' ) el . style . backgroundColor = val ? 'lightgreen' : 'lightcoral' ;
else el . innerText = ` ${ key } : ${ val } ` ;
y += 28 ;
}
2021-11-09 16:39:23 +01:00
if ( allOk ( ) ) { // all criteria met
dom . video . pause ( ) ;
2021-11-11 17:30:55 +01:00
return human . result . face [ 0 ] ;
2021-11-09 16:39:23 +01:00
}
if ( ok . elapsedMs > options . maxTime ) { // give up
dom . video . pause ( ) ;
2021-11-11 17:30:55 +01:00
return human . result . face [ 0 ] ;
2021-11-09 16:39:23 +01:00
} else { // run again
ok . elapsedMs = Math . trunc ( human . now ( ) - startTime ) ;
return new Promise ( ( resolve ) = > {
setTimeout ( async ( ) = > {
const res = await validationLoop ( ) ; // run validation loop until conditions are met
2021-11-11 17:30:55 +01:00
if ( res ) resolve ( human . result . face [ 0 ] ) ; // recursive promise resolve
2021-11-09 16:39:23 +01:00
} , 30 ) ; // use to slow down refresh from max refresh rate to target of 30 fps
} ) ;
}
}
2021-11-11 17:30:55 +01:00
async function saveRecords() {
if ( dom . name . value . length > 0 ) {
const image = dom . canvas . getContext ( '2d' ) ? . getImageData ( 0 , 0 , dom . canvas . width , dom . canvas . height ) as ImageData ;
2021-11-12 21:07:23 +01:00
const rec = { id : 0 , name : dom.name.value , descriptor : current.face?.embedding as number [ ] , image } ;
2021-11-11 17:30:55 +01:00
await indexDb . save ( rec ) ;
log ( 'saved face record:' , rec . name ) ;
} else {
log ( 'invalid name' ) ;
}
}
2021-11-09 16:39:23 +01:00
2021-11-11 17:30:55 +01:00
async function deleteRecord() {
2021-11-12 21:07:23 +01:00
if ( current . record && current . record . id > 0 ) {
await indexDb . remove ( current . record ) ;
2021-11-11 17:30:55 +01:00
}
2021-11-09 20:37:50 +01:00
}
2021-11-11 17:30:55 +01:00
async function detectFace() {
2021-11-11 23:01:10 +01:00
dom . canvas . getContext ( '2d' ) ? . clearRect ( 0 , 0 , options . minSize , options . minSize ) ;
2021-11-12 21:07:23 +01:00
if ( ! current . face || ! current . face . tensor || ! current . face . embedding ) return false ;
2021-11-13 23:26:19 +01:00
// eslint-disable-next-line no-console
console . log ( 'face record:' , current . face ) ;
2021-11-12 21:07:23 +01:00
human . tf . browser . toPixels ( current . face . tensor as unknown as TensorLike , dom . canvas ) ;
if ( await indexDb . count ( ) === 0 ) {
log ( 'face database is empty' ) ;
document . body . style . background = 'black' ;
2021-11-11 17:30:55 +01:00
dom . delete . style . display = 'none' ;
2021-11-12 21:07:23 +01:00
return false ;
}
const db = await indexDb . load ( ) ;
const descriptors = db . map ( ( rec ) = > rec . descriptor ) ;
2021-11-13 23:26:19 +01:00
const res = await human . match ( current . face . embedding , descriptors , matchOptions ) ;
2021-11-12 21:07:23 +01:00
current . record = db [ res . index ] || null ;
if ( current . record ) {
log ( ` best match: ${ current . record . name } | id: ${ current . record . id } | similarity: ${ Math . round ( 1000 * res . similarity ) / 10 } % ` ) ;
dom . name . value = current . record . name ;
2021-11-11 17:30:55 +01:00
dom . source . style . display = '' ;
2021-11-12 21:07:23 +01:00
dom . source . getContext ( '2d' ) ? . putImageData ( current . record . image , 0 , 0 ) ;
2021-11-11 17:30:55 +01:00
}
2021-11-12 21:07:23 +01:00
document . body . style . background = res . similarity > options . threshold ? 'darkgreen' : 'maroon' ;
2021-11-11 17:30:55 +01:00
return res . similarity > options . threshold ;
2021-11-09 16:39:23 +01:00
}
async function main() { // main entry point
2021-11-11 17:30:55 +01:00
ok . faceCount = false ;
ok . faceConfidence = false ;
ok . facingCenter = false ;
ok . blinkDetected = false ;
ok . faceSize = false ;
ok . antispoofCheck = false ;
ok . livenessCheck = false ;
ok . elapsedMs = 0 ;
dom . match . style . display = 'none' ;
dom . retry . style . display = 'none' ;
2021-11-12 21:07:23 +01:00
dom . source . style . display = 'none' ;
2021-11-11 17:30:55 +01:00
document . body . style . background = 'black' ;
await webCam ( ) ;
await detectionLoop ( ) ; // start detection loop
startTime = human . now ( ) ;
2021-11-12 21:07:23 +01:00
current . face = await validationLoop ( ) ; // start validation loop
dom . canvas . width = current . face ? . tensor ? . shape [ 1 ] || options . minSize ;
dom . canvas . height = current . face ? . tensor ? . shape [ 0 ] || options . minSize ;
2021-11-11 23:01:10 +01:00
dom . source . width = dom . canvas . width ;
dom . source . height = dom . canvas . height ;
dom . canvas . style . width = '' ;
dom . match . style . display = 'flex' ;
2021-11-12 21:07:23 +01:00
dom . save . style . display = 'flex' ;
dom . delete . style . display = 'flex' ;
2021-11-11 23:01:10 +01:00
dom . retry . style . display = 'block' ;
2021-11-12 21:07:23 +01:00
if ( ! allOk ( ) ) { // is all criteria met?
2021-11-11 23:01:10 +01:00
log ( 'did not find valid face' ) ;
return false ;
2021-11-11 17:30:55 +01:00
} else {
2021-11-12 21:07:23 +01:00
return detectFace ( ) ;
2021-11-11 17:30:55 +01:00
}
}
async function init() {
2021-11-18 16:10:06 +01:00
log ( 'human version:' , human . version , '| tfjs version:' , human . tf . version [ 'tfjs-core' ] ) ;
2021-11-11 17:30:55 +01:00
log ( 'options:' , JSON . stringify ( options ) . replace ( /{|}|"|\[|\]/g , '' ) . replace ( /,/g , ' ' ) ) ;
2021-11-09 16:39:23 +01:00
printFPS ( 'loading...' ) ;
2021-11-12 21:07:23 +01:00
log ( 'known face records:' , await indexDb . count ( ) ) ;
2021-11-11 17:30:55 +01:00
await webCam ( ) ; // start webcam
2021-11-09 16:39:23 +01:00
await human . load ( ) ; // preload all models
printFPS ( 'initializing...' ) ;
2021-11-11 17:30:55 +01:00
dom . retry . addEventListener ( 'click' , main ) ;
dom . save . addEventListener ( 'click' , saveRecords ) ;
dom . delete . addEventListener ( 'click' , deleteRecord ) ;
2021-11-09 16:39:23 +01:00
await human . warmup ( ) ; // warmup function to initialize backend for future faster detection
2021-11-11 17:30:55 +01:00
await main ( ) ;
2021-11-09 16:39:23 +01:00
}
2021-11-11 17:30:55 +01:00
window . onload = init ;