import * as faceapi from '../dist/face-api.esm.js';

// configuration options
const modelPath = 'https://vladmandic.github.io/face-api/model/'; // path to model folder that will be loaded using http
// const modelPath = '../model/'; // path to model folder that will be loaded using http
const imgSize = 512; // maximum image size in pixels
const minScore = 0.1; // minimum score
const maxResults = 5; // maximum number of results to return
const samples = ['sample (1).jpg', 'sample (2).jpg', 'sample (3).jpg', 'sample (4).jpg', 'sample (5).jpg', 'sample (6).jpg']; // sample images to be loaded using http

// helper function to pretty-print json object to string
function str(json) {
  let text = '<font color="lightblue">';
  text += json ? JSON.stringify(json).replace(/{|}|"|\[|\]/g, '').replace(/,/g, ', ') : '';
  text += '</font>';
  return text;
}

// helper function to print strings to html document as a log
function log(...txt) {
  // eslint-disable-next-line no-console
  console.log(...txt);
  // @ts-ignore
  document.getElementById('log').innerHTML += `<br>${txt}`;
}

// helper function to draw detected faces
function faces(name, title, id, data) {
  // create canvas to draw on
  const img = document.getElementById(id);
  if (!img) return;
  const canvas = document.createElement('canvas');
  canvas.style.position = 'absolute';
  canvas.style.left = `${img.offsetLeft}px`;
  canvas.style.top = `${img.offsetTop}px`;
  // @ts-ignore
  canvas.width = img.width;
  // @ts-ignore
  canvas.height = img.height;
  const ctx = canvas.getContext('2d');
  if (!ctx) return;
  // draw title
  ctx.font = '1rem sans-serif';
  ctx.fillStyle = 'black';
  ctx.fillText(name, 2, 15);
  ctx.fillText(title, 2, 35);
  for (const person of data) {
    // draw box around each face
    ctx.lineWidth = 3;
    ctx.strokeStyle = 'deepskyblue';
    ctx.fillStyle = 'deepskyblue';
    ctx.globalAlpha = 0.4;
    ctx.beginPath();
    ctx.rect(person.detection.box.x, person.detection.box.y, person.detection.box.width, person.detection.box.height);
    ctx.stroke();
    ctx.globalAlpha = 1;
    ctx.fillText(`${Math.round(100 * person.genderProbability)}% ${person.gender}`, person.detection.box.x, person.detection.box.y - 18);
    ctx.fillText(`${Math.round(person.age)} years`, person.detection.box.x, person.detection.box.y - 2);
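    // optional sketch (commented out, not part of the demo): the strongest expression returned by
    // .withFaceExpressions() could be labeled here as well; this assumes person.expressions behaves
    // like a plain map of expression names to scores
    // const [expression, score] = Object.entries(person.expressions).sort((a, b) => b[1] - a[1])[0];
    // ctx.fillText(`${Math.round(100 * score)}% ${expression}`, person.detection.box.x, person.detection.box.y - 34);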
    // draw face points for each face
    ctx.fillStyle = 'lightblue';
    ctx.globalAlpha = 0.5;
    const pointSize = 2;
    for (const pt of person.landmarks.positions) {
      ctx.beginPath();
      ctx.arc(pt.x, pt.y, pointSize, 0, 2 * Math.PI);
      ctx.fill();
    }
  }
  // add canvas to document
  document.body.appendChild(canvas);
}

// helper function to draw processed image and its results
function print(title, img, data) {
  // eslint-disable-next-line no-console
  console.log('Results:', title, img, data);
  const el = new Image();
  el.id = Math.floor(Math.random() * 100000).toString();
  el.src = img;
  el.width = imgSize;
  el.onload = () => faces(img, title, el.id, data);
  document.body.appendChild(el);
}

// loads image and draws it on a resized canvas so we always have the correct image size regardless of source
async function image(url) {
  return new Promise((resolve) => {
    const img = new Image();
    // wait until image is actually loaded
    img.addEventListener('load', () => {
      // resize image so larger axis is not bigger than limit
      const ratio = 1.0 * img.height / img.width;
      img.width = ratio <= 1 ? imgSize : 1.0 * imgSize / ratio;
      img.height = ratio >= 1 ? imgSize : 1.0 * imgSize * ratio;
      // create canvas and draw loaded image
      const canvas = document.createElement('canvas');
      canvas.height = img.height;
      canvas.width = img.width;
      const ctx = canvas.getContext('2d');
      if (ctx) ctx.drawImage(img, 0, 0, img.width, img.height);
      // return generated canvas to be used by tfjs during detection
      resolve(canvas);
    });
    // load image
    img.src = url;
  });
}

async function main() {
  // initialize tfjs
  log('FaceAPI Test');
  // if you want to use the wasm backend, the location of the wasm binaries must be specified
  // await faceapi.tf.setWasmPaths('../node_modules/@tensorflow/tfjs-backend-wasm/dist/');
  // await faceapi.tf.setBackend('wasm');
  // default is webgl backend
  await faceapi.tf.setBackend('webgl');
  await faceapi.tf.enableProdMode();
  await faceapi.tf.ENV.set('DEBUG', false);
  await faceapi.tf.ready();
  // check version
  log(`Version: TensorFlow/JS ${str(faceapi.tf?.version_core || '(not loaded)')} FaceAPI ${str(faceapi?.version || '(not loaded)')} Backend: ${str(faceapi.tf?.getBackend() || '(not loaded)')}`);
  log(`Flags: ${JSON.stringify(faceapi.tf.ENV.flags)}`);
  // load face-api models
  log('Loading FaceAPI models');
  await faceapi.nets.tinyFaceDetector.load(modelPath);
  await faceapi.nets.ssdMobilenetv1.load(modelPath);
  await faceapi.nets.ageGenderNet.load(modelPath);
  await faceapi.nets.faceLandmark68Net.load(modelPath);
  await faceapi.nets.faceRecognitionNet.load(modelPath);
  await faceapi.nets.faceExpressionNet.load(modelPath);
  const optionsTinyFace = new faceapi.TinyFaceDetectorOptions({ inputSize: imgSize, scoreThreshold: minScore });
  const optionsSSDMobileNet = new faceapi.SsdMobilenetv1Options({ minConfidence: minScore, maxResults });
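  // note: TinyFaceDetector expects inputSize to be divisible by 32 (512 used here is fine)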
  // check tf engine state
  const engine = await faceapi.tf.engine();
  log(`TF Engine State: ${str(engine.state)}`);
  // const testT = faceapi.tf.tensor([0]);
  // const testF = testT.toFloat();
  // console.log(testT.print(), testF.print());
  // testT.dispose();
  // testF.dispose();
  // loop through all images and try to process them
  log(`Start processing: ${samples.length} images ...<br>`);
  for (const img of samples) {
    // new line
    document.body.appendChild(document.createElement('br'));
    // load and resize image
    const canvas = await image(img);
    try {
      // actual model execution
      const dataTinyYolo = await faceapi
        // @ts-ignore
        .detectAllFaces(canvas, optionsTinyFace)
        .withFaceLandmarks()
        .withFaceExpressions()
        .withFaceDescriptors()
        .withAgeAndGender();
      // print results to screen
      print('TinyFace Detector', img, dataTinyYolo);
      // actual model execution
      const dataSSDMobileNet = await faceapi
        .detectAllFaces(canvas, optionsSSDMobileNet)
        .withFaceLandmarks()
        .withFaceExpressions()
        .withFaceDescriptors()
        .withAgeAndGender();
      // print results to screen
      print('SSD MobileNet', img, dataSSDMobileNet);
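      // optional sketch (commented out, not part of the demo): the descriptors computed above could be
      // used for face matching via faceapi.FaceMatcher; assumes SSD MobileNet detected at least one face
      // const matcher = new faceapi.FaceMatcher(dataSSDMobileNet);
      // for (const face of dataTinyYolo) log(`Match: ${matcher.findBestMatch(face.descriptor).toString()}`);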
    } catch (err) {
      log(`Image: ${img} Error during processing ${str(err)}`);
      // eslint-disable-next-line no-console
      console.error(err);
    }
  }
}

// start processing as soon as page is loaded
window.onload = main;