mirror of https://github.com/vladmandic/human

commit fa3a9e5372 (parent ca81481cb2): fixed memory leak
@@ -280,4 +280,4 @@ Library can also be used on mobile devices
 ## Todo

 - Improve detection of smaller faces
-- Fix memory leak in face detector
+- Verify age/gender models
@@ -7,13 +7,13 @@ const config = {
   face: {
     enabled: true,
     detector: { maxFaces: 10, skipFrames: 5, minConfidence: 0.8, iouThreshold: 0.3, scoreThreshold: 0.75 },
-    mesh: { enabled: true },
-    iris: { enabled: true },
-    age: { enabled: true, skipFrames: 5 },
-    gender: { enabled: true },
+    mesh: { enabled: false },
+    iris: { enabled: false },
+    age: { enabled: false, skipFrames: 5 },
+    gender: { enabled: false },
   },
-  body: { enabled: true, maxDetections: 5, scoreThreshold: 0.75, nmsRadius: 20 },
-  hand: { enabled: true, skipFrames: 5, minConfidence: 0.8, iouThreshold: 0.3, scoreThreshold: 0.75 },
+  body: { enabled: false, maxDetections: 5, scoreThreshold: 0.75, nmsRadius: 20 },
+  hand: { enabled: false, skipFrames: 5, minConfidence: 0.8, iouThreshold: 0.3, scoreThreshold: 0.75 },
 };
 let settings;
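Note: the settings above are only the demo page's defaults; the library itself deep-merges whatever the caller passes into `detect(input, userConfig)` over its own defaults (see `mergeDeep(defaults, userConfig)` in src/index.js further down in this diff). A minimal usage sketch, assuming `detect()` is exported and importable as shown; the import path and the `video` element id are placeholders:

```js
// Sketch of calling the library with per-call overrides. The import path, the `video`
// element id, and the assumption that detect() is exported are placeholders/assumptions;
// the merge behavior itself comes from `mergeDeep(defaults, userConfig)` in src/index.js.
import * as human from './src/index.js';

async function runOnce() {
  const video = document.getElementById('video');
  // Only the keys that differ from the defaults need to be passed; mergeDeep fills in the rest.
  const result = await human.detect(video, {
    face: { age: { enabled: false }, gender: { enabled: false } },
    hand: { enabled: false },
  });
  // detect() resolves to { face, body, hand } per the return statement later in this diff.
  console.log(result.face.length, result.body.length, result.hand.length);
}

runOnce();
```

Any key omitted from the override keeps the library default rather than being reset, which is the point of the deep merge.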
@@ -181,10 +181,11 @@ async function runHumanDetect() {
     log.innerText = `
       TFJS Version: ${human.tf.version_core} Memory: ${engine.state.numBytes.toLocaleString()} bytes ${engine.state.numDataBuffers.toLocaleString()} buffers ${engine.state.numTensors.toLocaleString()} tensors
       GPU Memory: used ${engine.backendInstance.numBytesInGPU.toLocaleString()} bytes free ${Math.floor(1024 * 1024 * engine.backendInstance.numMBBeforeWarning).toLocaleString()} bytes
-      Result: Face: ${(JSON.stringify(result.face)).length.toLocaleString()} bytes Body: ${(JSON.stringify(result.body)).length.toLocaleString()} bytes Hand: ${(JSON.stringify(result.hand)).length.toLocaleString()} bytes
+      Result Object Size: Face: ${(JSON.stringify(result.face)).length.toLocaleString()} bytes Body: ${(JSON.stringify(result.body)).length.toLocaleString()} bytes Hand: ${(JSON.stringify(result.hand)).length.toLocaleString()} bytes
     `;
     // rinse & repeate
-    requestAnimationFrame(runHumanDetect);
+    // setTimeout(() => runHumanDetect(), 1000); // slow loop for debugging purposes
+    requestAnimationFrame(runHumanDetect); // immediate loop
   }
 }
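Note: the log above reads internal counters straight off `tf.engine()` (`engine.state.numBytes`, `numTensors`, `backendInstance.numBytesInGPU`). For a quick leak check outside the demo, the public `tf.memory()` call exposes the same kind of counters; a sketch using only that API, where `detectOnce` is a placeholder for a single `human.detect()` call:

```js
import * as tf from '@tensorflow/tfjs';

// Run a detector-like callback for a few frames and record the tensor count after each one.
// A flat series means no leak; a steadily growing series means tensors are never disposed.
async function checkForLeak(detectOnce, frames = 10) {
  const counts = [];
  for (let i = 0; i < frames; i++) {
    await detectOnce();
    counts.push(tf.memory().numTensors);
  }
  console.log('numTensors per frame:', counts.join(', '));
  return counts[counts.length - 1] - counts[0]; // > 0 indicates leaked tensors
}
```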
@@ -242,6 +243,10 @@ async function setupCanvas() {

 async function setupCamera() {
   const video = document.getElementById('video');
+  if (!navigator.mediaDevices) {
+    document.getElementById('log').innerText = 'Video not supported';
+    return;
+  }
   const stream = await navigator.mediaDevices.getUserMedia({
     audio: false,
     video: { facingMode: 'user', width: window.innerWidth, height: window.innerHeight },
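Note: the hunk is cut off by the diff context before the function ends. A typical completion of this kind of `getUserMedia` setup (an illustration, not the repository's actual code) attaches the stream to the video element and waits for metadata before detection starts:

```js
// Sketch of a complete camera setup in the same style as the demo code above.
// The srcObject/onloadedmetadata handling is an assumption based on this diff,
// not a copy of the repository's implementation.
async function setupCameraSketch() {
  const video = document.getElementById('video');
  if (!navigator.mediaDevices) {
    document.getElementById('log').innerText = 'Video not supported';
    return null;
  }
  const stream = await navigator.mediaDevices.getUserMedia({
    audio: false,
    video: { facingMode: 'user', width: window.innerWidth, height: window.innerHeight },
  });
  video.srcObject = stream; // hand the camera stream to the <video> element
  return new Promise((resolve) => {
    video.onloadedmetadata = () => {
      video.play();
      resolve(video); // dimensions are known here, safe to size the canvas and start detection
    };
  });
}
```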

src/index.js (23 changed lines)
@@ -33,28 +33,31 @@ function mergeDeep(...objects) {
 async function detect(input, userConfig) {
   const config = mergeDeep(defaults, userConfig);

   // load models if enabled
+  if (config.body.enabled && !models.posenet) models.posenet = await posenet.load(config.body);
+  if (config.hand.enabled && !models.handpose) models.handpose = await handpose.load(config.hand);
+  if (config.face.enabled && !models.facemesh) models.facemesh = await facemesh.load(config.face);
   if (config.face.age.enabled) await ssrnet.loadAge(config);
   if (config.face.gender.enabled) await ssrnet.loadGender(config);

+  tf.engine().startScope();

   // run posenet
   let poseRes = [];
-  if (config.body.enabled) {
-    if (!models.posenet) models.posenet = await posenet.load(config.body);
-    poseRes = await models.posenet.estimateMultiplePoses(input, config.body);
-  }
+  if (config.body.enabled) poseRes = await models.posenet.estimateMultiplePoses(input, config.body);

   // run handpose
   let handRes = [];
-  if (config.hand.enabled) {
-    if (!models.handpose) models.handpose = await handpose.load(config.hand);
-    handRes = await models.handpose.estimateHands(input, config.hand);
-  }
+  if (config.hand.enabled) handRes = await models.handpose.estimateHands(input, config.hand);

   // run facemesh, includes blazeface and iris
   const faceRes = [];
   if (config.face.enabled) {
-    if (!models.facemesh) models.facemesh = await facemesh.load(config.face);
     const faces = await models.facemesh.estimateFaces(input, config.face);
     for (const face of faces) {
       // run ssr-net age & gender, inherits face from blazeface
       const ssrdata = (config.face.age.enabled || config.face.gender.enabled) ? await ssrnet.predict(face.image, config) : {};
+      face.image.dispose();
       // iris: array[ bottom, left, top, right, center ]
       const iris = (face.annotations.leftEyeIris && face.annotations.rightEyeIris)
         ? Math.max(face.annotations.leftEyeIris[3][0] - face.annotations.leftEyeIris[1][0], face.annotations.rightEyeIris[3][0] - face.annotations.rightEyeIris[1][0])
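Note: besides disposing `face.image`, the hunk above hoists all model loading to the top of `detect()`, before `tf.engine().startScope()`; the matching `tf.engine().endScope()` is added in the next hunk. The ordering matters because tensors created while an engine scope is open and not explicitly kept are disposed at `endScope()`, so lazily loading a model inside the scoped region could reclaim its weights. A standalone sketch of the load-outside, infer-inside pattern, with `loadModel`/`runModel` as hypothetical stand-ins for the posenet/handpose/facemesh calls:

```js
import * as tf from '@tensorflow/tfjs';

const models = {};

async function detectSketch(input, loadModel, runModel) {
  // Load once, outside any engine scope, so the weights survive endScope() and later calls.
  if (!models.cached) models.cached = await loadModel();

  tf.engine().startScope(); // start tracking tensors created from here on
  // Assumes runModel returns plain JS data (keypoints, boxes), as the models in detect() do;
  // if it returned tensors they would need tf.keep() to survive endScope().
  const result = await runModel(models.cached, input);
  tf.engine().endScope(); // dispose tracked intermediate tensors created during inference
  return result;
}
```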
@@ -71,6 +74,8 @@ async function detect(input, userConfig) {
     }
   }

+  tf.engine().endScope();
+
   // combine results
   return { face: faceRes, body: poseRes, hand: handRes };
 }
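Note: taken together, the fix is the usual TensorFlow.js recipe for per-frame inference: dispose intermediate tensors you own (`face.image.dispose()`) and bracket each call with an engine scope so everything else created in between is reclaimed. A self-contained sketch of the effect on `tf.memory().numTensors` (`leakyStep`/`scopedStep` are illustrative helpers, not code from this repository):

```js
import * as tf from '@tensorflow/tfjs';

// Creates tensors and never disposes them -> numTensors grows on every call.
function leakyStep() {
  tf.tensor1d([1, 2, 3]).square();
}

// Same work bracketed by an engine scope -> everything created inside is reclaimed.
function scopedStep() {
  tf.engine().startScope();
  tf.tensor1d([1, 2, 3]).square();
  tf.engine().endScope();
}

async function main() {
  await tf.ready(); // make sure a backend is initialized
  for (let i = 0; i < 3; i++) leakyStep();
  console.log('tensors after leaky steps:', tf.memory().numTensors); // climbs by 2 per call

  const before = tf.memory().numTensors;
  for (let i = 0; i < 3; i++) scopedStep();
  console.log('growth after scoped steps:', tf.memory().numTensors - before); // 0
}

main();
```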