From d3e31ec79f0f7f9b3382576dd246cd86de22bb43 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Sat, 22 May 2021 13:14:49 -0400 Subject: [PATCH] update results documentation --- Outputs.md | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/Outputs.md b/Outputs.md index db9bfa1..29e21af 100644 --- a/Outputs.md +++ b/Outputs.md @@ -6,10 +6,12 @@ Result of `humand.detect()` is a single object that includes data for all enable ```js result = { - version: // version string of the human library + timestamp: // timestamp in milliseconds when detection occurred + canvas: // optional processed canvas face: // [ { + id, // face id number confidence, // returns faceConfidence if exists, otherwise boxConfidence faceConfidence // confidence in detection box after running mesh boxConfidence // confidence in detection box before running mesh @@ -18,10 +20,18 @@ result = { mesh, // 468 base points & 10 iris points, normalized to input impact size meshRaw, // 468 base points & 10 iris points, normalized to range of 0..1 annotations, // 32 base annotated landmarks & 2 iris annotations - iris, // relative distance of iris to camera, multiple by focal lenght to get actual distance age, // estimated age gender, // 'male', 'female' - embedding, // [float] vector of 192 values used for face similarity compare + genderConfidence // confidence in gender detection + embedding, // [float] vector of number values used for face similarity compare + iris, // relative distance of iris to camera, multiply by focal length to get actual distance + emotion: // returns multiple possible emotions for a given face, each with probability + [ + { + score, // probability of emotion + emotion, // 'angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral' + } + ], rotation: { angle: // 3d face rotation values in radians in range of -pi/2 to pi/2 which is -90 to +90 degrees { @@ -31,13 +41,6 @@ result = { } matrix: [] // flat array of [3,3] that can be 
directly used for GL matrix rotations such as in Three.js } - emotion: // returns multiple possible emotions for a given face, each with probability - [ - { - score, // probabily of emotion - emotion, // 'angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral' - } - ], tensor: // if config.face.detector.return is set to true, detector will return // a raw tensor containing cropped image of a face // note that tensors must be explicitly disposed to free memory by calling tensor.dispose(); @@ -46,16 +49,20 @@ result = { body: // [ { + id, // body id number score, // , overal detection score, only used for 'posenet', not used for 'blazepose' keypoints, // for 'posenet': 17 annotated landmarks // for 'blazepose': // 39 annotated landmarks for full or 31 annotated landmarks for upper // presence denotes probability value in range 0..1 that the point is located within the frame + box, // , clamped and normalized to input image size + boxRaw, // , unclamped and normalized to range of 0..1 } ], hand: // [ { + id, // hand id number confidence, // , box, // , clamped and normalized to input image size boxRaw, // , unclamped and normalized to range of 0..1 @@ -69,8 +76,6 @@ result = { score, // class, // class id based on coco labels label, // class label based on coco labels - center, // , clamped and normalized to input image size - centerRaw, // , unclamped and normalized to range of 0..1 box, // , clamped and normalized to input image size boxRaw, // , unclamped and normalized to range of 0..1 } @@ -84,6 +89,8 @@ result = { ], performance = { // performance data of last execution for each module measuredin miliseconds // note that per-model performance data is not available in async execution mode + frames, // total number of frames processed + cached, // total number of frames where some cached values were used backend, // time to initialize tf backend, keeps longest value measured load, // time to load models, keeps longest value measured image, // time for 
image processing @@ -93,6 +100,7 @@ result = { face, // model time agegender, // model time emotion, // model time + change, // frame change detection time total, // end to end time } }