update results documentation

master
Vladimir Mandic 2021-05-22 13:14:49 -04:00
parent fa896c5330
commit d3e31ec79f
1 changed file with 20 additions and 12 deletions

@@ -6,10 +6,12 @@ Result of `human.detect()` is a single object that includes data for all enabled
```js
result = {
version: // <string> version string of the human library
timestamp: // <number> timestamp in milliseconds when detection occurred
canvas: // optional processed canvas
face: // <array of detected objects>
[
{
id, // <number> face id number
confidence, // <number> faceConfidence if available, otherwise boxConfidence
faceConfidence, // <number> confidence in detection box after running mesh
boxConfidence, // <number> confidence in detection box before running mesh
@@ -18,10 +20,18 @@ result = {
mesh, // <array of 3D points [x, y, z]> 468 base points & 10 iris points, normalized to input image size
meshRaw, // <array of 3D points [x, y, z]> 468 base points & 10 iris points, normalized to range of 0..1
annotations, // <list of object { landmark: array of points }> 32 base annotated landmarks & 2 iris annotations
iris, // <number> relative distance of iris to camera; multiply by focal length to get actual distance
age, // <number> estimated age
gender, // <string> 'male', 'female'
embedding, // <array>[float] vector of 192 values used for face similarity compare
genderConfidence // <number> confidence in gender detection
embedding, // <array>[float] vector of number values used for face similarity compare
iris, // <number> relative distance of iris to camera; multiply by focal length to get actual distance
emotion: // <array of emotions> returns multiple possible emotions for a given face, each with probability
[
{
score, // <number> probability of emotion
emotion, // <string> 'angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral'
}
],
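// Usage sketch (comments only, not part of the result schema): `face` is
// assumed to be one entry of `result.face`, and `focalLength` is a
// hypothetical camera-specific constant you must supply yourself:
//   const top = face.emotion.reduce((a, b) => (a.score > b.score ? a : b)); // dominant emotion
//   const distance = face.iris * focalLength; // scale relative iris distance by focal length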
rotation: {
angle: // 3D face rotation values in radians, in range -pi/2 to pi/2 (-90 to +90 degrees)
{
@@ -31,13 +41,6 @@ result = {
}
matrix: [] // flat array of [3,3] that can be directly used for GL matrix rotations such as in Three.js
}
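// A hedged sketch of consuming the rotation matrix in Three.js, assuming an
// imported THREE namespace (Matrix3.fromArray is a standard Three.js method):
//   const rot = new THREE.Matrix3().fromArray(face.rotation.matrix);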
emotion: // <array of emotions> returns multiple possible emotions for a given face, each with probability
[
{
score, // <number> probability of emotion
emotion, // <string> 'angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral'
}
],
tensor: // if config.face.detector.return is set to true, detector will return
// a raw tensor containing a cropped image of the face
// note that tensors must be explicitly disposed to free memory by calling tensor.dispose();
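// Disposal sketch, assuming config.face.detector.return was set to true:
//   for (const f of result.face) if (f.tensor) f.tensor.dispose(); // free tensor memory explicitly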
@@ -46,16 +49,20 @@ result = {
body: // <array of detected objects>
[
{
id, // body id number
score, // <number> overall detection score; only used for 'posenet', not used for 'blazepose'
keypoints, // for 'posenet': <array of 2D landmarks [ score, landmark, position [x, y] ]> 17 annotated landmarks
// for 'blazepose': <array of 2D landmarks [ score, landmark, position [x, y, z], presence ]>
// 39 annotated landmarks for full or 31 annotated landmarks for upper
// presence denotes probability value in range 0..1 that the point is located within the frame
box, // <array [x, y, width, height]>, clamped and normalized to input image size
boxRaw, // <array [x, y, width, height]>, unclamped and normalized to range of 0..1
}
],
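// Iteration sketch for the 'posenet' keypoint layout described above (field
// names are taken from this doc and should be treated as assumptions):
//   for (const kp of result.body[0].keypoints) console.log(kp.landmark, kp.score, kp.position);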
hand: // <array of detected objects>
[
{
id, // hand id number
confidence, // <number>,
box, // <array [x, y, width, height]>, clamped and normalized to input image size
boxRaw, // <array [x, y, width, height]>, unclamped and normalized to range of 0..1
@@ -69,8 +76,6 @@ result = {
score, // <number>
class, // <number> class id based on coco labels
label, // <string> class label based on coco labels
center, // <array [x, y]>, clamped and normalized to input image size
centerRaw, // <array [x, y]>, unclamped and normalized to range of 0..1
box, // <array [x, y, width, height]>, clamped and normalized to input image size
boxRaw, // <array [x, y, width, height]>, unclamped and normalized to range of 0..1
}
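// Logging sketch; the array name `result.object` is assumed from this
// library's result layout:
//   for (const obj of result.object) console.log(obj.label, obj.score, obj.box);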
@@ -84,6 +89,8 @@ result = {
],
performance: { // performance data of last execution for each module, measured in milliseconds
// note that per-model performance data is not available in async execution mode
frames, // total number of frames processed
cached, // total number of frames where some cached values were used
backend, // time to initialize tf backend, keeps longest value measured
load, // time to load models, keeps longest value measured
image, // time for image processing
@@ -93,6 +100,7 @@ result = {
face, // model time
agegender, // model time
emotion, // model time
change, // frame change detection time
total, // end to end time
}
}
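// End-to-end usage sketch; constructor and detect signature are assumed from
// this repo's other docs:
//   const human = new Human();
//   const result = await human.detect(inputCanvas);
//   console.log(`total: ${result.performance.total}ms`);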