mirror of https://github.com/vladmandic/human

update results documentation

parent fa896c5330
commit d3e31ec79f

Outputs.md: 32 changed lines
````diff
--- a/Outputs.md
+++ b/Outputs.md
@@ -6,10 +6,12 @@ Result of `human.detect()` is a single object that includes data for all enabled modules
 ```js
 result = {
   version:           // <string> version string of the human library
   timestamp:         // <number> timestamp in milliseconds when detection occurred
+  canvas:            // optional processed canvas
   face:              // <array of detected objects>
   [
     {
+      id,              // <number> face id number
       confidence,      // <number> returns faceConfidence if exists, otherwise boxConfidence
       faceConfidence,  // <number> confidence in detection box after running mesh
       boxConfidence,   // <number> confidence in detection box before running mesh
````
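For orientation, here is a minimal usage sketch that produces this result object; the default export and the empty config are assumptions based on the library's typical setup and may differ between versions:

```js
import Human from '@vladmandic/human';

const human = new Human({}); // empty config; library defaults enable the common modules

async function run(input) { // input: image, video, or canvas element
  const result = await human.detect(input);
  for (const face of result.face) {
    // confidence falls back to boxConfidence when faceConfidence is not available
    console.log(`face ${face.id}: confidence ${face.confidence}`);
  }
}
```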
````diff
@@ -18,10 +20,18 @@ result = {
       mesh,            // <array of 3D points [x, y, z]> 468 base points & 10 iris points, normalized to input image size
       meshRaw,         // <array of 3D points [x, y, z]> 468 base points & 10 iris points, normalized to range of 0..1
       annotations,     // <list of objects { landmark: array of points }> 32 base annotated landmarks & 2 iris annotations
-      iris,            // <number> relative distance of iris to camera, multiply by focal length to get actual distance
       age,             // <number> estimated age
       gender,          // <string> 'male', 'female'
-      embedding,       // <array>[float] vector of 192 values used for face similarity compare
+      genderConfidence, // <number> confidence in gender detection
+      embedding,       // <array>[float] vector of number values used for face similarity compare
+      iris,            // <number> relative distance of iris to camera, multiply by focal length to get actual distance
+      emotion:         // <array of emotions> returns multiple possible emotions for a given face, each with probability
+      [
+        {
+          score,       // <number> probability of emotion
+          emotion,     // <string> 'angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral'
+        }
+      ],
       rotation: {
         angle:         // 3d face rotation values in radians, in range of -pi/2 to pi/2 (i.e. -90 to +90 degrees)
         {
````
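A sketch of consuming the per-face attributes above. The emotion array is treated as unsorted, and `focalLength` is a hypothetical camera calibration constant, following the note that the relative iris distance must be multiplied by focal length:

```js
function describeFace(face, focalLength = 1.0) { // focalLength: hypothetical calibration value
  // pick the highest-probability emotion; works whether or not the array is pre-sorted
  const top = face.emotion.reduce((a, b) => (a.score > b.score ? a : b), { score: 0, emotion: 'none' });
  const distance = face.iris * focalLength; // relative iris distance scaled to an actual distance
  return `${face.gender} (${face.genderConfidence}) age ${face.age} ${top.emotion} (${top.score}) distance ~${distance}`;
}

// cosine similarity over two embedding vectors; the library may provide its own compare helper,
// so treat this only as an illustration of what "face similarity compare" means
function cosineSimilarity(a, b) {
  let dot = 0, na = 0, nb = 0;
  for (let i = 0; i < a.length; i++) { dot += a[i] * b[i]; na += a[i] * a[i]; nb += b[i] * b[i]; }
  return dot / (Math.sqrt(na) * Math.sqrt(nb) || 1);
}
```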
````diff
@@ -31,13 +41,6 @@ result = {
         }
         matrix: []     // flat array of [3,3] that can be used directly for GL matrix rotations such as in Three.js
       }
-      emotion:         // <array of emotions> returns multiple possible emotions for a given face, each with probability
-      [
-        {
-          score,       // <number> probability of emotion
-          emotion,     // <string> 'angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral'
-        }
-      ],
       tensor:          // if config.face.detector.return is set to true, detector will return
                        // a raw tensor containing a cropped image of the face
                        // note that tensors must be explicitly disposed to free memory by calling tensor.dispose()
````
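Since the rotation matrix is a flat 3x3 array, in Three.js it can be expanded to a `Matrix4` before being applied to an object; the `three` import, the `avatar` object, and the column-major assumption are all illustrative. The tensor cleanup follows the disposal note in the hunk above:

```js
import * as THREE from 'three'; // assumed dependency, for illustration only

function applyFaceRotation(face, avatar) { // avatar: any THREE.Object3D
  // fromArray reads column-major; transpose first if the source matrix turns out to be row-major
  const m3 = new THREE.Matrix3().fromArray(face.rotation.matrix);
  const m4 = new THREE.Matrix4().setFromMatrix3(m3); // expand 3x3 rotation into a 4x4 transform
  avatar.setRotationFromMatrix(m4);
}

function releaseTensors(result) {
  // when config.face.detector.return is true, each face carries a raw tensor that must be disposed
  for (const face of result.face) if (face.tensor) face.tensor.dispose();
}
```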
````diff
@@ -46,16 +49,20 @@ result = {
   body:                // <array of detected objects>
   [
     {
+      id,              // body id number
       score,           // <number> overall detection score, only used for 'posenet', not used for 'blazepose'
       keypoints,       // for 'posenet': <array of 2D landmarks [ score, landmark, position [x, y] ]> 17 annotated landmarks
                        // for 'blazepose': <array of 2D landmarks [ score, landmark, position [x, y, z], presence ]>
                        // 39 annotated landmarks for full or 31 annotated landmarks for upper
                        // presence denotes probability value in range 0..1 that the point is located within the frame
+      box,             // <array [x, y, width, height]>, clamped and normalized to input image size
+      boxRaw,          // <array [x, y, width, height]>, unclamped and normalized to range of 0..1
     }
   ],
   hand:                // <array of detected objects>
   [
     {
+      id,              // hand id number
       confidence,      // <number>
       box,             // <array [x, y, width, height]>, clamped and normalized to input image size
       boxRaw,          // <array [x, y, width, height]>, unclamped and normalized to range of 0..1
````
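A sketch of consuming the body and hand arrays, e.g. drawing detection boxes on an overlay canvas; `ctx` is an assumed `CanvasRenderingContext2D`. `box` is already in input-image pixels, while `boxRaw` (0..1) scales to any overlay resolution:

```js
function drawBoxes(result, ctx) { // ctx: context of an overlay sized like the input image
  for (const item of [...result.body, ...result.hand]) {
    const [x, y, width, height] = item.box; // clamped to input image size, so usable directly
    ctx.strokeRect(x, y, width, height);
  }
}

function drawBoxesScaled(result, ctx, width, height) { // overlay of arbitrary resolution
  for (const item of [...result.body, ...result.hand]) {
    const [x, y, w, h] = item.boxRaw; // normalized to 0..1, so scale by overlay dimensions
    ctx.strokeRect(x * width, y * height, w * width, h * height);
  }
}
```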
````diff
@@ -69,8 +76,6 @@ result = {
       score,           // <number>
       class,           // <number> class id based on coco labels
       label,           // <string> class label based on coco labels
-      center,          // <array [x, y]>, clamped and normalized to input image size
-      centerRaw,       // <array [x, y]>, unclamped and normalized to range of 0..1
       box,             // <array [x, y, width, height]>, clamped and normalized to input image size
       boxRaw,          // <array [x, y, width, height]>, unclamped and normalized to range of 0..1
     }
````
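The score/class/label fields above belong to the object detection results; assuming they live in an array such as `result.object` (the array name is not visible in this hunk), a simple threshold filter looks like this, with 0.5 as an arbitrary example value:

```js
function listObjects(objects, minScore = 0.5) { // minScore: arbitrary example threshold
  return objects
    .filter((obj) => obj.score >= minScore)
    .map((obj) => `${obj.label} (coco class ${obj.class}) box [${obj.box.join(', ')}]`);
}
```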
````diff
@@ -84,6 +89,8 @@ result = {
   ],
   performance = {    // performance data of last execution for each module, measured in milliseconds
                      // note that per-model performance data is not available in async execution mode
+    frames,          // total number of frames processed
+    cached,          // total number of frames where some cached values were used
     backend,         // time to initialize tf backend, keeps longest value measured
     load,            // time to load models, keeps longest value measured
     image,           // time for image processing
````
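A sketch of reading the performance block; per the note above, per-model timings are not populated in async execution mode, so the numeric check below skips missing entries:

```js
function logPerformance(result) {
  const perf = result.performance;
  console.log(`frames: ${perf.frames} (${perf.cached} used cached values)`);
  for (const [stage, ms] of Object.entries(perf)) {
    if (typeof ms === 'number') console.log(`  ${stage}: ${ms} ms`); // skip entries absent in async mode
  }
}
```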
````diff
@@ -93,6 +100,7 @@ result = {
     face,            // model time
     agegender,       // model time
     emotion,         // model time
+    change,          // frame change detection time
     total,           // end to end time
   }
 }
````
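Putting it together, the end-to-end `total` timing can drive a rough frames-per-second estimate in a processing loop; `requestAnimationFrame` assumes a browser environment and `human` is the instance from the first sketch:

```js
async function loop(input) {
  const result = await human.detect(input);
  const fps = result.performance.total > 0 ? 1000 / result.performance.total : 0; // total is end-to-end ms
  console.log(`~${fps.toFixed(1)} fps, faces: ${result.face.length}, bodies: ${result.body.length}, hands: ${result.hand.length}`);
  requestAnimationFrame(() => loop(input)); // browser-only; use setTimeout in Node
}
```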