mirror of https://github.com/vladmandic/human
added docs on embedding
parent 726b24b9a7
commit 1a591da2e2
@@ -1,7 +1,7 @@
# @vladmandic/human

-Version: **0.8.7**
+Version: **0.8.8**

Description: **human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition**

Author: **Vladimir Mandic <mandic00@live.com>**
@@ -12,7 +12,9 @@ Repository: **<git+https://github.com/vladmandic/human.git>**

-### **HEAD -> main** 2020/11/12 mandic00@live.com
+### **origin/main, origin/HEAD** 2020/11/12 mandic00@live.com

+### **0.8.8** 2020/11/12 mandic00@live.com
- reduced bundle size
- implemented buffered processing
- fix for conditional model loading

### **0.8.7** 2020/11/11 mandic00@live.com
@@ -1,10 +1,31 @@
## Configuration

Detailed configuration options are explained below, but they are best seen in the menus present in the `demo` application:
*note: some advanced configuration options are not exposed in the UI*

![Menus](https://github.com/vladmandic/human/raw/main/assets/screenshot-menu.png)

<br>

Below is output of `human.defaults` object
![Config](https://github.com/vladmandic/human/raw/main/assets/screenshot-config.png)

<br>
Main configuration objects are:
- **config.filter**: controls image pre-processing
- **config.face**: controls face detection
- **config.body**: controls body pose detection
- **config.hand**: controls hand and finger detection

With **config.face** having several subsections:
- **config.face.mesh**: controls facial mesh and landmark detection
- **config.face.iris**: controls iris detection
- **config.face.age**: controls age prediction
- **config.face.gender**: controls gender prediction
- **config.face.emotion**: controls emotion prediction
- **config.face.embedding**: controls generation of face embedding data used for face similarity checks
<br>

Below is full output of `human.defaults` object
Any property can be overridden by passing a user object during `human.detect()`
Note that the user object and default configuration are merged using deep-merge, so you do not need to redefine the entire configuration
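
For example, a minimal sketch of such a partial override (assuming an existing `human` instance and an `inputImage` element; the specific property choices are just for illustration):

```js
// only the listed properties are overridden; everything else
// keeps its value from human.defaults thanks to deep-merge
const result = await human.detect(inputImage, {
  face: { iris: { enabled: false } }, // skip iris analysis for this call
  hand: { enabled: false },           // skip hand & finger detection entirely
});
```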

@@ -14,6 +35,8 @@ All configuration details can be changed in real-time!

```js
config = {
  backend: 'webgl',        // select tfjs backend to use
  wasmPath: '../assets/',  // path for wasm binaries
                           // only used for backend: wasm
  console: true,           // enable debugging output to console
  async: true,             // execute enabled models in parallel
                           // this disables per-model performance data but
@@ -74,6 +97,8 @@ config = {
                        // 'front' is optimized for large faces such as front-facing camera,
                        // 'back' is optimized for distant faces
    inputSize: 256,     // fixed value: 128 for 'front' and 256 for 'back'
    rotation: false,    // use best-guess rotated face image or just box with rotation as-is
    maxFaces: 10,       // maximum number of faces detected in the input
                        // should be set to the minimum number for performance
    skipFrames: 15,     // how many frames to go without re-running the face bounding box detector
@@ -81,10 +106,10 @@ config = {
                        // e.g., if model is running at 25 FPS, we can re-use existing bounding
                        // box for updated face analysis as the head probably hasn't moved much
                        // in short time (15 * 1/25 = 0.6 sec)
-   minConfidence: 0.1, // threshold for discarding a prediction
-   iouThreshold: 0.1,  // threshold for deciding whether boxes overlap too much in
+   minConfidence: 0.5, // threshold for discarding a prediction
+   iouThreshold: 0.2,  // threshold for deciding whether boxes overlap too much in
                        // non-maximum suppression (0.1 means drop if overlap 10%)
-   scoreThreshold: 0.2, // threshold for deciding when to remove boxes based on score
+   scoreThreshold: 0.5, // threshold for deciding when to remove boxes based on score
                        // in non-maximum suppression,
                        // this is applied on detection objects only and before minConfidence
  },
|
@ -92,17 +117,20 @@ config = {
|
|||
mesh: {
|
||||
enabled: true,
|
||||
modelPath: '../models/facemesh.json',
|
||||
inputSize: 192, // fixed value
|
||||
},
|
||||
|
||||
iris: {
|
||||
enabled: true,
|
||||
modelPath: '../models/iris.json',
|
||||
inputSize: 64, // fixed value
|
||||
},
|
||||
|
||||
age: {
|
||||
enabled: true,
|
||||
modelPath: '../models/age-ssrnet-imdb.json', // can be 'age-ssrnet-imdb' or 'age-ssrnet-wiki'
|
||||
// which determines training set for model
|
||||
inputSize: 64, // fixed value
|
||||
skipFrames: 15, // how many frames to go without re-running the detector
|
||||
// only used for video inputs
|
||||
},
|
||||
|
@@ -111,21 +139,30 @@ config = {
    enabled: true,
    minConfidence: 0.1, // threshold for discarding a prediction
    modelPath: '../models/gender-ssrnet-imdb.json', // can be 'gender', 'gender-ssrnet-imdb' or 'gender-ssrnet-wiki'
    inputSize: 64,      // fixed value
    skipFrames: 15,     // how many frames to go without re-running the detector
                        // only used for video inputs
  },

  emotion: {
    enabled: true,
    inputSize: 64,      // fixed value
    minConfidence: 0.2, // threshold for discarding a prediction
    skipFrames: 15,     // how many frames to go without re-running the detector
    modelPath: '../models/emotion-large.json', // can be 'mini', 'large'
  },

  embedding: {
    enabled: false,
    inputSize: 112,     // fixed value
    modelPath: '../models/mobilefacenet.json',
  },
},

body: {
  enabled: true,
  modelPath: '../models/posenet.json',
  inputSize: 257,      // fixed value
  maxDetections: 10,   // maximum number of people detected in the input
                       // should be set to the minimum number for performance
  scoreThreshold: 0.8, // threshold for deciding when to remove boxes based on score
|
@ -135,6 +172,7 @@ config = {
|
|||
|
||||
hand: {
|
||||
enabled: true,
|
||||
inputSize: 256, // fixed value
|
||||
skipFrames: 15, // how many frames to go without re-running the hand bounding box detector
|
||||
// only used for video inputs
|
||||
// e.g., if model is running st 25 FPS, we can re-use existing bounding
|
||||
|
|
|
@@ -7,6 +7,7 @@
- Body Pose Detection: [**PoseNet**](https://medium.com/tensorflow/real-time-human-pose-estimation-in-the-browser-with-tensorflow-js-7dd0bc881cd5)
- Age & Gender Prediction: [**SSR-Net**](https://github.com/shamangary/SSR-Net)
- Emotion Prediction: [**Oarriaga**](https://github.com/oarriaga/face_classification)
+- Face Embedding: [**Sirius-AI MobileFaceNet**](https://github.com/sirius-ai/MobileFaceNet_TF)
- Image Filters: [**WebGLImageFilter**](https://github.com/phoboslab/WebGLImageFilter)
- Pinto Model Zoo: [**Pinto**](https://github.com/PINTO0309/PINTO_model_zoo)
@@ -0,0 +1,30 @@
## Face Feature Embedding and Similarity Compare

<br>

To use the face similarity compare feature, you must first enable the `face.embedding` module
and calculate embedding vectors for both the first and second image you want to compare.

For example:

```js
const myConfig = { face: { embedding: { enabled: true } } };
const human = new Human(myConfig);

const firstResult = await human.detect(firstImage);
const secondResult = await human.detect(secondImage);

const firstEmbedding = firstResult.face[0].embedding;
const secondEmbedding = secondResult.face[0].embedding;

const similarity = human.simmilarity(firstEmbedding, secondEmbedding);

console.log(`faces are ${100 * similarity}% similar`);
```

Embedding vectors are calculated values uniquely identifying a given face, presented as an array of 192 float values

They can be stored as normal arrays and reused as needed
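
As a sketch of that idea (the use of `localStorage` and the `knownFace` key are illustrative assumptions, not part of the library), an embedding can be serialized to JSON and restored later:

```js
// persist the embedding of the first detected face; any JSON-capable storage works
const embedding = firstResult.face[0].embedding;
localStorage.setItem('knownFace', JSON.stringify(embedding));

// ...later, restore it and compare against a freshly detected face
const knownFace = JSON.parse(localStorage.getItem('knownFace'));
const score = human.simmilarity(knownFace, secondResult.face[0].embedding);
```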

The similarity function calculates the Euclidean distance between the two embedding vectors
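
A minimal sketch of such a distance-based score (illustrating the concept only; the library's exact distance calculation and normalization may differ):

```js
// Euclidean distance between two equal-length embedding vectors;
// smaller distance means the faces are more similar
function euclideanDistance(a, b) {
  let sum = 0;
  for (let i = 0; i < a.length; i++) sum += (a[i] - b[i]) ** 2;
  return Math.sqrt(sum);
}

// map distance to a rough 0..1 similarity score (illustrative normalization)
const score = Math.max(0, 1 - euclideanDistance(firstEmbedding, secondEmbedding));
```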

Home.md
@@ -1,6 +1,6 @@
# Human Library

-## 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition
+## 3D Face Detection, Face Embedding & Recognition, Body Pose Tracking, Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction & Gesture Recognition

<br>
@@ -17,11 +17,15 @@
- [**Home**](https://github.com/vladmandic/human/wiki)
- [**Demos**](https://github.com/vladmandic/human/wiki/Demos)
- [**Installation**](https://github.com/vladmandic/human/wiki/Install)
-- [**Usage**](https://github.com/vladmandic/human/wiki/Usage)
-- [**Configuration**](https://github.com/vladmandic/human/wiki/Configuration)
-- [**Outputs**](https://github.com/vladmandic/human/wiki/Outputs)
-- [**Notes on Backends**](https://github.com/vladmandic/human/wiki/Backends)
+- [**Usage & Functions**](https://github.com/vladmandic/human/wiki/Usage)
+- [**Configuration Details**](https://github.com/vladmandic/human/wiki/Configuration)
+- [**Output Details**](https://github.com/vladmandic/human/wiki/Outputs)
+- [**Face Embedding and Recognition**](https://github.com/vladmandic/human/wiki/Embedding)
- [**Gesture Recognition**](https://github.com/vladmandic/human/wiki/Gesture)
+
+### Additional notes:
+
+- [**Notes on Backends**](https://github.com/vladmandic/human/wiki/Backends)
- [**Development Server**](https://github.com/vladmandic/human/wiki/Development-Server)
- [**Build Process**](https://github.com/vladmandic/human/wiki/Build-Process)
- [**List of Models**](https://github.com/vladmandic/human/wiki/Models)
@@ -15,6 +15,7 @@ result = {
    iris,           // <number> relative distance of iris to camera, multiply by focal length to get actual distance
    age,            // <number> estimated age
    gender,         // <string> 'male', 'female'
+   embedding,      // <array>[float] vector of 192 values used for face similarity compare
  }
],
body:               // <array of detected objects>

Usage.md
@@ -35,6 +35,11 @@ Additionally, `Human` library exposes several objects and methods:

  human.load(config)           // explicitly call load method that loads configured models
                               // if you want to pre-load them instead of on-demand loading during 'human.detect()'
  human.image(image, config?)  // runs image processing without detection and returns canvas
+ human.warmup(config, image?) // warms up human library for faster initial execution after loading
+                              // if image is not provided, it will generate internal sample
+ human.simmilarity(embedding1, embedding2) // runs similarity calculation between two provided embedding vectors
+                              // vectors for source and target must be previously detected using
+                              // face.embedding module
```
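
Putting those methods together, a brief sketch of a typical startup sequence (`myConfig` and the `inputVideo` element are assumptions for illustration):

```js
// pre-load configured models, then warm up for faster first detection
await human.load(myConfig);
await human.warmup(myConfig); // no image given, so an internal sample is used

// run detection and compare the first two detected faces, if present
const result = await human.detect(inputVideo);
if (result.face.length >= 2) {
  const score = human.simmilarity(result.face[0].embedding, result.face[1].embedding);
  console.log(`face similarity: ${100 * score}%`);
}
```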
<br>