full rebuild

pull/50/head
Vladimir Mandic 2020-11-13 16:42:00 -05:00
parent 8d38c977be
commit 5c3de60f44
16 changed files with 295 additions and 415 deletions

README.md

@ -1,6 +1,6 @@
# Human Library
## 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition
## 3D Face Detection, Face Embedding & Recognition, Body Pose Tracking, Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction & Gesture Recognition
<br>
@ -12,18 +12,20 @@
- [**Issues Tracker**](https://github.com/vladmandic/human/issues)
- [**Change Log**](https://github.com/vladmandic/human/wiki/Change-Log)
<br>
### Wiki pages:
- [**Home**](https://github.com/vladmandic/human/wiki)
- [**Demos**](https://github.com/vladmandic/human/wiki/Demos)
- [**Installation**](https://github.com/vladmandic/human/wiki/Install)
- [**Usage**](https://github.com/vladmandic/human/wiki/Usage)
- [**Configuration**](https://github.com/vladmandic/human/wiki/Configuration)
- [**Outputs**](https://github.com/vladmandic/human/wiki/Outputs)
- [**Notes on Backends**](https://github.com/vladmandic/human/wiki/Backends)
- [**Usage & Functions**](https://github.com/vladmandic/human/wiki/Usage)
- [**Configuration Details**](https://github.com/vladmandic/human/wiki/Configuration)
- [**Output Details**](https://github.com/vladmandic/human/wiki/Outputs)
- [**Face Embedding and Recognition**](https://github.com/vladmandic/human/wiki/Embedding)
- [**Gesture Recognition**](https://github.com/vladmandic/human/wiki/Gesture)
### Additional notes:
- [**Notes on Backends**](https://github.com/vladmandic/human/wiki/Backends)
- [**Development Server**](https://github.com/vladmandic/human/wiki/Development-Server)
- [**Build Process**](https://github.com/vladmandic/human/wiki/Build-Process)
- [**List of Models**](https://github.com/vladmandic/human/wiki/Models)
@ -34,7 +36,7 @@
<br>
Compatible with *Browser*, *WebWorker* and *NodeJS* execution on both Windows and Linux
- Browser/WebWorker: Compatible with *CPU*, *WebGL* and *WASM* backends
- Browser/WebWorker: Compatible with *CPU*, *WebGL*, *WASM* and *WebGPU* backends
- NodeJS: Compatible with software *tfjs-node* and CUDA accelerated backends *tfjs-node-gpu*
- (and maybe with React-Native as it doesn't use any DOM objects)
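The backend list above maps to a single runtime switch. A minimal sketch of selecting one per call; it assumes the ESM bundle default-exports the Human class and that backend is a top-level config key, neither of which is shown in this diff, so verify both against your version:

import Human from '@vladmandic/human';

const human = new Human();

async function run(input) {
  // the userConfig passed here is merged into human.config via mergeDeep inside detect()
  const result = await human.detect(input, { backend: 'wasm' });
  console.log(result.performance);
}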

dist/human.esm-nobundle.js

@ -4270,15 +4270,15 @@ var require_facepipeline = __commonJS((exports) => {
rotatedImage = tf.image.rotateWithOffset(input, angle, 0, faceCenterNormalized);
rotationMatrix = util.buildRotationMatrix(-angle, faceCenter);
}
const boxCPU = {startPoint: box.startPoint, endPoint: box.endPoint};
const face2 = bounding.cutBoxFromImageAndResize(boxCPU, rotatedImage, [this.meshHeight, this.meshWidth]).div(255);
const face2 = bounding.cutBoxFromImageAndResize({startPoint: box.startPoint, endPoint: box.endPoint}, rotatedImage, [this.meshHeight, this.meshWidth]).div(255);
const outputFace = config2.detector.rotation ? tf.image.rotateWithOffset(face2, angle) : face2;
if (!config2.mesh.enabled) {
const prediction2 = {
coords: null,
box,
faceConfidence: null,
confidence: box.confidence,
image: face2
image: outputFace
};
return prediction2;
}
@ -4323,7 +4323,7 @@ var require_facepipeline = __commonJS((exports) => {
box: landmarksBox,
faceConfidence: confidenceVal,
confidence: box.confidence,
image: face2
image: outputFace
};
this.storedBoxes[i] = {...landmarksBox, landmarks: transformedCoords.arraySync(), confidence: box.confidence, faceConfidence: confidenceVal};
return prediction;
@ -4435,7 +4435,6 @@ var require_age = __commonJS((exports) => {
const models = {};
let last = {age: 0};
let frame = Number.MAX_SAFE_INTEGER;
const zoom = [0, 0];
async function load2(config2) {
if (!models.age) {
models.age = await loadGraphModel(config2.face.age.modelPath);
@ -4444,19 +4443,15 @@ var require_age = __commonJS((exports) => {
return models.age;
}
async function predict2(image2, config2) {
if (!models.age)
return null;
if (frame < config2.face.age.skipFrames && last.age && last.age > 0) {
frame += 1;
return last;
}
frame = 0;
return new Promise(async (resolve) => {
const box = [[
image2.shape[1] * zoom[0] / image2.shape[1],
image2.shape[2] * zoom[1] / image2.shape[2],
(image2.shape[1] - image2.shape[1] * zoom[0]) / image2.shape[1],
(image2.shape[2] - image2.shape[2] * zoom[1]) / image2.shape[2]
]];
const resize = tf.image.cropAndResize(image2, box, [0], [config2.face.age.inputSize, config2.face.age.inputSize]);
const resize = tf.image.resizeBilinear(image2, [config2.face.age.inputSize, config2.face.age.inputSize], false);
const enhance = tf.mul(resize, [255]);
tf.dispose(resize);
let ageT;
@ -4491,7 +4486,6 @@ var require_gender = __commonJS((exports) => {
let last = {gender: ""};
let frame = Number.MAX_SAFE_INTEGER;
let alternative = false;
const zoom = [0, 0];
const rgb = [0.2989, 0.587, 0.114];
async function load2(config2) {
if (!models.gender) {
@ -4502,19 +4496,15 @@ var require_gender = __commonJS((exports) => {
return models.gender;
}
async function predict2(image2, config2) {
if (!models.gender)
return null;
if (frame < config2.face.gender.skipFrames && last.gender !== "") {
frame += 1;
return last;
}
frame = 0;
return new Promise(async (resolve) => {
const box = [[
image2.shape[1] * zoom[0] / image2.shape[1],
image2.shape[2] * zoom[1] / image2.shape[2],
(image2.shape[1] - image2.shape[1] * zoom[0]) / image2.shape[1],
(image2.shape[2] - image2.shape[2] * zoom[1]) / image2.shape[2]
]];
const resize = tf.image.cropAndResize(image2, box, [0], [config2.face.gender.inputSize, config2.face.gender.inputSize]);
const resize = tf.image.resizeBilinear(image2, [config2.face.gender.inputSize, config2.face.gender.inputSize], false);
let enhance;
if (alternative) {
enhance = tf.tidy(() => {
@ -4573,7 +4563,6 @@ var require_emotion = __commonJS((exports) => {
const models = {};
let last = [];
let frame = Number.MAX_SAFE_INTEGER;
const zoom = [0, 0];
const rgb = [0.2989, 0.587, 0.114];
const scale = 1;
async function load2(config2) {
@ -4584,19 +4573,15 @@ var require_emotion = __commonJS((exports) => {
return models.emotion;
}
async function predict2(image2, config2) {
if (!models.emotion)
return null;
if (frame < config2.face.emotion.skipFrames && last.length > 0) {
frame += 1;
return last;
}
frame = 0;
return new Promise(async (resolve) => {
const box = [[
image2.shape[1] * zoom[0] / image2.shape[1],
image2.shape[2] * zoom[1] / image2.shape[2],
(image2.shape[1] - image2.shape[1] * zoom[0]) / image2.shape[1],
(image2.shape[2] - image2.shape[2] * zoom[1]) / image2.shape[2]
]];
const resize = tf.image.cropAndResize(image2, box, [0], [config2.face.emotion.inputSize, config2.face.emotion.inputSize]);
const resize = tf.image.resizeBilinear(image2, [config2.face.emotion.inputSize, config2.face.emotion.inputSize], false);
const [red, green, blue] = tf.split(resize, 3, 3);
resize.dispose();
const redNorm = tf.mul(red, rgb[0]);
@ -4643,10 +4628,6 @@ var require_emotion = __commonJS((exports) => {
var require_embedding = __commonJS((exports) => {
const profile2 = __toModule(require_profile());
const models = {};
let last = [];
let frame = Number.MAX_SAFE_INTEGER;
const zoom = [0, 0];
const rgb = [0.2989, 0.587, 0.114];
async function load2(config2) {
if (!models.embedding) {
models.embedding = await loadGraphModel(config2.face.embedding.modelPath);
@ -4654,57 +4635,37 @@ var require_embedding = __commonJS((exports) => {
}
return models.embedding;
}
function simmilarity2(embedding1, embedding2) {
if ((embedding1 == null ? void 0 : embedding1.length) !== (embedding2 == null ? void 0 : embedding2.length))
return 0;
const distance = 10 * Math.sqrt(embedding1.map((val, i) => val - embedding2[i]).reduce((dist2, diff) => dist2 + diff ** 2, 0));
const confidence = 2 * (0.5 - distance);
return Math.trunc(1e3 * confidence) / 1e3;
}
async function predict2(image2, config2) {
if (frame < config2.face.embedding.skipFrames && last.length > 0) {
frame += 1;
return last;
}
frame = 0;
if (!models.embedding)
return null;
return new Promise(async (resolve) => {
const box = [[
image2.shape[1] * zoom[0] / image2.shape[1],
image2.shape[2] * zoom[1] / image2.shape[2],
(image2.shape[1] - image2.shape[1] * zoom[0]) / image2.shape[1],
(image2.shape[2] - image2.shape[2] * zoom[1]) / image2.shape[2]
]];
const resize = tf.image.cropAndResize(image2, box, [0], [config2.face.embedding.inputSize, config2.face.embedding.inputSize]);
const [red, green, blue] = tf.split(resize, 3, 3);
resize.dispose();
const redNorm = tf.mul(red, rgb[0]);
const greenNorm = tf.mul(green, rgb[1]);
const blueNorm = tf.mul(blue, rgb[2]);
red.dispose();
green.dispose();
blue.dispose();
const grayscale = tf.addN([redNorm, greenNorm, blueNorm]);
redNorm.dispose();
greenNorm.dispose();
blueNorm.dispose();
const normalize = tf.tidy(() => grayscale.sub(0.5).mul(2));
grayscale.dispose();
const obj = [];
const resize = tf.image.resizeBilinear(image2, [config2.face.embedding.inputSize, config2.face.embedding.inputSize], false);
let data2 = [];
if (config2.face.embedding.enabled) {
let data2;
if (!config2.profile) {
console.log("model", models.embedding);
const embeddingT = await models.embedding.predict({img_inputs: normalize});
data2 = embeddingT.dataSync();
console.log("embedding", data2);
const embeddingT = await models.embedding.predict({img_inputs: resize});
data2 = [...embeddingT.dataSync()];
tf.dispose(embeddingT);
} else {
const profileData = await tf.profile(() => models.embedding.predict(normalize));
data2 = profileData.result.dataSync();
const profileData = await tf.profile(() => models.embedding.predict({img_inputs: resize}));
data2 = [...profileData.result.dataSync()];
profileData.result.dispose();
profile2.run("emotion", profileData);
}
obj.sort((a, b) => b.score - a.score);
}
normalize.dispose();
last = obj;
resolve(obj);
resize.dispose();
resolve(data2);
});
}
exports.predict = predict2;
exports.simmilarity = simmilarity2;
exports.load = load2;
});
@ -24353,6 +24314,7 @@ var config_default = {
detector: {
modelPath: "../models/blazeface-back.json",
inputSize: 256,
rotation: false,
maxFaces: 10,
skipFrames: 15,
minConfidence: 0.5,
@ -24390,7 +24352,7 @@ var config_default = {
modelPath: "../models/emotion-large.json"
},
embedding: {
enabled: true,
enabled: false,
inputSize: 112,
modelPath: "../models/mobilefacenet.json"
}
@ -24513,6 +24475,11 @@ class Human {
}
return null;
}
simmilarity(embedding1, embedding2) {
if (this.config.face.embedding.enabled)
return embedding.simmilarity(embedding1, embedding2);
return 0;
}
async load(userConfig) {
this.state = "load";
const timeStamp = now();
@ -24686,18 +24653,18 @@ class Human {
return process3.canvas;
}
async detect(input, userConfig = {}) {
this.state = "config";
let timeStamp;
this.config = mergeDeep(this.config, userConfig);
if (!this.config.videoOptimized)
this.config = mergeDeep(this.config, disableSkipFrames);
this.state = "check";
const error = this.sanity(input);
if (error) {
this.log(error, input);
return {error};
}
return new Promise(async (resolve) => {
this.state = "config";
let timeStamp;
this.config = mergeDeep(this.config, userConfig);
if (!this.config.videoOptimized)
this.config = mergeDeep(this.config, disableSkipFrames);
this.state = "check";
const error = this.sanity(input);
if (error) {
this.log(error, input);
resolve({error});
}
let poseRes;
let handRes;
let faceRes;
@ -24765,10 +24732,12 @@ class Human {
resolve({face: faceRes, body: poseRes, hand: handRes, gesture: gestureRes, performance: this.perf, canvas: process3.canvas});
});
}
async warmup(userConfig) {
const warmup = new ImageData(255, 255);
await this.detect(warmup, userConfig);
async warmup(userConfig, sample) {
if (!sample)
sample = new ImageData(255, 255);
const warmup = await this.detect(sample, userConfig);
this.log("warmed up");
return warmup;
}
}
export {

File diff suppressed because one or more lines are too long
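The embedding hunks above add a mobilefacenet-based face embedding and a simmilarity() method on the Human class. A usage sketch, not part of this commit; it assumes each entry in result.face exposes the computed embedding array when face.embedding.enabled is set, which this diff does not show and should be checked against the face pipeline:

async function compareFaces(human, imageA, imageB) {
  const config = { face: { embedding: { enabled: true } } };
  const first = await human.detect(imageA, config);
  const second = await human.detect(imageB, config);
  // assumption: face results carry an `embedding` array when the module is enabled
  const score = human.simmilarity(first.face[0].embedding, second.face[0].embedding);
  // per the formula in the diff, identical embeddings score 1 and less similar pairs score lower
  return score;
}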

dist/human.esm-nobundle.json

@ -1,15 +1,15 @@
{
"inputs": {
"config.js": {
"bytes": 8623,
"bytes": 8721,
"imports": []
},
"package.json": {
"bytes": 3554,
"bytes": 3616,
"imports": []
},
"src/age/age.js": {
"bytes": 1912,
"bytes": 1941,
"imports": [
{
"path": "src/tf.js"
@ -133,7 +133,7 @@
]
},
"src/embedding/embedding.js": {
"bytes": 3111,
"bytes": 2105,
"imports": [
{
"path": "src/tf.js"
@ -144,7 +144,7 @@
]
},
"src/emotion/emotion.js": {
"bytes": 2928,
"bytes": 2979,
"imports": [
{
"path": "src/tf.js"
@ -192,7 +192,7 @@
]
},
"src/face/facepipeline.js": {
"bytes": 13680,
"bytes": 13762,
"imports": [
{
"path": "src/tf.js"
@ -213,7 +213,7 @@
"imports": []
},
"src/gender/gender.js": {
"bytes": 3209,
"bytes": 3382,
"imports": [
{
"path": "src/tf.js"
@ -286,7 +286,7 @@
"imports": []
},
"src/human.js": {
"bytes": 15788,
"bytes": 16020,
"imports": [
{
"path": "src/tf.js"
@ -357,7 +357,7 @@
"dist/human.esm-nobundle.js.map": {
"imports": [],
"inputs": {},
"bytes": 786297
"bytes": 785345
},
"dist/human.esm-nobundle.js": {
"imports": [],
@ -375,7 +375,7 @@
"bytesInOutput": 51519
},
"src/face/facepipeline.js": {
"bytesInOutput": 12370
"bytesInOutput": 12453
},
"src/face/facemesh.js": {
"bytesInOutput": 2461
@ -384,16 +384,16 @@
"bytesInOutput": 1118
},
"src/age/age.js": {
"bytesInOutput": 1830
"bytesInOutput": 1563
},
"src/gender/gender.js": {
"bytesInOutput": 3000
"bytesInOutput": 2736
},
"src/emotion/emotion.js": {
"bytesInOutput": 2701
"bytesInOutput": 2438
},
"src/embedding/embedding.js": {
"bytesInOutput": 2487
"bytesInOutput": 1769
},
"src/body/modelBase.js": {
"bytesInOutput": 866
@ -465,7 +465,7 @@
"bytesInOutput": 186
},
"config.js": {
"bytesInOutput": 2048
"bytesInOutput": 2072
},
"package.json": {
"bytesInOutput": 0
@ -474,13 +474,13 @@
"bytesInOutput": 23
},
"src/human.js": {
"bytesInOutput": 13232
"bytesInOutput": 13473
},
"src/human.js": {
"bytesInOutput": 0
}
},
"bytes": 432856
"bytes": 431692
}
}
}

dist/human.esm.json vendored (2 changed lines)

@ -12316,7 +12316,7 @@
]
},
"package.json": {
"bytes": 3554,
"bytes": 3616,
"imports": []
},
"src/age/age.js": {

dist/human.js vendored (139 changed lines)

@ -69844,15 +69844,15 @@ return a / b;`;
rotatedImage = tf.image.rotateWithOffset(input, angle, 0, faceCenterNormalized);
rotationMatrix = util30.buildRotationMatrix(-angle, faceCenter);
}
const boxCPU = {startPoint: box.startPoint, endPoint: box.endPoint};
const face = bounding.cutBoxFromImageAndResize(boxCPU, rotatedImage, [this.meshHeight, this.meshWidth]).div(255);
const face = bounding.cutBoxFromImageAndResize({startPoint: box.startPoint, endPoint: box.endPoint}, rotatedImage, [this.meshHeight, this.meshWidth]).div(255);
const outputFace = config.detector.rotation ? tf.image.rotateWithOffset(face, angle) : face;
if (!config.mesh.enabled) {
const prediction2 = {
coords: null,
box,
faceConfidence: null,
confidence: box.confidence,
image: face
image: outputFace
};
return prediction2;
}
@ -69897,7 +69897,7 @@ return a / b;`;
box: landmarksBox,
faceConfidence: confidenceVal,
confidence: box.confidence,
image: face
image: outputFace
};
this.storedBoxes[i] = {...landmarksBox, landmarks: transformedCoords.arraySync(), confidence: box.confidence, faceConfidence: confidenceVal};
return prediction;
@ -70009,7 +70009,6 @@ return a / b;`;
const models = {};
let last = {age: 0};
let frame = Number.MAX_SAFE_INTEGER;
const zoom = [0, 0];
async function load(config) {
if (!models.age) {
models.age = await loadGraphModel(config.face.age.modelPath);
@ -70018,19 +70017,15 @@ return a / b;`;
return models.age;
}
async function predict(image, config) {
if (!models.age)
return null;
if (frame < config.face.age.skipFrames && last.age && last.age > 0) {
frame += 1;
return last;
}
frame = 0;
return new Promise(async (resolve) => {
const box = [[
image.shape[1] * zoom[0] / image.shape[1],
image.shape[2] * zoom[1] / image.shape[2],
(image.shape[1] - image.shape[1] * zoom[0]) / image.shape[1],
(image.shape[2] - image.shape[2] * zoom[1]) / image.shape[2]
]];
const resize = tf.image.cropAndResize(image, box, [0], [config.face.age.inputSize, config.face.age.inputSize]);
const resize = tf.image.resizeBilinear(image, [config.face.age.inputSize, config.face.age.inputSize], false);
const enhance = tf.mul(resize, [255]);
tf.dispose(resize);
let ageT;
@ -70065,7 +70060,6 @@ return a / b;`;
let last = {gender: ""};
let frame = Number.MAX_SAFE_INTEGER;
let alternative = false;
const zoom = [0, 0];
const rgb = [0.2989, 0.587, 0.114];
async function load(config) {
if (!models.gender) {
@ -70076,19 +70070,15 @@ return a / b;`;
return models.gender;
}
async function predict(image, config) {
if (!models.gender)
return null;
if (frame < config.face.gender.skipFrames && last.gender !== "") {
frame += 1;
return last;
}
frame = 0;
return new Promise(async (resolve) => {
const box = [[
image.shape[1] * zoom[0] / image.shape[1],
image.shape[2] * zoom[1] / image.shape[2],
(image.shape[1] - image.shape[1] * zoom[0]) / image.shape[1],
(image.shape[2] - image.shape[2] * zoom[1]) / image.shape[2]
]];
const resize = tf.image.cropAndResize(image, box, [0], [config.face.gender.inputSize, config.face.gender.inputSize]);
const resize = tf.image.resizeBilinear(image, [config.face.gender.inputSize, config.face.gender.inputSize], false);
let enhance;
if (alternative) {
enhance = tf.tidy(() => {
@ -70147,7 +70137,6 @@ return a / b;`;
const models = {};
let last = [];
let frame = Number.MAX_SAFE_INTEGER;
const zoom = [0, 0];
const rgb = [0.2989, 0.587, 0.114];
const scale = 1;
async function load(config) {
@ -70158,19 +70147,15 @@ return a / b;`;
return models.emotion;
}
async function predict(image, config) {
if (!models.emotion)
return null;
if (frame < config.face.emotion.skipFrames && last.length > 0) {
frame += 1;
return last;
}
frame = 0;
return new Promise(async (resolve) => {
const box = [[
image.shape[1] * zoom[0] / image.shape[1],
image.shape[2] * zoom[1] / image.shape[2],
(image.shape[1] - image.shape[1] * zoom[0]) / image.shape[1],
(image.shape[2] - image.shape[2] * zoom[1]) / image.shape[2]
]];
const resize = tf.image.cropAndResize(image, box, [0], [config.face.emotion.inputSize, config.face.emotion.inputSize]);
const resize = tf.image.resizeBilinear(image, [config.face.emotion.inputSize, config.face.emotion.inputSize], false);
const [red, green, blue] = tf.split(resize, 3, 3);
resize.dispose();
const redNorm = tf.mul(red, rgb[0]);
@ -70217,10 +70202,6 @@ return a / b;`;
var require_embedding = __commonJS((exports) => {
const profile = __toModule(require_profile());
const models = {};
let last = [];
let frame = Number.MAX_SAFE_INTEGER;
const zoom = [0, 0];
const rgb = [0.2989, 0.587, 0.114];
async function load(config) {
if (!models.embedding) {
models.embedding = await loadGraphModel(config.face.embedding.modelPath);
@ -70228,57 +70209,37 @@ return a / b;`;
}
return models.embedding;
}
function simmilarity(embedding1, embedding2) {
if ((embedding1 == null ? void 0 : embedding1.length) !== (embedding2 == null ? void 0 : embedding2.length))
return 0;
const distance = 10 * Math.sqrt(embedding1.map((val, i) => val - embedding2[i]).reduce((dist2, diff) => dist2 + diff ** 2, 0));
const confidence = 2 * (0.5 - distance);
return Math.trunc(1e3 * confidence) / 1e3;
}
async function predict(image, config) {
if (frame < config.face.embedding.skipFrames && last.length > 0) {
frame += 1;
return last;
}
frame = 0;
if (!models.embedding)
return null;
return new Promise(async (resolve) => {
const box = [[
image.shape[1] * zoom[0] / image.shape[1],
image.shape[2] * zoom[1] / image.shape[2],
(image.shape[1] - image.shape[1] * zoom[0]) / image.shape[1],
(image.shape[2] - image.shape[2] * zoom[1]) / image.shape[2]
]];
const resize = tf.image.cropAndResize(image, box, [0], [config.face.embedding.inputSize, config.face.embedding.inputSize]);
const [red, green, blue] = tf.split(resize, 3, 3);
resize.dispose();
const redNorm = tf.mul(red, rgb[0]);
const greenNorm = tf.mul(green, rgb[1]);
const blueNorm = tf.mul(blue, rgb[2]);
red.dispose();
green.dispose();
blue.dispose();
const grayscale = tf.addN([redNorm, greenNorm, blueNorm]);
redNorm.dispose();
greenNorm.dispose();
blueNorm.dispose();
const normalize = tf.tidy(() => grayscale.sub(0.5).mul(2));
grayscale.dispose();
const obj = [];
const resize = tf.image.resizeBilinear(image, [config.face.embedding.inputSize, config.face.embedding.inputSize], false);
let data = [];
if (config.face.embedding.enabled) {
let data;
if (!config.profile) {
console.log("model", models.embedding);
const embeddingT = await models.embedding.predict({img_inputs: normalize});
data = embeddingT.dataSync();
console.log("embedding", data);
const embeddingT = await models.embedding.predict({img_inputs: resize});
data = [...embeddingT.dataSync()];
tf.dispose(embeddingT);
} else {
const profileData = await tf.profile(() => models.embedding.predict(normalize));
data = profileData.result.dataSync();
const profileData = await tf.profile(() => models.embedding.predict({img_inputs: resize}));
data = [...profileData.result.dataSync()];
profileData.result.dispose();
profile.run("emotion", profileData);
}
obj.sort((a, b) => b.score - a.score);
}
normalize.dispose();
last = obj;
resolve(obj);
resize.dispose();
resolve(data);
});
}
exports.predict = predict;
exports.simmilarity = simmilarity;
exports.load = load;
});
@ -89818,6 +89779,11 @@ return a / b;`;
}
return null;
}
simmilarity(embedding1, embedding2) {
if (this.config.face.embedding.enabled)
return embedding.simmilarity(embedding1, embedding2);
return 0;
}
async load(userConfig) {
this.state = "load";
const timeStamp = now2();
@ -89991,18 +89957,18 @@ return a / b;`;
return process3.canvas;
}
async detect(input, userConfig = {}) {
this.state = "config";
let timeStamp;
this.config = mergeDeep(this.config, userConfig);
if (!this.config.videoOptimized)
this.config = mergeDeep(this.config, disableSkipFrames);
this.state = "check";
const error = this.sanity(input);
if (error) {
this.log(error, input);
return {error};
}
return new Promise(async (resolve) => {
this.state = "config";
let timeStamp;
this.config = mergeDeep(this.config, userConfig);
if (!this.config.videoOptimized)
this.config = mergeDeep(this.config, disableSkipFrames);
this.state = "check";
const error = this.sanity(input);
if (error) {
this.log(error, input);
resolve({error});
}
let poseRes;
let handRes;
let faceRes;
@ -90070,10 +90036,12 @@ return a / b;`;
resolve({face: faceRes, body: poseRes, hand: handRes, gesture: gestureRes, performance: this.perf, canvas: process3.canvas});
});
}
async warmup(userConfig) {
const warmup = new ImageData(255, 255);
await this.detect(warmup, userConfig);
async warmup(userConfig, sample) {
if (!sample)
sample = new ImageData(255, 255);
const warmup = await this.detect(sample, userConfig);
this.log("warmed up");
return warmup;
}
}
});
@ -99203,6 +99171,7 @@ return a / b;`;
detector: {
modelPath: "../models/blazeface-back.json",
inputSize: 256,
rotation: false,
maxFaces: 10,
skipFrames: 15,
minConfidence: 0.5,
@ -99240,7 +99209,7 @@ return a / b;`;
modelPath: "../models/emotion-large.json"
},
embedding: {
enabled: true,
enabled: false,
inputSize: 112,
modelPath: "../models/mobilefacenet.json"
}
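Two defaults change in config_default above: face.detector.rotation is introduced as false and face.embedding.enabled flips to false, so both features are now opt-in. A sketch of a user config that turns them back on; the key paths mirror config_default in this commit:

const userConfig = {
  face: {
    detector: { rotation: true },  // rotate the cropped face before the mesh stage (see the facepipeline hunks)
    embedding: { enabled: true },  // re-enable the mobilefacenet embedding module
  },
};
const result = await human.detect(input, userConfig);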

dist/human.js.map vendored (4 changed lines)

File diff suppressed because one or more lines are too long

dist/human.json vendored (34 changed lines)

@ -1,7 +1,7 @@
{
"inputs": {
"config.js": {
"bytes": 8623,
"bytes": 8721,
"imports": []
},
"node_modules/@tensorflow/tfjs-backend-wasm/dist/backend_wasm.js": {
@ -12316,11 +12316,11 @@
]
},
"package.json": {
"bytes": 3554,
"bytes": 3616,
"imports": []
},
"src/age/age.js": {
"bytes": 1912,
"bytes": 1941,
"imports": [
{
"path": "src/tf.js"
@ -12444,7 +12444,7 @@
]
},
"src/embedding/embedding.js": {
"bytes": 3111,
"bytes": 2105,
"imports": [
{
"path": "src/tf.js"
@ -12455,7 +12455,7 @@
]
},
"src/emotion/emotion.js": {
"bytes": 2928,
"bytes": 2979,
"imports": [
{
"path": "src/tf.js"
@ -12503,7 +12503,7 @@
]
},
"src/face/facepipeline.js": {
"bytes": 13680,
"bytes": 13762,
"imports": [
{
"path": "src/tf.js"
@ -12524,7 +12524,7 @@
"imports": []
},
"src/gender/gender.js": {
"bytes": 3209,
"bytes": 3382,
"imports": [
{
"path": "src/tf.js"
@ -12597,7 +12597,7 @@
"imports": []
},
"src/human.js": {
"bytes": 15788,
"bytes": 16020,
"imports": [
{
"path": "src/tf.js"
@ -12695,7 +12695,7 @@
"dist/human.js.map": {
"imports": [],
"inputs": {},
"bytes": 5451131
"bytes": 5449697
},
"dist/human.js": {
"imports": [],
@ -12740,7 +12740,7 @@
"bytesInOutput": 59051
},
"src/face/facepipeline.js": {
"bytesInOutput": 12860
"bytesInOutput": 12942
},
"src/face/facemesh.js": {
"bytesInOutput": 2556
@ -12749,16 +12749,16 @@
"bytesInOutput": 1156
},
"src/age/age.js": {
"bytesInOutput": 1906
"bytesInOutput": 1639
},
"src/gender/gender.js": {
"bytesInOutput": 3123
"bytesInOutput": 2859
},
"src/emotion/emotion.js": {
"bytesInOutput": 2809
"bytesInOutput": 2546
},
"src/embedding/embedding.js": {
"bytesInOutput": 2590
"bytesInOutput": 1833
},
"src/body/modelBase.js": {
"bytesInOutput": 920
@ -12815,7 +12815,7 @@
"bytesInOutput": 5588
},
"src/human.js": {
"bytesInOutput": 15261
"bytesInOutput": 15530
},
"src/tf.js": {
"bytesInOutput": 46
@ -13418,7 +13418,7 @@
"bytesInOutput": 3038
},
"config.js": {
"bytesInOutput": 2254
"bytesInOutput": 2280
},
"package.json": {
"bytesInOutput": 0
@ -13427,7 +13427,7 @@
"bytesInOutput": 26
}
},
"bytes": 3647380
"bytes": 3646206
}
}
}

dist/human.node-nobundle.js

@ -4275,15 +4275,15 @@ var require_facepipeline = __commonJS((exports2) => {
rotatedImage = tf.image.rotateWithOffset(input, angle, 0, faceCenterNormalized);
rotationMatrix = util.buildRotationMatrix(-angle, faceCenter);
}
const boxCPU = {startPoint: box.startPoint, endPoint: box.endPoint};
const face2 = bounding.cutBoxFromImageAndResize(boxCPU, rotatedImage, [this.meshHeight, this.meshWidth]).div(255);
const face2 = bounding.cutBoxFromImageAndResize({startPoint: box.startPoint, endPoint: box.endPoint}, rotatedImage, [this.meshHeight, this.meshWidth]).div(255);
const outputFace = config2.detector.rotation ? tf.image.rotateWithOffset(face2, angle) : face2;
if (!config2.mesh.enabled) {
const prediction2 = {
coords: null,
box,
faceConfidence: null,
confidence: box.confidence,
image: face2
image: outputFace
};
return prediction2;
}
@ -4328,7 +4328,7 @@ var require_facepipeline = __commonJS((exports2) => {
box: landmarksBox,
faceConfidence: confidenceVal,
confidence: box.confidence,
image: face2
image: outputFace
};
this.storedBoxes[i] = {...landmarksBox, landmarks: transformedCoords.arraySync(), confidence: box.confidence, faceConfidence: confidenceVal};
return prediction;
@ -4440,7 +4440,6 @@ var require_age = __commonJS((exports2) => {
const models = {};
let last = {age: 0};
let frame = Number.MAX_SAFE_INTEGER;
const zoom = [0, 0];
async function load2(config2) {
if (!models.age) {
models.age = await loadGraphModel(config2.face.age.modelPath);
@ -4449,19 +4448,15 @@ var require_age = __commonJS((exports2) => {
return models.age;
}
async function predict2(image2, config2) {
if (!models.age)
return null;
if (frame < config2.face.age.skipFrames && last.age && last.age > 0) {
frame += 1;
return last;
}
frame = 0;
return new Promise(async (resolve) => {
const box = [[
image2.shape[1] * zoom[0] / image2.shape[1],
image2.shape[2] * zoom[1] / image2.shape[2],
(image2.shape[1] - image2.shape[1] * zoom[0]) / image2.shape[1],
(image2.shape[2] - image2.shape[2] * zoom[1]) / image2.shape[2]
]];
const resize = tf.image.cropAndResize(image2, box, [0], [config2.face.age.inputSize, config2.face.age.inputSize]);
const resize = tf.image.resizeBilinear(image2, [config2.face.age.inputSize, config2.face.age.inputSize], false);
const enhance = tf.mul(resize, [255]);
tf.dispose(resize);
let ageT;
@ -4496,7 +4491,6 @@ var require_gender = __commonJS((exports2) => {
let last = {gender: ""};
let frame = Number.MAX_SAFE_INTEGER;
let alternative = false;
const zoom = [0, 0];
const rgb = [0.2989, 0.587, 0.114];
async function load2(config2) {
if (!models.gender) {
@ -4507,19 +4501,15 @@ var require_gender = __commonJS((exports2) => {
return models.gender;
}
async function predict2(image2, config2) {
if (!models.gender)
return null;
if (frame < config2.face.gender.skipFrames && last.gender !== "") {
frame += 1;
return last;
}
frame = 0;
return new Promise(async (resolve) => {
const box = [[
image2.shape[1] * zoom[0] / image2.shape[1],
image2.shape[2] * zoom[1] / image2.shape[2],
(image2.shape[1] - image2.shape[1] * zoom[0]) / image2.shape[1],
(image2.shape[2] - image2.shape[2] * zoom[1]) / image2.shape[2]
]];
const resize = tf.image.cropAndResize(image2, box, [0], [config2.face.gender.inputSize, config2.face.gender.inputSize]);
const resize = tf.image.resizeBilinear(image2, [config2.face.gender.inputSize, config2.face.gender.inputSize], false);
let enhance;
if (alternative) {
enhance = tf.tidy(() => {
@ -4578,7 +4568,6 @@ var require_emotion = __commonJS((exports2) => {
const models = {};
let last = [];
let frame = Number.MAX_SAFE_INTEGER;
const zoom = [0, 0];
const rgb = [0.2989, 0.587, 0.114];
const scale = 1;
async function load2(config2) {
@ -4589,19 +4578,15 @@ var require_emotion = __commonJS((exports2) => {
return models.emotion;
}
async function predict2(image2, config2) {
if (!models.emotion)
return null;
if (frame < config2.face.emotion.skipFrames && last.length > 0) {
frame += 1;
return last;
}
frame = 0;
return new Promise(async (resolve) => {
const box = [[
image2.shape[1] * zoom[0] / image2.shape[1],
image2.shape[2] * zoom[1] / image2.shape[2],
(image2.shape[1] - image2.shape[1] * zoom[0]) / image2.shape[1],
(image2.shape[2] - image2.shape[2] * zoom[1]) / image2.shape[2]
]];
const resize = tf.image.cropAndResize(image2, box, [0], [config2.face.emotion.inputSize, config2.face.emotion.inputSize]);
const resize = tf.image.resizeBilinear(image2, [config2.face.emotion.inputSize, config2.face.emotion.inputSize], false);
const [red, green, blue] = tf.split(resize, 3, 3);
resize.dispose();
const redNorm = tf.mul(red, rgb[0]);
@ -4648,10 +4633,6 @@ var require_emotion = __commonJS((exports2) => {
var require_embedding = __commonJS((exports2) => {
const profile2 = __toModule(require_profile());
const models = {};
let last = [];
let frame = Number.MAX_SAFE_INTEGER;
const zoom = [0, 0];
const rgb = [0.2989, 0.587, 0.114];
async function load2(config2) {
if (!models.embedding) {
models.embedding = await loadGraphModel(config2.face.embedding.modelPath);
@ -4659,57 +4640,37 @@ var require_embedding = __commonJS((exports2) => {
}
return models.embedding;
}
function simmilarity2(embedding1, embedding2) {
if ((embedding1 == null ? void 0 : embedding1.length) !== (embedding2 == null ? void 0 : embedding2.length))
return 0;
const distance = 10 * Math.sqrt(embedding1.map((val, i) => val - embedding2[i]).reduce((dist2, diff) => dist2 + diff ** 2, 0));
const confidence = 2 * (0.5 - distance);
return Math.trunc(1e3 * confidence) / 1e3;
}
async function predict2(image2, config2) {
if (frame < config2.face.embedding.skipFrames && last.length > 0) {
frame += 1;
return last;
}
frame = 0;
if (!models.embedding)
return null;
return new Promise(async (resolve) => {
const box = [[
image2.shape[1] * zoom[0] / image2.shape[1],
image2.shape[2] * zoom[1] / image2.shape[2],
(image2.shape[1] - image2.shape[1] * zoom[0]) / image2.shape[1],
(image2.shape[2] - image2.shape[2] * zoom[1]) / image2.shape[2]
]];
const resize = tf.image.cropAndResize(image2, box, [0], [config2.face.embedding.inputSize, config2.face.embedding.inputSize]);
const [red, green, blue] = tf.split(resize, 3, 3);
resize.dispose();
const redNorm = tf.mul(red, rgb[0]);
const greenNorm = tf.mul(green, rgb[1]);
const blueNorm = tf.mul(blue, rgb[2]);
red.dispose();
green.dispose();
blue.dispose();
const grayscale = tf.addN([redNorm, greenNorm, blueNorm]);
redNorm.dispose();
greenNorm.dispose();
blueNorm.dispose();
const normalize = tf.tidy(() => grayscale.sub(0.5).mul(2));
grayscale.dispose();
const obj = [];
const resize = tf.image.resizeBilinear(image2, [config2.face.embedding.inputSize, config2.face.embedding.inputSize], false);
let data2 = [];
if (config2.face.embedding.enabled) {
let data2;
if (!config2.profile) {
console.log("model", models.embedding);
const embeddingT = await models.embedding.predict({img_inputs: normalize});
data2 = embeddingT.dataSync();
console.log("embedding", data2);
const embeddingT = await models.embedding.predict({img_inputs: resize});
data2 = [...embeddingT.dataSync()];
tf.dispose(embeddingT);
} else {
const profileData = await tf.profile(() => models.embedding.predict(normalize));
data2 = profileData.result.dataSync();
const profileData = await tf.profile(() => models.embedding.predict({img_inputs: resize}));
data2 = [...profileData.result.dataSync()];
profileData.result.dispose();
profile2.run("emotion", profileData);
}
obj.sort((a, b) => b.score - a.score);
}
normalize.dispose();
last = obj;
resolve(obj);
resize.dispose();
resolve(data2);
});
}
exports2.predict = predict2;
exports2.simmilarity = simmilarity2;
exports2.load = load2;
});
@ -24363,6 +24324,7 @@ var config_default = {
detector: {
modelPath: "../models/blazeface-back.json",
inputSize: 256,
rotation: false,
maxFaces: 10,
skipFrames: 15,
minConfidence: 0.5,
@ -24400,7 +24362,7 @@ var config_default = {
modelPath: "../models/emotion-large.json"
},
embedding: {
enabled: true,
enabled: false,
inputSize: 112,
modelPath: "../models/mobilefacenet.json"
}
@ -24523,6 +24485,11 @@ class Human {
}
return null;
}
simmilarity(embedding1, embedding2) {
if (this.config.face.embedding.enabled)
return embedding.simmilarity(embedding1, embedding2);
return 0;
}
async load(userConfig) {
this.state = "load";
const timeStamp = now();
@ -24696,18 +24663,18 @@ class Human {
return process3.canvas;
}
async detect(input, userConfig = {}) {
this.state = "config";
let timeStamp;
this.config = mergeDeep(this.config, userConfig);
if (!this.config.videoOptimized)
this.config = mergeDeep(this.config, disableSkipFrames);
this.state = "check";
const error = this.sanity(input);
if (error) {
this.log(error, input);
return {error};
}
return new Promise(async (resolve) => {
this.state = "config";
let timeStamp;
this.config = mergeDeep(this.config, userConfig);
if (!this.config.videoOptimized)
this.config = mergeDeep(this.config, disableSkipFrames);
this.state = "check";
const error = this.sanity(input);
if (error) {
this.log(error, input);
resolve({error});
}
let poseRes;
let handRes;
let faceRes;
@ -24775,10 +24742,12 @@ class Human {
resolve({face: faceRes, body: poseRes, hand: handRes, gesture: gestureRes, performance: this.perf, canvas: process3.canvas});
});
}
async warmup(userConfig) {
const warmup = new ImageData(255, 255);
await this.detect(warmup, userConfig);
async warmup(userConfig, sample) {
if (!sample)
sample = new ImageData(255, 255);
const warmup = await this.detect(sample, userConfig);
this.log("warmed up");
return warmup;
}
}
//# sourceMappingURL=human.node-nobundle.js.map
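warmup() above now accepts an optional sample and returns the detection result instead of discarding it. A usage sketch; the claim that NodeJS needs a caller-supplied sample is an assumption (ImageData is a browser/DOM type and is not defined in plain Node), not something stated in the diff:

// browser: omit the sample and a blank 255x255 ImageData is used by default
const blankRun = await human.warmup(myConfig);

// or pass a representative sample so the returned timings are meaningful
// (assumed to be required under NodeJS, where ImageData does not exist)
const sampleRun = await human.warmup(myConfig, sampleImageData);
console.log('warmup timings', sampleRun.performance);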

File diff suppressed because one or more lines are too long

dist/human.node.js vendored (139 changed lines)

@ -95993,15 +95993,15 @@ var require_facepipeline = __commonJS((exports2) => {
rotatedImage = tf.image.rotateWithOffset(input, angle, 0, faceCenterNormalized);
rotationMatrix = util27.buildRotationMatrix(-angle, faceCenter);
}
const boxCPU = {startPoint: box.startPoint, endPoint: box.endPoint};
const face2 = bounding.cutBoxFromImageAndResize(boxCPU, rotatedImage, [this.meshHeight, this.meshWidth]).div(255);
const face2 = bounding.cutBoxFromImageAndResize({startPoint: box.startPoint, endPoint: box.endPoint}, rotatedImage, [this.meshHeight, this.meshWidth]).div(255);
const outputFace = config2.detector.rotation ? tf.image.rotateWithOffset(face2, angle) : face2;
if (!config2.mesh.enabled) {
const prediction2 = {
coords: null,
box,
faceConfidence: null,
confidence: box.confidence,
image: face2
image: outputFace
};
return prediction2;
}
@ -96046,7 +96046,7 @@ var require_facepipeline = __commonJS((exports2) => {
box: landmarksBox,
faceConfidence: confidenceVal,
confidence: box.confidence,
image: face2
image: outputFace
};
this.storedBoxes[i] = {...landmarksBox, landmarks: transformedCoords.arraySync(), confidence: box.confidence, faceConfidence: confidenceVal};
return prediction;
@ -96158,7 +96158,6 @@ var require_age = __commonJS((exports2) => {
const models = {};
let last = {age: 0};
let frame = Number.MAX_SAFE_INTEGER;
const zoom = [0, 0];
async function load2(config2) {
if (!models.age) {
models.age = await loadGraphModel(config2.face.age.modelPath);
@ -96167,19 +96166,15 @@ var require_age = __commonJS((exports2) => {
return models.age;
}
async function predict2(image2, config2) {
if (!models.age)
return null;
if (frame < config2.face.age.skipFrames && last.age && last.age > 0) {
frame += 1;
return last;
}
frame = 0;
return new Promise(async (resolve) => {
const box = [[
image2.shape[1] * zoom[0] / image2.shape[1],
image2.shape[2] * zoom[1] / image2.shape[2],
(image2.shape[1] - image2.shape[1] * zoom[0]) / image2.shape[1],
(image2.shape[2] - image2.shape[2] * zoom[1]) / image2.shape[2]
]];
const resize = tf.image.cropAndResize(image2, box, [0], [config2.face.age.inputSize, config2.face.age.inputSize]);
const resize = tf.image.resizeBilinear(image2, [config2.face.age.inputSize, config2.face.age.inputSize], false);
const enhance = tf.mul(resize, [255]);
tf.dispose(resize);
let ageT;
@ -96214,7 +96209,6 @@ var require_gender = __commonJS((exports2) => {
let last = {gender: ""};
let frame = Number.MAX_SAFE_INTEGER;
let alternative = false;
const zoom = [0, 0];
const rgb = [0.2989, 0.587, 0.114];
async function load2(config2) {
if (!models.gender) {
@ -96225,19 +96219,15 @@ var require_gender = __commonJS((exports2) => {
return models.gender;
}
async function predict2(image2, config2) {
if (!models.gender)
return null;
if (frame < config2.face.gender.skipFrames && last.gender !== "") {
frame += 1;
return last;
}
frame = 0;
return new Promise(async (resolve) => {
const box = [[
image2.shape[1] * zoom[0] / image2.shape[1],
image2.shape[2] * zoom[1] / image2.shape[2],
(image2.shape[1] - image2.shape[1] * zoom[0]) / image2.shape[1],
(image2.shape[2] - image2.shape[2] * zoom[1]) / image2.shape[2]
]];
const resize = tf.image.cropAndResize(image2, box, [0], [config2.face.gender.inputSize, config2.face.gender.inputSize]);
const resize = tf.image.resizeBilinear(image2, [config2.face.gender.inputSize, config2.face.gender.inputSize], false);
let enhance;
if (alternative) {
enhance = tf.tidy(() => {
@ -96296,7 +96286,6 @@ var require_emotion = __commonJS((exports2) => {
const models = {};
let last = [];
let frame = Number.MAX_SAFE_INTEGER;
const zoom = [0, 0];
const rgb = [0.2989, 0.587, 0.114];
const scale = 1;
async function load2(config2) {
@ -96307,19 +96296,15 @@ var require_emotion = __commonJS((exports2) => {
return models.emotion;
}
async function predict2(image2, config2) {
if (!models.emotion)
return null;
if (frame < config2.face.emotion.skipFrames && last.length > 0) {
frame += 1;
return last;
}
frame = 0;
return new Promise(async (resolve) => {
const box = [[
image2.shape[1] * zoom[0] / image2.shape[1],
image2.shape[2] * zoom[1] / image2.shape[2],
(image2.shape[1] - image2.shape[1] * zoom[0]) / image2.shape[1],
(image2.shape[2] - image2.shape[2] * zoom[1]) / image2.shape[2]
]];
const resize = tf.image.cropAndResize(image2, box, [0], [config2.face.emotion.inputSize, config2.face.emotion.inputSize]);
const resize = tf.image.resizeBilinear(image2, [config2.face.emotion.inputSize, config2.face.emotion.inputSize], false);
const [red, green, blue] = tf.split(resize, 3, 3);
resize.dispose();
const redNorm = tf.mul(red, rgb[0]);
@ -96366,10 +96351,6 @@ var require_emotion = __commonJS((exports2) => {
var require_embedding = __commonJS((exports2) => {
const profile2 = __toModule(require_profile());
const models = {};
let last = [];
let frame = Number.MAX_SAFE_INTEGER;
const zoom = [0, 0];
const rgb = [0.2989, 0.587, 0.114];
async function load2(config2) {
if (!models.embedding) {
models.embedding = await loadGraphModel(config2.face.embedding.modelPath);
@ -96377,57 +96358,37 @@ var require_embedding = __commonJS((exports2) => {
}
return models.embedding;
}
function simmilarity2(embedding1, embedding2) {
if ((embedding1 == null ? void 0 : embedding1.length) !== (embedding2 == null ? void 0 : embedding2.length))
return 0;
const distance = 10 * Math.sqrt(embedding1.map((val, i) => val - embedding2[i]).reduce((dist2, diff) => dist2 + diff ** 2, 0));
const confidence = 2 * (0.5 - distance);
return Math.trunc(1e3 * confidence) / 1e3;
}
async function predict2(image2, config2) {
if (frame < config2.face.embedding.skipFrames && last.length > 0) {
frame += 1;
return last;
}
frame = 0;
if (!models.embedding)
return null;
return new Promise(async (resolve) => {
const box = [[
image2.shape[1] * zoom[0] / image2.shape[1],
image2.shape[2] * zoom[1] / image2.shape[2],
(image2.shape[1] - image2.shape[1] * zoom[0]) / image2.shape[1],
(image2.shape[2] - image2.shape[2] * zoom[1]) / image2.shape[2]
]];
const resize = tf.image.cropAndResize(image2, box, [0], [config2.face.embedding.inputSize, config2.face.embedding.inputSize]);
const [red, green, blue] = tf.split(resize, 3, 3);
resize.dispose();
const redNorm = tf.mul(red, rgb[0]);
const greenNorm = tf.mul(green, rgb[1]);
const blueNorm = tf.mul(blue, rgb[2]);
red.dispose();
green.dispose();
blue.dispose();
const grayscale = tf.addN([redNorm, greenNorm, blueNorm]);
redNorm.dispose();
greenNorm.dispose();
blueNorm.dispose();
const normalize = tf.tidy(() => grayscale.sub(0.5).mul(2));
grayscale.dispose();
const obj = [];
const resize = tf.image.resizeBilinear(image2, [config2.face.embedding.inputSize, config2.face.embedding.inputSize], false);
let data2 = [];
if (config2.face.embedding.enabled) {
let data2;
if (!config2.profile) {
console.log("model", models.embedding);
const embeddingT = await models.embedding.predict({img_inputs: normalize});
data2 = embeddingT.dataSync();
console.log("embedding", data2);
const embeddingT = await models.embedding.predict({img_inputs: resize});
data2 = [...embeddingT.dataSync()];
tf.dispose(embeddingT);
} else {
const profileData = await tf.profile(() => models.embedding.predict(normalize));
data2 = profileData.result.dataSync();
const profileData = await tf.profile(() => models.embedding.predict({img_inputs: resize}));
data2 = [...profileData.result.dataSync()];
profileData.result.dispose();
profile2.run("emotion", profileData);
}
obj.sort((a, b) => b.score - a.score);
}
normalize.dispose();
last = obj;
resolve(obj);
resize.dispose();
resolve(data2);
});
}
exports2.predict = predict2;
exports2.simmilarity = simmilarity2;
exports2.load = load2;
});
@ -120187,6 +120148,7 @@ var config_default = {
detector: {
modelPath: "../models/blazeface-back.json",
inputSize: 256,
rotation: false,
maxFaces: 10,
skipFrames: 15,
minConfidence: 0.5,
@ -120224,7 +120186,7 @@ var config_default = {
modelPath: "../models/emotion-large.json"
},
embedding: {
enabled: true,
enabled: false,
inputSize: 112,
modelPath: "../models/mobilefacenet.json"
}
@ -120347,6 +120309,11 @@ class Human {
}
return null;
}
simmilarity(embedding1, embedding2) {
if (this.config.face.embedding.enabled)
return embedding.simmilarity(embedding1, embedding2);
return 0;
}
async load(userConfig) {
this.state = "load";
const timeStamp = now();
@ -120520,18 +120487,18 @@ class Human {
return process3.canvas;
}
async detect(input, userConfig = {}) {
this.state = "config";
let timeStamp;
this.config = mergeDeep(this.config, userConfig);
if (!this.config.videoOptimized)
this.config = mergeDeep(this.config, disableSkipFrames);
this.state = "check";
const error = this.sanity(input);
if (error) {
this.log(error, input);
return {error};
}
return new Promise(async (resolve) => {
this.state = "config";
let timeStamp;
this.config = mergeDeep(this.config, userConfig);
if (!this.config.videoOptimized)
this.config = mergeDeep(this.config, disableSkipFrames);
this.state = "check";
const error = this.sanity(input);
if (error) {
this.log(error, input);
resolve({error});
}
let poseRes;
let handRes;
let faceRes;
@ -120599,10 +120566,12 @@ class Human {
resolve({face: faceRes, body: poseRes, hand: handRes, gesture: gestureRes, performance: this.perf, canvas: process3.canvas});
});
}
async warmup(userConfig) {
const warmup = new ImageData(255, 255);
await this.detect(warmup, userConfig);
async warmup(userConfig, sample) {
if (!sample)
sample = new ImageData(255, 255);
const warmup = await this.detect(sample, userConfig);
this.log("warmed up");
return warmup;
}
}
//# sourceMappingURL=human.node.js.map
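The detect() rewrite above moves the config merge and input sanity check inside the returned Promise, so a failed check now resolves with an { error } field on the result rather than short-circuiting before the Promise is created. A sketch of the corresponding call-site handling:

const result = await human.detect(input, userConfig);
if (result.error) {
  console.error('detect failed:', result.error);
} else {
  console.log(result.face, result.body, result.hand, result.gesture, result.performance);
}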

File diff suppressed because one or more lines are too long

dist/human.node.json vendored (34 changed lines)

@ -1,15 +1,15 @@
{
"inputs": {
"config.js": {
"bytes": 8623,
"bytes": 8721,
"imports": []
},
"package.json": {
"bytes": 3554,
"bytes": 3616,
"imports": []
},
"src/age/age.js": {
"bytes": 1912,
"bytes": 1941,
"imports": [
{
"path": "src/tf.js"
@ -133,7 +133,7 @@
]
},
"src/embedding/embedding.js": {
"bytes": 3111,
"bytes": 2105,
"imports": [
{
"path": "src/tf.js"
@ -144,7 +144,7 @@
]
},
"src/emotion/emotion.js": {
"bytes": 2928,
"bytes": 2979,
"imports": [
{
"path": "src/tf.js"
@ -192,7 +192,7 @@
]
},
"src/face/facepipeline.js": {
"bytes": 13680,
"bytes": 13762,
"imports": [
{
"path": "src/tf.js"
@ -213,7 +213,7 @@
"imports": []
},
"src/gender/gender.js": {
"bytes": 3209,
"bytes": 3382,
"imports": [
{
"path": "src/tf.js"
@ -286,7 +286,7 @@
"imports": []
},
"src/human.js": {
"bytes": 15788,
"bytes": 16020,
"imports": [
{
"path": "src/tf.js"
@ -357,7 +357,7 @@
"dist/human.node-nobundle.js.map": {
"imports": [],
"inputs": {},
"bytes": 802566
"bytes": 801852
},
"dist/human.node-nobundle.js": {
"imports": [],
@ -375,7 +375,7 @@
"bytesInOutput": 51530
},
"src/face/facepipeline.js": {
"bytesInOutput": 12372
"bytesInOutput": 12455
},
"src/face/facemesh.js": {
"bytesInOutput": 2465
@ -384,16 +384,16 @@
"bytesInOutput": 1120
},
"src/age/age.js": {
"bytesInOutput": 1833
"bytesInOutput": 1566
},
"src/gender/gender.js": {
"bytesInOutput": 3003
"bytesInOutput": 2739
},
"src/emotion/emotion.js": {
"bytesInOutput": 2704
"bytesInOutput": 2441
},
"src/embedding/embedding.js": {
"bytesInOutput": 2490
"bytesInOutput": 1773
},
"src/body/modelBase.js": {
"bytesInOutput": 868
@ -468,7 +468,7 @@
"bytesInOutput": 186
},
"config.js": {
"bytesInOutput": 2048
"bytesInOutput": 2072
},
"package.json": {
"bytesInOutput": 0
@ -477,10 +477,10 @@
"bytesInOutput": 23
},
"src/human.js": {
"bytesInOutput": 13237
"bytesInOutput": 13478
}
},
"bytes": 433211
"bytes": 432048
}
}
}

package-lock.json generated (6 changed lines)

@ -2306,9 +2306,9 @@
"dev": true
},
"simple-git": {
"version": "2.21.0",
"resolved": "https://registry.npmjs.org/simple-git/-/simple-git-2.21.0.tgz",
"integrity": "sha512-rohCHmEjD/ESXFLxF4bVeqgdb4Awc65ZyyuCKl3f7BvgMbZOBa/Ye3HN/GFnvruiUOAWWNupxhz3Rz5/3vJLTg==",
"version": "2.22.0",
"resolved": "https://registry.npmjs.org/simple-git/-/simple-git-2.22.0.tgz",
"integrity": "sha512-/8WMNrQ5slYl05jYOpeh4BIyMQc84PkEvk9YAcBHVJaPoAgoxzLzdRzUzWDZJ9U6Z2pUjLxkGcQC0kU4pbRN5Q==",
"dev": true,
"requires": {
"@kwsites/file-exists": "^1.1.1",

package.json

@ -1,7 +1,7 @@
{
"name": "@vladmandic/human",
"version": "0.8.8",
"description": "human: 3D Face Detection, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition",
"description": "Human: 3D Face Detection, Face Embedding, Body Pose, Hand & Finger Tracking, Iris Tracking, Age & Gender Prediction, Emotion Prediction & Gesture Recognition",
"sideEffects": false,
"main": "dist/human.node.js",
"module": "dist/human.esm.js",
@ -37,7 +37,7 @@
"eslint-plugin-promise": "^4.2.1",
"rimraf": "^3.0.2",
"seedrandom": "^3.0.5",
"simple-git": "^2.21.0"
"simple-git": "^2.22.0"
},
"scripts": {
"start": "node --trace-warnings --unhandled-rejections=strict --trace-uncaught --no-deprecation src/node.js",
@ -57,6 +57,8 @@
"tensorflowjs",
"face-detection",
"face-geometry",
"face-embedding",
"face-recognition",
"body-tracking",
"hand-tracking",
"iris-tracking",

wiki (submodule, 2 changed lines)

@ -1 +1 @@
Subproject commit 726b24b9a76fd3df36fc309bd4f962daf4c74e45
Subproject commit 1a8d273b7fda90f496a792fd2a16c6978a88baa3