face-api/build/dom/extractFaceTensors.js


"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.extractFaceTensors = void 0;
const tf = require("@tensorflow/tfjs-core");
const FaceDetection_1 = require("../classes/FaceDetection");
const utils_1 = require("../utils");
/**
* Extracts the tensors of the image regions containing the detected faces.
* Useful if you want to compute the face descriptors for the face images.
 * Using this method is faster than extracting a canvas for each face and
* converting them to tensors individually.
*
* @param imageTensor The image tensor that face detection has been performed on.
* @param detections The face detection results or face bounding boxes for that image.
* @returns Tensors of the corresponding image region for each detected face.
*/
async function extractFaceTensors(imageTensor, detections) {
    if (!utils_1.isTensor3D(imageTensor) && !utils_1.isTensor4D(imageTensor)) {
        throw new Error('extractFaceTensors - expected image tensor to be 3D or 4D');
    }
    if (utils_1.isTensor4D(imageTensor) && imageTensor.shape[0] > 1) {
        throw new Error('extractFaceTensors - batchSize > 1 not supported');
    }
    return tf.tidy(() => {
        const [imgHeight, imgWidth, numChannels] = imageTensor.shape.slice(utils_1.isTensor4D(imageTensor) ? 1 : 0);
        const boxes = detections.map(det => det instanceof FaceDetection_1.FaceDetection
            ? det.forSize(imgWidth, imgHeight).box
            : det)
            .map(box => box.clipAtImageBorders(imgWidth, imgHeight));
        const faceTensors = boxes.map(({ x, y, width, height }) => tf.slice3d(imageTensor.as3D(imgHeight, imgWidth, numChannels), [y, x, 0], [height, width, numChannels]));
        return faceTensors;
    });
}
exports.extractFaceTensors = extractFaceTensors;
//# sourceMappingURL=extractFaceTensors.js.map
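// Usage sketch: a minimal, hedged example of calling extractFaceTensors on a
// decoded image tensor in Node.js. The '@tensorflow/tfjs-node' import, the
// model-loading step, the './weights' path, and the detectAllFaces call are
// assumptions about the surrounding application code, not part of this module.
//
// const tf = require('@tensorflow/tfjs-node');
// const faceapi = require('face-api.js');
//
// async function getFaceTensors(imageBuffer) {
//     await faceapi.nets.ssdMobilenetv1.loadFromDisk('./weights'); // assumed weights location
//     const imageTensor = tf.node.decodeImage(imageBuffer, 3);     // HWC tensor, 3 channels
//     const detections = await faceapi.detectAllFaces(imageTensor);
//     // The sliced face tensors escape tf.tidy(), so the caller must dispose them.
//     const faceTensors = await faceapi.extractFaceTensors(imageTensor, detections);
//     imageTensor.dispose();
//     return faceTensors;
// }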