// face-api/build/dom/extractFaces.js
// (compiled build output, last updated 2020-08-18)
import { FaceDetection } from '../classes/FaceDetection';
import { env } from '../env';
import { createCanvas } from './createCanvas';
import { getContext2dOrThrow } from './getContext2dOrThrow';
import { imageTensorToCanvas } from './imageTensorToCanvas';
import { toNetInput } from './toNetInput';
/**
 * Extracts the image regions containing the detected faces.
 *
 * @param input The image that face detection has been performed on.
 * @param detections The face detection results or face bounding boxes for that image.
 * @returns The Canvases of the corresponding image region for each detected face.
 */
export async function extractFaces(input, detections) {
    const { Canvas } = env.getEnv();
    let srcCanvas = input;
    // Normalize non-canvas inputs (tensors, media elements, ...) to a canvas first.
    if (!(input instanceof Canvas)) {
        const netInput = await toNetInput(input);
        // Only a single image is supported per call.
        if (netInput.batchSize > 1) {
            throw new Error('extractFaces - batchSize > 1 not supported');
        }
        const firstInput = netInput.getInput(0);
        srcCanvas = firstInput instanceof Canvas
            ? firstInput
            : await imageTensorToCanvas(firstInput);
    }
    const srcCtx = getContext2dOrThrow(srcCanvas);
    // Resolve each detection to an integer-coordinate box clipped to the image bounds.
    const regions = detections.map((det) => {
        const box = det instanceof FaceDetection
            ? det.forSize(srcCanvas.width, srcCanvas.height).box.floor()
            : det;
        return box.clipAtImageBorders(srcCanvas.width, srcCanvas.height);
    });
    // Copy each region's pixels into its own freshly created canvas.
    return regions.map(({ x, y, width, height }) => {
        const faceCanvas = createCanvas({ width, height });
        const pixels = srcCtx.getImageData(x, y, width, height);
        getContext2dOrThrow(faceCanvas).putImageData(pixels, 0, 0);
        return faceCanvas;
    });
}
//# sourceMappingURL=extractFaces.js.map