refactoring

pull/46/head
Vladimir Mandic 2021-03-19 21:39:45 -04:00
parent 09698a891b
commit 9ccaf781ab
35 changed files with 137 additions and 148 deletions

View File

@@ -11,6 +11,7 @@ Repository: **<git+https://github.com/vladmandic/face-api.git>**
### **HEAD -> master** 2021/03/19 mandic00@live.com
- refactoring
### **1.1.4** 2021/03/18 mandic00@live.com

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

70
dist/face-api.esm.js vendored

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

70
dist/face-api.js vendored

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -64,8 +64,8 @@ export class AgeGenderNet extends NeuralNetwork<NetParams> {
const predictionsByBatch = await Promise.all(
ageAndGenderTensors.map(async ({ ageTensor, genderTensor }) => {
const age = (await ageTensor.data())[0];
const probMale = (await genderTensor.data())[0];
const age = (ageTensor.dataSync())[0];
const probMale = (genderTensor.dataSync())[0];
const isMale = probMale > 0.5;
const gender = isMale ? Gender.MALE : Gender.FEMALE;
const genderProbability = isMale ? probMale : (1 - probMale);

View File

@@ -14,10 +14,7 @@ import { isTensor3D, isTensor4D } from '../utils/index';
* @param detections The face detection results or face bounding boxes for that image.
* @returns Tensors of the corresponding image region for each detected face.
*/
export async function extractFaceTensors(
imageTensor: tf.Tensor3D | tf.Tensor4D,
detections: Array<FaceDetection | Rect>,
): Promise<tf.Tensor3D[]> {
export async function extractFaceTensors(imageTensor: tf.Tensor3D | tf.Tensor4D, detections: Array<FaceDetection | Rect>): Promise<tf.Tensor3D[]> {
if (!isTensor3D(imageTensor) && !isTensor4D(imageTensor)) {
throw new Error('extractFaceTensors - expected image tensor to be 3D or 4D');
}
@@ -29,11 +26,10 @@ export async function extractFaceTensors(
return tf.tidy(() => {
const [imgHeight, imgWidth, numChannels] = imageTensor.shape.slice(isTensor4D(imageTensor) ? 1 : 0);
const boxes = detections.map(
(det) => (det instanceof FaceDetection
const boxes = detections
.map((det) => (det instanceof FaceDetection
? det.forSize(imgWidth, imgHeight).box
: det),
)
: det))
.map((box) => box.clipAtImageBorders(imgWidth, imgHeight));
const faceTensors = boxes.map(({

View File

@@ -23,7 +23,7 @@ export class FaceExpressionNet extends FaceProcessor<FaceFeatureExtractorParams>
const netInput = await toNetInput(input);
const out = await this.forwardInput(netInput);
const probabilitesByBatch = await Promise.all(tf.unstack(out).map(async (t) => {
const data = await t.data();
const data = t.dataSync();
t.dispose();
return data;
}));

View File

@@ -23,7 +23,7 @@ export class FaceFeatureExtractor extends NeuralNetwork<FaceFeatureExtractorPara
return tf.tidy(() => {
const batchTensor = tf.cast(input.toBatchTensor(112, true), 'float32');
const meanRgb = [122.782, 117.001, 104.298];
const normalized = normalize(batchTensor, meanRgb).div(tf.scalar(255)) as tf.Tensor4D;
const normalized = normalize(batchTensor, meanRgb).div(255) as tf.Tensor4D;
let out = denseBlock4(normalized, params.dense0, true);
out = denseBlock4(out, params.dense1);

View File

@@ -23,7 +23,7 @@ export class TinyFaceFeatureExtractor extends NeuralNetwork<TinyFaceFeatureExtra
return tf.tidy(() => {
const batchTensor = tf.cast(input.toBatchTensor(112, true), 'float32');
const meanRgb = [122.782, 117.001, 104.298];
const normalized = normalize(batchTensor, meanRgb).div(tf.scalar(255)) as tf.Tensor4D;
const normalized = normalize(batchTensor, meanRgb).div(255) as tf.Tensor4D;
let out = denseBlock3(normalized, params.dense0, true);
out = denseBlock3(out, params.dense1);

View File

@@ -72,7 +72,7 @@ export abstract class FaceLandmark68NetBase<
const landmarksForBatch = await Promise.all(landmarkTensors.map(
async (landmarkTensor, batchIdx) => {
const landmarksArray = Array.from(await landmarkTensor.data());
const landmarksArray = Array.from(landmarkTensor.dataSync());
const xCoords = landmarksArray.filter((_, i) => isEven(i));
const yCoords = landmarksArray.filter((_, i) => !isEven(i));

View File

@@ -25,7 +25,7 @@ export class FaceRecognitionNet extends NeuralNetwork<NetParams> {
const batchTensor = tf.cast(input.toBatchTensor(150, true), 'float32');
const meanRgb = [122.782, 117.001, 104.298];
const normalized = normalize(batchTensor, meanRgb).div(tf.scalar(256)) as tf.Tensor4D;
const normalized = normalize(batchTensor, meanRgb).div(255) as tf.Tensor4D;
let out = convDown(normalized, params.conv32_down);
out = tf.maxPool(out, 3, 2, 'valid');
@@ -62,9 +62,7 @@ export class FaceRecognitionNet extends NeuralNetwork<NetParams> {
public async computeFaceDescriptor(input: TNetInput): Promise<Float32Array|Float32Array[]> {
if (input?.shape?.some((dim) => dim <= 0)) return new Float32Array(128);
const netInput = await toNetInput(input);
const faceDescriptorTensors = tf.tidy(
() => tf.unstack(this.forwardInput(netInput)),
);
const faceDescriptorTensors = tf.tidy(() => tf.unstack(this.forwardInput(netInput)));
const faceDescriptorsForBatch = await Promise.all(faceDescriptorTensors.map((t) => t.data())) as Float32Array[];
faceDescriptorTensors.forEach((t) => t.dispose());
return netInput.isBatchInput ? faceDescriptorsForBatch : faceDescriptorsForBatch[0];

View File

@@ -8,12 +8,7 @@ export function isWithFaceDetection(obj: any): obj is WithFaceDetection<{}> {
return obj.detection instanceof FaceDetection;
}
export function extendWithFaceDetection<
TSource
>(
sourceObj: TSource,
detection: FaceDetection,
): WithFaceDetection<TSource> {
export function extendWithFaceDetection<TSource>(sourceObj: TSource, detection: FaceDetection): WithFaceDetection<TSource> {
const extension = { detection };
return { ...sourceObj, ...extension };
}

View File

@@ -65,9 +65,7 @@ export class DetectAllFaceLandmarksTask<
}
}
export class DetectSingleFaceLandmarksTask<
TSource extends WithFaceDetection<{}>
> extends DetectFaceLandmarksTaskBase<WithFaceLandmarks<TSource> | undefined, TSource | undefined> {
export class DetectSingleFaceLandmarksTask<TSource extends WithFaceDetection<{}>> extends DetectFaceLandmarksTaskBase<WithFaceLandmarks<TSource> | undefined, TSource | undefined> {
public async run(): Promise<WithFaceLandmarks<TSource> | undefined> {
const parentResult = await this.parentTask;
if (!parentResult) {

View File

@@ -26,7 +26,6 @@ export class DetectFacesTaskBase<TReturn> extends ComposableTask<TReturn> {
export class DetectAllFacesTask extends DetectFacesTaskBase<FaceDetection[]> {
public async run(): Promise<FaceDetection[]> {
const { input, options } = this;
let result;
if (options instanceof TinyFaceDetectorOptions) result = nets.tinyFaceDetector.locateFaces(input, options);
else if (options instanceof SsdMobilenetv1Options) result = nets.ssdMobilenetv1.locateFaces(input, options);

View File

@@ -17,6 +17,7 @@ export async function extractAllFacesAndComputeResults<TSource extends WithFaceD
const faceBoxes = parentResults.map((parentResult) => (isWithFaceLandmarks(parentResult)
? getRectForAlignment(parentResult)
: parentResult.detection));
const faces: Array<HTMLCanvasElement | tf.Tensor3D> = extractedFaces || (
input instanceof tf.Tensor
? await extractFaceTensors(input, faceBoxes)

View File

@@ -29,7 +29,6 @@ export class SsdMobilenetv1 extends NeuralNetwork<NetParams> {
const batchTensor = tf.cast(input.toBatchTensor(512, false), 'float32');
const x = tf.sub(tf.div(batchTensor, 127.5), 1) as tf.Tensor4D; // input is normalized -1..1
const features = mobileNetV1(x, params.mobilenetv1);
const { boxPredictions, classPredictions } = predictionLayer(features.out, features.conv11, params.prediction_layer);
return outputLayer(boxPredictions, classPredictions, params.output_layer);
@@ -42,7 +41,6 @@ export class SsdMobilenetv1 extends NeuralNetwork<NetParams> {
public async locateFaces(input: TNetInput, options: ISsdMobilenetv1Options = {}): Promise<FaceDetection[]> {
const { maxResults, minConfidence } = new SsdMobilenetv1Options(options);
const netInput = await toNetInput(input);
const {
@@ -57,7 +55,7 @@ export class SsdMobilenetv1 extends NeuralNetwork<NetParams> {
_scores[i].dispose();
}
const scoresData = Array.from(await scores.data());
const scoresData = Array.from(scores.dataSync());
const iouThreshold = 0.5;
const indices = nonMaxSuppression(
boxes,

View File

@@ -50,9 +50,7 @@ export function mobileNetV1(x: tf.Tensor4D, params: MobileNetV1.Params) {
const depthwiseConvStrides = getStridesForLayerIdx(layerIdx);
out = depthwiseConvLayer(out, param.depthwise_conv, depthwiseConvStrides);
out = pointwiseConvLayer(out, param.pointwise_conv, [1, 1]);
if (layerIdx === 11) {
conv11 = out;
}
if (layerIdx === 11) conv11 = out;
});
if (conv11 === null) {

View File

@@ -10,8 +10,8 @@ function getCenterCoordinatesAndSizesLayer(x: tf.Tensor2D) {
tf.sub(vec[3], vec[1]),
];
const centers = [
tf.add(vec[0], tf.div(sizes[0], tf.scalar(2))),
tf.add(vec[1], tf.div(sizes[1], tf.scalar(2))),
tf.add(vec[0], tf.div(sizes[0], 2)),
tf.add(vec[1], tf.div(sizes[1], 2)),
];
return { sizes, centers };
}
@@ -20,10 +20,10 @@ function decodeBoxesLayer(x0: tf.Tensor2D, x1: tf.Tensor2D) {
const { sizes, centers } = getCenterCoordinatesAndSizesLayer(x0);
const vec = tf.unstack(tf.transpose(x1, [1, 0]));
const div0_out = tf.div(tf.mul(tf.exp(tf.div(vec[2], tf.scalar(5))), sizes[0]), tf.scalar(2));
const add0_out = tf.add(tf.mul(tf.div(vec[0], tf.scalar(10)), sizes[0]), centers[0]);
const div1_out = tf.div(tf.mul(tf.exp(tf.div(vec[3], tf.scalar(5))), sizes[1]), tf.scalar(2));
const add1_out = tf.add(tf.mul(tf.div(vec[1], tf.scalar(10)), sizes[1]), centers[1]);
const div0_out = tf.div(tf.mul(tf.exp(tf.div(vec[2], 5)), sizes[0]), 2);
const add0_out = tf.add(tf.mul(tf.div(vec[0], 10), sizes[0]), centers[0]);
const div1_out = tf.div(tf.mul(tf.exp(tf.div(vec[3], 5)), sizes[1]), 2);
const add1_out = tf.add(tf.mul(tf.div(vec[1], 10), sizes[1]), centers[1]);
return tf.transpose(
tf.stack([

View File

@@ -2,13 +2,18 @@ import * as tf from '../../dist/tfjs.esm';
import { PointwiseConvParams } from './types';
export function pointwiseConvLayer(
x: tf.Tensor4D,
params: PointwiseConvParams,
strides: [number, number],
) {
export function pointwiseConvLayer(x: tf.Tensor4D, params: PointwiseConvParams, strides: [number, number]) {
return tf.tidy(() => {
let out = tf.conv2d(x, params.filters, strides, 'same');
/*
if (x.shape[1] === 512 && x.shape[3] === 3) {
console.log('Input:', x.shape, x.size); // input does not change (checked values)
console.log('Filter:', params.filters.shape, params.filters.size); // params do not change (checked values)
console.log('Strides', strides);
console.log('Conv2d Output:', out.shape, out.size, out.dataSync()[0]); // output has different values!
console.log('Sum of all Conv2D values:', tf.reshape(out, [2097152]).sum().dataSync()[0]); // silly sum just to see how much results diverged
}
*/
out = tf.add(out, params.batch_norm_offset);
return tf.clipByValue(out, 0, 6);
});

View File

@@ -94,7 +94,7 @@ export class TinyYolov2Base extends NeuralNetwork<TinyYolov2NetParams> {
batchTensor = this.config.meanRgb
? normalize(batchTensor, this.config.meanRgb)
: batchTensor;
batchTensor = batchTensor.div(tf.scalar(256)) as tf.Tensor4D;
batchTensor = batchTensor.div(255) as tf.Tensor4D;
return this.config.withSeparableConvs
? this.runMobilenet(batchTensor, params as MobilenetParams)
: this.runTinyYolov2(batchTensor, params as DefaultTinyYolov2NetParams);

View File

@@ -46,7 +46,7 @@ export class TinyXception extends NeuralNetwork<TinyXceptionParams> {
return tf.tidy(() => {
const batchTensor = tf.cast(input.toBatchTensor(112, true), 'float32');
const meanRgb = [122.782, 117.001, 104.298];
const normalized = normalize(batchTensor, meanRgb).div(tf.scalar(256)) as tf.Tensor4D;
const normalized = normalize(batchTensor, meanRgb).div(255) as tf.Tensor4D;
let out = tf.relu(conv(normalized, params.entry_flow.conv_in, [2, 2]));
out = reductionBlock(out, params.entry_flow.reduction_block_0, false);
out = reductionBlock(out, params.entry_flow.reduction_block_1);

View File

@@ -201,7 +201,7 @@
<li class="tsd-description">
<aside class="tsd-sources">
<ul>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFacesTasks.ts#L62">globalApi/DetectFacesTasks.ts:62</a></li>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFacesTasks.ts#L61">globalApi/DetectFacesTasks.ts:61</a></li>
</ul>
</aside>
<h4 class="tsd-returns-title">Returns <span class="tsd-signature-type">PredictAllAgeAndGenderTask</span><span class="tsd-signature-symbol">&lt;</span><span class="tsd-signature-symbol">{ </span>detection<span class="tsd-signature-symbol">: </span><a href="facedetection.html" class="tsd-signature-type" data-tsd-kind="Class">FaceDetection</a><span class="tsd-signature-symbol"> }</span><span class="tsd-signature-symbol">&gt;</span></h4>
@@ -218,7 +218,7 @@
<li class="tsd-description">
<aside class="tsd-sources">
<ul>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFacesTasks.ts#L55">globalApi/DetectFacesTasks.ts:55</a></li>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFacesTasks.ts#L54">globalApi/DetectFacesTasks.ts:54</a></li>
</ul>
</aside>
<h4 class="tsd-returns-title">Returns <span class="tsd-signature-type">PredictAllFaceExpressionsTask</span><span class="tsd-signature-symbol">&lt;</span><span class="tsd-signature-symbol">{ </span>detection<span class="tsd-signature-symbol">: </span><a href="facedetection.html" class="tsd-signature-type" data-tsd-kind="Class">FaceDetection</a><span class="tsd-signature-symbol"> }</span><span class="tsd-signature-symbol">&gt;</span></h4>
@@ -235,7 +235,7 @@
<li class="tsd-description">
<aside class="tsd-sources">
<ul>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFacesTasks.ts#L47">globalApi/DetectFacesTasks.ts:47</a></li>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFacesTasks.ts#L46">globalApi/DetectFacesTasks.ts:46</a></li>
</ul>
</aside>
<h4 class="tsd-parameters-title">Parameters</h4>

View File

@@ -159,7 +159,7 @@
<aside class="tsd-sources">
<p>Overrides <a href="detectfacelandmarkstaskbase.html">DetectFaceLandmarksTaskBase</a>.<a href="detectfacelandmarkstaskbase.html#run">run</a></p>
<ul>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFaceLandmarksTasks.ts#L71">globalApi/DetectFaceLandmarksTasks.ts:71</a></li>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFaceLandmarksTasks.ts#L69">globalApi/DetectFaceLandmarksTasks.ts:69</a></li>
</ul>
</aside>
<h4 class="tsd-returns-title">Returns <span class="tsd-signature-type">Promise</span><span class="tsd-signature-symbol">&lt;</span><span class="tsd-signature-type">undefined</span><span class="tsd-signature-symbol"> | </span><a href="../index.html#withfacelandmarks" class="tsd-signature-type" data-tsd-kind="Type alias">WithFaceLandmarks</a><span class="tsd-signature-symbol">&lt;</span><span class="tsd-signature-type">TSource</span><span class="tsd-signature-symbol">, </span><a href="facelandmarks68.html" class="tsd-signature-type" data-tsd-kind="Class">FaceLandmarks68</a><span class="tsd-signature-symbol">&gt;</span><span class="tsd-signature-symbol">&gt;</span></h4>
@@ -218,7 +218,7 @@
<li class="tsd-description">
<aside class="tsd-sources">
<ul>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFaceLandmarksTasks.ts#L93">globalApi/DetectFaceLandmarksTasks.ts:93</a></li>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFaceLandmarksTasks.ts#L91">globalApi/DetectFaceLandmarksTasks.ts:91</a></li>
</ul>
</aside>
<h4 class="tsd-returns-title">Returns <span class="tsd-signature-type">PredictSingleAgeAndGenderWithFaceAlignmentTask</span><span class="tsd-signature-symbol">&lt;</span><a href="../index.html#withfacelandmarks" class="tsd-signature-type" data-tsd-kind="Type alias">WithFaceLandmarks</a><span class="tsd-signature-symbol">&lt;</span><span class="tsd-signature-type">TSource</span><span class="tsd-signature-symbol">, </span><a href="facelandmarks68.html" class="tsd-signature-type" data-tsd-kind="Class">FaceLandmarks68</a><span class="tsd-signature-symbol">&gt;</span><span class="tsd-signature-symbol">&gt;</span></h4>
@@ -235,7 +235,7 @@
<li class="tsd-description">
<aside class="tsd-sources">
<ul>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFaceLandmarksTasks.ts#L97">globalApi/DetectFaceLandmarksTasks.ts:97</a></li>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFaceLandmarksTasks.ts#L95">globalApi/DetectFaceLandmarksTasks.ts:95</a></li>
</ul>
</aside>
<h4 class="tsd-returns-title">Returns <a href="computesinglefacedescriptortask.html" class="tsd-signature-type" data-tsd-kind="Class">ComputeSingleFaceDescriptorTask</a><span class="tsd-signature-symbol">&lt;</span><a href="../index.html#withfacelandmarks" class="tsd-signature-type" data-tsd-kind="Type alias">WithFaceLandmarks</a><span class="tsd-signature-symbol">&lt;</span><span class="tsd-signature-type">TSource</span><span class="tsd-signature-symbol">, </span><a href="facelandmarks68.html" class="tsd-signature-type" data-tsd-kind="Class">FaceLandmarks68</a><span class="tsd-signature-symbol">&gt;</span><span class="tsd-signature-symbol">&gt;</span></h4>
@@ -252,7 +252,7 @@
<li class="tsd-description">
<aside class="tsd-sources">
<ul>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFaceLandmarksTasks.ts#L89">globalApi/DetectFaceLandmarksTasks.ts:89</a></li>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFaceLandmarksTasks.ts#L87">globalApi/DetectFaceLandmarksTasks.ts:87</a></li>
</ul>
</aside>
<h4 class="tsd-returns-title">Returns <span class="tsd-signature-type">PredictSingleFaceExpressionsWithFaceAlignmentTask</span><span class="tsd-signature-symbol">&lt;</span><a href="../index.html#withfacelandmarks" class="tsd-signature-type" data-tsd-kind="Type alias">WithFaceLandmarks</a><span class="tsd-signature-symbol">&lt;</span><span class="tsd-signature-type">TSource</span><span class="tsd-signature-symbol">, </span><a href="facelandmarks68.html" class="tsd-signature-type" data-tsd-kind="Class">FaceLandmarks68</a><span class="tsd-signature-symbol">&gt;</span><span class="tsd-signature-symbol">&gt;</span></h4>

View File

@@ -142,7 +142,7 @@
<aside class="tsd-sources">
<p>Overrides <a href="detectfacestaskbase.html">DetectFacesTaskBase</a>.<a href="detectfacestaskbase.html#run">run</a></p>
<ul>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFacesTasks.ts#L71">globalApi/DetectFacesTasks.ts:71</a></li>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFacesTasks.ts#L70">globalApi/DetectFacesTasks.ts:70</a></li>
</ul>
</aside>
<h4 class="tsd-returns-title">Returns <span class="tsd-signature-type">Promise</span><span class="tsd-signature-symbol">&lt;</span><span class="tsd-signature-type">undefined</span><span class="tsd-signature-symbol"> | </span><a href="facedetection.html" class="tsd-signature-type" data-tsd-kind="Class">FaceDetection</a><span class="tsd-signature-symbol">&gt;</span></h4>
@@ -201,7 +201,7 @@
<li class="tsd-description">
<aside class="tsd-sources">
<ul>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFacesTasks.ts#L103">globalApi/DetectFacesTasks.ts:103</a></li>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFacesTasks.ts#L102">globalApi/DetectFacesTasks.ts:102</a></li>
</ul>
</aside>
<h4 class="tsd-returns-title">Returns <span class="tsd-signature-type">PredictSingleAgeAndGenderTask</span><span class="tsd-signature-symbol">&lt;</span><span class="tsd-signature-symbol">{ </span>detection<span class="tsd-signature-symbol">: </span><a href="facedetection.html" class="tsd-signature-type" data-tsd-kind="Class">FaceDetection</a><span class="tsd-signature-symbol"> }</span><span class="tsd-signature-symbol">&gt;</span></h4>
@@ -218,7 +218,7 @@
<li class="tsd-description">
<aside class="tsd-sources">
<ul>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFacesTasks.ts#L96">globalApi/DetectFacesTasks.ts:96</a></li>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFacesTasks.ts#L95">globalApi/DetectFacesTasks.ts:95</a></li>
</ul>
</aside>
<h4 class="tsd-returns-title">Returns <span class="tsd-signature-type">PredictSingleFaceExpressionsTask</span><span class="tsd-signature-symbol">&lt;</span><span class="tsd-signature-symbol">{ </span>detection<span class="tsd-signature-symbol">: </span><a href="facedetection.html" class="tsd-signature-type" data-tsd-kind="Class">FaceDetection</a><span class="tsd-signature-symbol"> }</span><span class="tsd-signature-symbol">&gt;</span></h4>
@@ -235,7 +235,7 @@
<li class="tsd-description">
<aside class="tsd-sources">
<ul>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFacesTasks.ts#L88">globalApi/DetectFacesTasks.ts:88</a></li>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/globalApi/DetectFacesTasks.ts#L87">globalApi/DetectFacesTasks.ts:87</a></li>
</ul>
</aside>
<h4 class="tsd-parameters-title">Parameters</h4>

View File

@@ -275,7 +275,7 @@
<aside class="tsd-sources">
<p>Inherited from <a href="ssdmobilenetv1.html">SsdMobilenetv1</a>.<a href="ssdmobilenetv1.html#forward">forward</a></p>
<ul>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/ssdMobilenetv1/SsdMobilenetv1.ts#L39">ssdMobilenetv1/SsdMobilenetv1.ts:39</a></li>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/ssdMobilenetv1/SsdMobilenetv1.ts#L38">ssdMobilenetv1/SsdMobilenetv1.ts:38</a></li>
</ul>
</aside>
<h4 class="tsd-parameters-title">Parameters</h4>
@@ -515,7 +515,7 @@
<aside class="tsd-sources">
<p>Inherited from <a href="ssdmobilenetv1.html">SsdMobilenetv1</a>.<a href="ssdmobilenetv1.html#locatefaces">locateFaces</a></p>
<ul>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/ssdMobilenetv1/SsdMobilenetv1.ts#L43">ssdMobilenetv1/SsdMobilenetv1.ts:43</a></li>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/ssdMobilenetv1/SsdMobilenetv1.ts#L42">ssdMobilenetv1/SsdMobilenetv1.ts:42</a></li>
</ul>
</aside>
<h4 class="tsd-parameters-title">Parameters</h4>

View File

@@ -279,7 +279,7 @@
<li class="tsd-description">
<aside class="tsd-sources">
<ul>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/ssdMobilenetv1/SsdMobilenetv1.ts#L39">ssdMobilenetv1/SsdMobilenetv1.ts:39</a></li>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/ssdMobilenetv1/SsdMobilenetv1.ts#L38">ssdMobilenetv1/SsdMobilenetv1.ts:38</a></li>
</ul>
</aside>
<h4 class="tsd-parameters-title">Parameters</h4>
@@ -517,7 +517,7 @@
<li class="tsd-description">
<aside class="tsd-sources">
<ul>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/ssdMobilenetv1/SsdMobilenetv1.ts#L43">ssdMobilenetv1/SsdMobilenetv1.ts:43</a></li>
<li>Defined in <a href="https://github.com/vladmandic/face-api/blob/main/src/ssdMobilenetv1/SsdMobilenetv1.ts#L42">ssdMobilenetv1/SsdMobilenetv1.ts:42</a></li>
</ul>
</aside>
<h4 class="tsd-parameters-title">Parameters</h4>