diff --git a/build.js b/build.js
index 60021eb..33b2a4f 100644
--- a/build.js
+++ b/build.js
@@ -2,6 +2,7 @@
const fs = require('fs');
const esbuild = require('esbuild');
+const ts = require('typescript');
const log = require('@vladmandic/pilogger');
// keeps esbuild service instance cached
@@ -14,6 +15,25 @@ const banner = `
*/
`;
+// tsc configuration
+
+const tsconfig = {
+ noEmitOnError: false,
+ target: ts.ScriptTarget.ES2018,
+ module: ts.ModuleKind.ES2020,
+ outFile: "dist/face-api.d.ts",
+ declaration: true,
+ emitDeclarationOnly: true,
+ emitDecoratorMetadata: true,
+ experimentalDecorators: true,
+ skipLibCheck: true,
+ strictNullChecks: true,
+ baseUrl: './',
+ paths: {
+ tslib: ["node_modules/tslib/tslib.d.ts"]
+ },
+};
+
// common configuration
const common = {
banner,
@@ -137,6 +157,25 @@ async function getStats(metafile) {
return stats;
}
+function compile(fileNames, options) {
+ log.info('Compile:', fileNames);
+ const program = ts.createProgram(fileNames, options);
+ const emit = program.emit();
+ const diag = ts
+ .getPreEmitDiagnostics(program)
+ .concat(emit.diagnostics);
+ for (const info of diag) {
+ const msg = info.messageText.messageText || info.messageText;
+ if (msg.includes('package.json')) continue;
+ if (info.file) {
+ const pos = info.file.getLineAndCharacterOfPosition(info.start);
+ log.error(`TSC: ${info.file.fileName} [${pos.line + 1},${pos.character + 1}]:`, msg);
+ } else {
+ log.error('TSCC:', msg);
+ }
+ }
+}
+
// rebuild on file change
async function build(f, msg) {
log.info('Build: file', msg, f, 'target:', common.target);
@@ -153,12 +192,16 @@ async function build(f, msg) {
log.state(`Build for: ${targetGroupName} type: ${targetName}:`, stats);
}
}
- if (require.main === module) process.exit(0);
} catch (err) {
// catch errors and print where it occured
log.error('Build error', JSON.stringify(err.errors || err, null, 2));
if (require.main === module) process.exit(1);
}
+
+ // generate typings
+ compile(targets.browserBundle.esm.entryPoints, tsconfig);
+
+ if (require.main === module) process.exit(0);
}
if (require.main === module) {
diff --git a/dist/face-api.d.ts b/dist/face-api.d.ts
new file mode 100644
index 0000000..b1ae253
--- /dev/null
+++ b/dist/face-api.d.ts
@@ -0,0 +1,2109 @@
+///
+declare module "classes/Dimensions" {
+ export interface IDimensions {
+ width: number;
+ height: number;
+ }
+ export class Dimensions implements IDimensions {
+ private _width;
+ private _height;
+ constructor(width: number, height: number);
+ get width(): number;
+ get height(): number;
+ reverse(): Dimensions;
+ }
+}
+declare module "utils/index" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { Point } from "classes/index";
+ import { Dimensions, IDimensions } from "classes/Dimensions";
+ export function isTensor(tensor: any, dim: number): boolean;
+ export function isTensor1D(tensor: any): tensor is tf.Tensor1D;
+ export function isTensor2D(tensor: any): tensor is tf.Tensor2D;
+ export function isTensor3D(tensor: any): tensor is tf.Tensor3D;
+ export function isTensor4D(tensor: any): tensor is tf.Tensor4D;
+ export function isFloat(num: number): boolean;
+ export function isEven(num: number): boolean;
+ export function round(num: number, prec?: number): number;
+ export function isDimensions(obj: any): boolean;
+ export function computeReshapedDimensions({ width, height }: IDimensions, inputSize: number): Dimensions;
+ export function getCenterPoint(pts: Point[]): Point;
+ export function range(num: number, start: number, step: number): number[];
+ export function isValidNumber(num: any): boolean;
+ export function isValidProbablitiy(num: any): boolean;
+}
+declare module "classes/Point" {
+ export interface IPoint {
+ x: number;
+ y: number;
+ }
+ export class Point implements IPoint {
+ private _x;
+ private _y;
+ constructor(x: number, y: number);
+ get x(): number;
+ get y(): number;
+ add(pt: IPoint): Point;
+ sub(pt: IPoint): Point;
+ mul(pt: IPoint): Point;
+ div(pt: IPoint): Point;
+ abs(): Point;
+ magnitude(): number;
+ floor(): Point;
+ }
+}
+declare module "classes/Rect" {
+ import { Box } from "classes/Box";
+ export interface IRect {
+ x: number;
+ y: number;
+ width: number;
+ height: number;
+ }
+ export class Rect extends Box implements IRect {
+ constructor(x: number, y: number, width: number, height: number, allowNegativeDimensions?: boolean);
+ }
+}
+declare module "classes/Box" {
+ import { IBoundingBox } from "classes/BoundingBox";
+ import { IDimensions } from "classes/Dimensions";
+ import { Point } from "classes/Point";
+ import { IRect } from "classes/Rect";
+ export class Box implements IBoundingBox, IRect {
+ static isRect(rect: any): boolean;
+ static assertIsValidBox(box: any, callee: string, allowNegativeDimensions?: boolean): void;
+ private _x;
+ private _y;
+ private _width;
+ private _height;
+ constructor(_box: IBoundingBox | IRect, allowNegativeDimensions?: boolean);
+ get x(): number;
+ get y(): number;
+ get width(): number;
+ get height(): number;
+ get left(): number;
+ get top(): number;
+ get right(): number;
+ get bottom(): number;
+ get area(): number;
+ get topLeft(): Point;
+ get topRight(): Point;
+ get bottomLeft(): Point;
+ get bottomRight(): Point;
+ round(): Box;
+ floor(): Box;
+ toSquare(): Box;
+ rescale(s: IDimensions | number): Box;
+ pad(padX: number, padY: number): Box;
+ clipAtImageBorders(imgWidth: number, imgHeight: number): Box;
+ shift(sx: number, sy: number): Box;
+ padAtBorders(imageHeight: number, imageWidth: number): {
+ dy: number;
+ edy: number;
+ dx: number;
+ edx: number;
+ y: number;
+ ey: number;
+ x: number;
+ ex: number;
+ w: number;
+ h: number;
+ };
+ calibrate(region: Box): Box;
+ }
+}
+declare module "classes/BoundingBox" {
+ import { Box } from "classes/Box";
+ export interface IBoundingBox {
+ left: number;
+ top: number;
+ right: number;
+ bottom: number;
+ }
+ export class BoundingBox extends Box implements IBoundingBox {
+ constructor(left: number, top: number, right: number, bottom: number, allowNegativeDimensions?: boolean);
+ }
+}
+declare module "classes/ObjectDetection" {
+ import { Box } from "classes/Box";
+ import { Dimensions, IDimensions } from "classes/Dimensions";
+ import { IRect } from "classes/Rect";
+ export class ObjectDetection {
+ private _score;
+ private _classScore;
+ private _className;
+ private _box;
+ private _imageDims;
+ constructor(score: number, classScore: number, className: string, relativeBox: IRect, imageDims: IDimensions);
+ get score(): number;
+ get classScore(): number;
+ get className(): string;
+ get box(): Box;
+ get imageDims(): Dimensions;
+ get imageWidth(): number;
+ get imageHeight(): number;
+ get relativeBox(): Box;
+ forSize(width: number, height: number): ObjectDetection;
+ }
+}
+declare module "classes/FaceDetection" {
+ import { Box } from "classes/Box";
+ import { IDimensions } from "classes/Dimensions";
+ import { ObjectDetection } from "classes/ObjectDetection";
+ import { Rect } from "classes/Rect";
+ export interface IFaceDetecion {
+ score: number;
+ box: Box;
+ }
+ export class FaceDetection extends ObjectDetection implements IFaceDetecion {
+ constructor(score: number, relativeBox: Rect, imageDims: IDimensions);
+ forSize(width: number, height: number): FaceDetection;
+ }
+}
+declare module "ops/iou" {
+ import { Box } from "classes/Box";
+ export function iou(box1: Box, box2: Box, isIOU?: boolean): number;
+}
+declare module "ops/minBbox" {
+ import { BoundingBox, IPoint } from "classes/index";
+ export function minBbox(pts: IPoint[]): BoundingBox;
+}
+declare module "ops/nonMaxSuppression" {
+ import { Box } from "classes/Box";
+ export function nonMaxSuppression(boxes: Box[], scores: number[], iouThreshold: number, isIOU?: boolean): number[];
+}
+declare module "ops/normalize" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ export function normalize(x: tf.Tensor4D, meanRgb: number[]): tf.Tensor4D;
+}
+declare module "ops/padToSquare" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ /**
+ * Pads the smaller dimension of an image tensor with zeros, such that width === height.
+ *
+ * @param imgTensor The image tensor.
+ * @param isCenterImage (optional, default: false) If true, add an equal amount of padding on
+ * both sides of the minor dimension of the image.
+ * @returns The padded tensor with width === height.
+ */
+ export function padToSquare(imgTensor: tf.Tensor4D, isCenterImage?: boolean): tf.Tensor4D;
+}
+declare module "ops/shuffleArray" {
+ export function shuffleArray(inputArray: any[]): any[];
+}
+declare module "ops/index" {
+ export * from "ops/iou";
+ export * from "ops/minBbox";
+ export * from "ops/nonMaxSuppression";
+ export * from "ops/normalize";
+ export * from "ops/padToSquare";
+ export * from "ops/shuffleArray";
+ export function sigmoid(x: number): number;
+ export function inverseSigmoid(x: number): number;
+}
+declare module "classes/FaceLandmarks" {
+ import { IBoundingBox } from "classes/BoundingBox";
+ import { Box } from "classes/Box";
+ import { Dimensions, IDimensions } from "classes/Dimensions";
+ import { FaceDetection } from "classes/FaceDetection";
+ import { Point } from "classes/Point";
+ import { IRect } from "classes/Rect";
+ export interface IFaceLandmarks {
+ positions: Point[];
+ shift: Point;
+ }
+ export class FaceLandmarks implements IFaceLandmarks {
+ protected _shift: Point;
+ protected _positions: Point[];
+ protected _imgDims: Dimensions;
+ constructor(relativeFaceLandmarkPositions: Point[], imgDims: IDimensions, shift?: Point);
+ get shift(): Point;
+ get imageWidth(): number;
+ get imageHeight(): number;
+ get positions(): Point[];
+ get relativePositions(): Point[];
+ forSize(width: number, height: number): T;
+ shiftBy(x: number, y: number): T;
+ shiftByPoint(pt: Point): T;
+ /**
+ * Aligns the face landmarks after face detection from the relative positions of the faces
+ * bounding box, or it's current shift. This function should be used to align the face images
+ * after face detection has been performed, before they are passed to the face recognition net.
+ * This will make the computed face descriptor more accurate.
+ *
+ * @param detection (optional) The bounding box of the face or the face detection result. If
+ * no argument was passed the position of the face landmarks are assumed to be relative to
+ * it's current shift.
+ * @returns The bounding box of the aligned face.
+ */
+ align(detection?: FaceDetection | IRect | IBoundingBox | null, options?: {
+ useDlibAlignment?: boolean;
+ minBoxPadding?: number;
+ }): Box;
+ private alignDlib;
+ private alignMinBbox;
+ protected getRefPointsForAlignment(): Point[];
+ }
+}
+declare module "classes/FaceLandmarks5" {
+ import { FaceLandmarks } from "classes/FaceLandmarks";
+ import { Point } from "classes/Point";
+ export class FaceLandmarks5 extends FaceLandmarks {
+ protected getRefPointsForAlignment(): Point[];
+ }
+}
+declare module "classes/FaceLandmarks68" {
+ import { FaceLandmarks } from "classes/FaceLandmarks";
+ import { Point } from "classes/Point";
+ export class FaceLandmarks68 extends FaceLandmarks {
+ getJawOutline(): Point[];
+ getLeftEyeBrow(): Point[];
+ getRightEyeBrow(): Point[];
+ getNose(): Point[];
+ getLeftEye(): Point[];
+ getRightEye(): Point[];
+ getMouth(): Point[];
+ protected getRefPointsForAlignment(): Point[];
+ }
+}
+declare module "classes/FaceMatch" {
+ export interface IFaceMatch {
+ label: string;
+ distance: number;
+ }
+ export class FaceMatch implements IFaceMatch {
+ private _label;
+ private _distance;
+ constructor(label: string, distance: number);
+ get label(): string;
+ get distance(): number;
+ toString(withDistance?: boolean): string;
+ }
+}
+declare module "classes/LabeledBox" {
+ import { IBoundingBox } from "classes/BoundingBox";
+ import { Box } from "classes/Box";
+ import { IRect } from "classes/Rect";
+ export class LabeledBox extends Box {
+ static assertIsValidLabeledBox(box: any, callee: string): void;
+ private _label;
+ constructor(box: IBoundingBox | IRect | any, label: number);
+ get label(): number;
+ }
+}
+declare module "classes/LabeledFaceDescriptors" {
+ export class LabeledFaceDescriptors {
+ private _label;
+ private _descriptors;
+ constructor(label: string, descriptors: Float32Array[]);
+ get label(): string;
+ get descriptors(): Float32Array[];
+ toJSON(): any;
+ static fromJSON(json: any): LabeledFaceDescriptors;
+ }
+}
+declare module "classes/PredictedBox" {
+ import { IBoundingBox } from "classes/BoundingBox";
+ import { LabeledBox } from "classes/LabeledBox";
+ import { IRect } from "classes/Rect";
+ export class PredictedBox extends LabeledBox {
+ static assertIsValidPredictedBox(box: any, callee: string): void;
+ private _score;
+ private _classScore;
+ constructor(box: IBoundingBox | IRect | any, label: number, score: number, classScore: number);
+ get score(): number;
+ get classScore(): number;
+ }
+}
+declare module "classes/index" {
+ export * from "classes/BoundingBox";
+ export * from "classes/Box";
+ export * from "classes/Dimensions";
+ export * from "classes/FaceDetection";
+ export * from "classes/FaceLandmarks";
+ export * from "classes/FaceLandmarks5";
+ export * from "classes/FaceLandmarks68";
+ export * from "classes/FaceMatch";
+ export * from "classes/LabeledBox";
+ export * from "classes/LabeledFaceDescriptors";
+ export * from "classes/ObjectDetection";
+ export * from "classes/Point";
+ export * from "classes/PredictedBox";
+ export * from "classes/Rect";
+}
+declare module "draw/drawContour" {
+ import { Point } from "classes/index";
+ export function drawContour(ctx: CanvasRenderingContext2D, points: Point[], isClosed?: boolean): void;
+}
+declare module "factories/WithFaceDetection" {
+ import { FaceDetection } from "classes/FaceDetection";
+ export type WithFaceDetection = TSource & {
+ detection: FaceDetection;
+ };
+ export function isWithFaceDetection(obj: any): obj is WithFaceDetection<{}>;
+ export function extendWithFaceDetection(sourceObj: TSource, detection: FaceDetection): WithFaceDetection;
+}
+declare module "env/types" {
+ export type FileSystem = {
+ readFile: (filePath: string) => Promise;
+ };
+ export type Environment = FileSystem & {
+ Canvas: typeof HTMLCanvasElement;
+ CanvasRenderingContext2D: typeof CanvasRenderingContext2D;
+ Image: typeof HTMLImageElement;
+ ImageData: typeof ImageData;
+ Video: typeof HTMLVideoElement;
+ createCanvasElement: () => HTMLCanvasElement;
+ createImageElement: () => HTMLImageElement;
+ fetch: (url: string, init?: RequestInit) => Promise;
+ };
+}
+declare module "env/createBrowserEnv" {
+ import { Environment } from "env/types";
+ export function createBrowserEnv(): Environment;
+}
+declare module "env/createFileSystem" {
+ import { FileSystem } from "env/types";
+ export function createFileSystem(fs?: any): FileSystem;
+}
+declare module "env/createNodejsEnv" {
+ import { Environment } from "env/types";
+ export function createNodejsEnv(): Environment;
+}
+declare module "env/isBrowser" {
+ export function isBrowser(): boolean;
+}
+declare module "env/isNodejs" {
+ export function isNodejs(): boolean;
+}
+declare module "env/index" {
+ import { createBrowserEnv } from "env/createBrowserEnv";
+ import { createFileSystem } from "env/createFileSystem";
+ import { createNodejsEnv } from "env/createNodejsEnv";
+ import { isBrowser } from "env/isBrowser";
+ import { isNodejs } from "env/isNodejs";
+ import { Environment } from "env/types";
+ function getEnv(): Environment;
+ function setEnv(env: Environment): void;
+ function initialize(): void;
+ function monkeyPatch(env: Partial): void;
+ export const env: {
+ getEnv: typeof getEnv;
+ setEnv: typeof setEnv;
+ initialize: typeof initialize;
+ createBrowserEnv: typeof createBrowserEnv;
+ createFileSystem: typeof createFileSystem;
+ createNodejsEnv: typeof createNodejsEnv;
+ monkeyPatch: typeof monkeyPatch;
+ isBrowser: typeof isBrowser;
+ isNodejs: typeof isNodejs;
+ };
+ export * from "env/types";
+}
+declare module "dom/resolveInput" {
+ export function resolveInput(arg: string | any): any;
+}
+declare module "dom/getContext2dOrThrow" {
+ export function getContext2dOrThrow(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): CanvasRenderingContext2D;
+}
+declare module "draw/DrawTextField" {
+ import { IDimensions, IPoint } from "classes/index";
+ export enum AnchorPosition {
+ TOP_LEFT = "TOP_LEFT",
+ TOP_RIGHT = "TOP_RIGHT",
+ BOTTOM_LEFT = "BOTTOM_LEFT",
+ BOTTOM_RIGHT = "BOTTOM_RIGHT"
+ }
+ export interface IDrawTextFieldOptions {
+ anchorPosition?: AnchorPosition;
+ backgroundColor?: string;
+ fontColor?: string;
+ fontSize?: number;
+ fontStyle?: string;
+ padding?: number;
+ }
+ export class DrawTextFieldOptions implements IDrawTextFieldOptions {
+ anchorPosition: AnchorPosition;
+ backgroundColor: string;
+ fontColor: string;
+ fontSize: number;
+ fontStyle: string;
+ padding: number;
+ constructor(options?: IDrawTextFieldOptions);
+ }
+ export class DrawTextField {
+ text: string[];
+ anchor: IPoint;
+ options: DrawTextFieldOptions;
+ constructor(text: string | string[] | DrawTextField, anchor: IPoint, options?: IDrawTextFieldOptions);
+ measureWidth(ctx: CanvasRenderingContext2D): number;
+ measureHeight(): number;
+ getUpperLeft(ctx: CanvasRenderingContext2D, canvasDims?: IDimensions): IPoint;
+ draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): void;
+ }
+}
+declare module "draw/DrawBox" {
+ import { Box, IBoundingBox, IRect } from "classes/index";
+ import { DrawTextFieldOptions, IDrawTextFieldOptions } from "draw/DrawTextField";
+ export interface IDrawBoxOptions {
+ boxColor?: string;
+ lineWidth?: number;
+ drawLabelOptions?: IDrawTextFieldOptions;
+ label?: string;
+ }
+ export class DrawBoxOptions {
+ boxColor: string;
+ lineWidth: number;
+ drawLabelOptions: DrawTextFieldOptions;
+ label?: string;
+ constructor(options?: IDrawBoxOptions);
+ }
+ export class DrawBox {
+ box: Box;
+ options: DrawBoxOptions;
+ constructor(box: IBoundingBox | IRect, options?: IDrawBoxOptions);
+ draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): void;
+ }
+}
+declare module "draw/drawDetections" {
+ import { IBoundingBox, IRect } from "classes/index";
+ import { FaceDetection } from "classes/FaceDetection";
+ import { WithFaceDetection } from "factories/WithFaceDetection";
+ export type TDrawDetectionsInput = IRect | IBoundingBox | FaceDetection | WithFaceDetection<{}>;
+ export function drawDetections(canvasArg: string | HTMLCanvasElement, detections: TDrawDetectionsInput | Array): void;
+}
+declare module "dom/isMediaLoaded" {
+ export function isMediaLoaded(media: HTMLImageElement | HTMLVideoElement): boolean;
+}
+declare module "dom/awaitMediaLoaded" {
+ export function awaitMediaLoaded(media: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement): Promise;
+}
+declare module "dom/bufferToImage" {
+ export function bufferToImage(buf: Blob): Promise;
+}
+declare module "dom/getMediaDimensions" {
+ import { Dimensions, IDimensions } from "classes/Dimensions";
+ export function getMediaDimensions(input: HTMLImageElement | HTMLCanvasElement | HTMLVideoElement | IDimensions): Dimensions;
+}
+declare module "dom/createCanvas" {
+ import { IDimensions } from "classes/Dimensions";
+ export function createCanvas({ width, height }: IDimensions): HTMLCanvasElement;
+ export function createCanvasFromMedia(media: HTMLImageElement | HTMLVideoElement | ImageData, dims?: IDimensions): HTMLCanvasElement;
+}
+declare module "dom/imageTensorToCanvas" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ export function imageTensorToCanvas(imgTensor: tf.Tensor, canvas?: HTMLCanvasElement): Promise;
+}
+declare module "dom/isMediaElement" {
+ export function isMediaElement(input: any): boolean;
+}
+declare module "dom/imageToSquare" {
+ export function imageToSquare(input: HTMLImageElement | HTMLCanvasElement, inputSize: number, centerImage?: boolean): HTMLCanvasElement;
+}
+declare module "dom/types" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { NetInput } from "dom/NetInput";
+ export type TMediaElement = HTMLImageElement | HTMLVideoElement | HTMLCanvasElement;
+ export type TResolvedNetInput = TMediaElement | tf.Tensor3D | tf.Tensor4D;
+ export type TNetInputArg = string | TResolvedNetInput;
+ export type TNetInput = TNetInputArg | Array | NetInput | tf.Tensor4D;
+}
+declare module "dom/NetInput" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { Dimensions } from "classes/Dimensions";
+ import { TResolvedNetInput } from "dom/types";
+ export class NetInput {
+ private _imageTensors;
+ private _canvases;
+ private _batchSize;
+ private _treatAsBatchInput;
+ private _inputDimensions;
+ private _inputSize;
+ constructor(inputs: Array, treatAsBatchInput?: boolean);
+ get imageTensors(): Array;
+ get canvases(): HTMLCanvasElement[];
+ get isBatchInput(): boolean;
+ get batchSize(): number;
+ get inputDimensions(): number[][];
+ get inputSize(): number | undefined;
+ get reshapedInputDimensions(): Dimensions[];
+ getInput(batchIdx: number): tf.Tensor3D | tf.Tensor4D | HTMLCanvasElement;
+ getInputDimensions(batchIdx: number): number[];
+ getInputHeight(batchIdx: number): number;
+ getInputWidth(batchIdx: number): number;
+ getReshapedInputDimensions(batchIdx: number): Dimensions;
+ /**
+ * Create a batch tensor from all input canvases and tensors
+ * with size [batchSize, inputSize, inputSize, 3].
+ *
+ * @param inputSize Height and width of the tensor.
+ * @param isCenterImage (optional, default: false) If true, add an equal amount of padding on
+ * both sides of the minor dimension of the image.
+ * @returns The batch tensor.
+ */
+ toBatchTensor(inputSize: number, isCenterInputs?: boolean): tf.Tensor4D;
+ }
+}
+declare module "dom/toNetInput" {
+ import { NetInput } from "dom/NetInput";
+ import { TNetInput } from "dom/types";
+ /**
+ * Validates the input to make sure, they are valid net inputs and awaits all media elements
+ * to be finished loading.
+ *
+ * @param input The input, which can be a media element or an array of different media elements.
+ * @returns A NetInput instance, which can be passed into one of the neural networks.
+ */
+ export function toNetInput(inputs: TNetInput): Promise;
+}
+declare module "dom/extractFaces" {
+ import { FaceDetection } from "classes/FaceDetection";
+ import { Rect } from "classes/Rect";
+ import { TNetInput } from "dom/types";
+ /**
+ * Extracts the image regions containing the detected faces.
+ *
+ * @param input The image that face detection has been performed on.
+ * @param detections The face detection results or face bounding boxes for that image.
+ * @returns The Canvases of the corresponding image region for each detected face.
+ */
+ export function extractFaces(input: TNetInput, detections: Array): Promise;
+}
+declare module "dom/extractFaceTensors" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { Rect } from "classes/index";
+ import { FaceDetection } from "classes/FaceDetection";
+ /**
+ * Extracts the tensors of the image regions containing the detected faces.
+ * Useful if you want to compute the face descriptors for the face images.
+ * Using this method is faster then extracting a canvas for each face and
+ * converting them to tensors individually.
+ *
+ * @param imageTensor The image tensor that face detection has been performed on.
+ * @param detections The face detection results or face bounding boxes for that image.
+ * @returns Tensors of the corresponding image region for each detected face.
+ */
+ export function extractFaceTensors(imageTensor: tf.Tensor3D | tf.Tensor4D, detections: Array): Promise;
+}
+declare module "dom/fetchOrThrow" {
+ export function fetchOrThrow(url: string, init?: RequestInit): Promise;
+}
+declare module "dom/fetchImage" {
+ export function fetchImage(uri: string): Promise;
+}
+declare module "dom/fetchJson" {
+ export function fetchJson(uri: string): Promise;
+}
+declare module "dom/fetchNetWeights" {
+ export function fetchNetWeights(uri: string): Promise;
+}
+declare module "common/getModelUris" {
+ export function getModelUris(uri: string | undefined, defaultModelName: string): {
+ modelBaseUri: string;
+ manifestUri: string;
+ };
+}
+declare module "dom/loadWeightMap" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ export function loadWeightMap(uri: string | undefined, defaultModelName: string): Promise;
+}
+declare module "dom/matchDimensions" {
+ import { IDimensions } from "classes/index";
+ export function matchDimensions(input: IDimensions, reference: IDimensions, useMediaDimensions?: boolean): {
+ width: number;
+ height: number;
+ };
+}
+declare module "dom/index" {
+ export * from "dom/awaitMediaLoaded";
+ export * from "dom/bufferToImage";
+ export * from "dom/createCanvas";
+ export * from "dom/extractFaces";
+ export * from "dom/extractFaceTensors";
+ export * from "dom/fetchImage";
+ export * from "dom/fetchJson";
+ export * from "dom/fetchNetWeights";
+ export * from "dom/fetchOrThrow";
+ export * from "dom/getContext2dOrThrow";
+ export * from "dom/getMediaDimensions";
+ export * from "dom/imageTensorToCanvas";
+ export * from "dom/imageToSquare";
+ export * from "dom/isMediaElement";
+ export * from "dom/isMediaLoaded";
+ export * from "dom/loadWeightMap";
+ export * from "dom/matchDimensions";
+ export * from "dom/NetInput";
+ export * from "dom/resolveInput";
+ export * from "dom/toNetInput";
+ export * from "dom/types";
+}
+declare module "common/types" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ export type ExtractWeightsFunction = (numWeights: number) => Float32Array;
+ export type ParamMapping = {
+ originalPath?: string;
+ paramPath: string;
+ };
+ export type ConvParams = {
+ filters: tf.Tensor4D;
+ bias: tf.Tensor1D;
+ };
+ export type FCParams = {
+ weights: tf.Tensor2D;
+ bias: tf.Tensor1D;
+ };
+ export class SeparableConvParams {
+ depthwise_filter: tf.Tensor4D;
+ pointwise_filter: tf.Tensor4D;
+ bias: tf.Tensor1D;
+ constructor(depthwise_filter: tf.Tensor4D, pointwise_filter: tf.Tensor4D, bias: tf.Tensor1D);
+ }
+}
+declare module "common/convLayer" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { ConvParams } from "common/types";
+ export function convLayer(x: tf.Tensor4D, params: ConvParams, padding?: 'valid' | 'same', withRelu?: boolean): tf.Tensor4D;
+}
+declare module "common/depthwiseSeparableConv" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { SeparableConvParams } from "common/types";
+ export function depthwiseSeparableConv(x: tf.Tensor4D, params: SeparableConvParams, stride: [number, number]): tf.Tensor4D;
+}
+declare module "common/disposeUnusedWeightTensors" {
+ import { ParamMapping } from "common/types";
+ export function disposeUnusedWeightTensors(weightMap: any, paramMappings: ParamMapping[]): void;
+}
+declare module "common/extractConvParamsFactory" {
+ import { ConvParams, ExtractWeightsFunction, ParamMapping } from "common/types";
+ export function extractConvParamsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]): (channelsIn: number, channelsOut: number, filterSize: number, mappedPrefix: string) => ConvParams;
+}
+declare module "common/extractFCParamsFactory" {
+ import { ExtractWeightsFunction, FCParams, ParamMapping } from "common/types";
+ export function extractFCParamsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]): (channelsIn: number, channelsOut: number, mappedPrefix: string) => FCParams;
+}
+declare module "common/extractSeparableConvParamsFactory" {
+ import { ExtractWeightsFunction, ParamMapping, SeparableConvParams } from "common/types";
+ export function extractSeparableConvParamsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]): (channelsIn: number, channelsOut: number, mappedPrefix: string) => SeparableConvParams;
+ export function loadSeparableConvParamsFactory(extractWeightEntry: (originalPath: string, paramRank: number) => T): (prefix: string) => SeparableConvParams;
+}
+declare module "common/extractWeightEntryFactory" {
+ import { ParamMapping } from "common/types";
+ export function extractWeightEntryFactory(weightMap: any, paramMappings: ParamMapping[]): (originalPath: string, paramRank: number, mappedPath?: string | undefined) => T;
+}
+declare module "common/extractWeightsFactory" {
+ export function extractWeightsFactory(weights: Float32Array): {
+ extractWeights: (numWeights: number) => Float32Array;
+ getRemainingWeights: () => Float32Array;
+ };
+}
+declare module "common/index" {
+ export * from "common/convLayer";
+ export * from "common/depthwiseSeparableConv";
+ export * from "common/disposeUnusedWeightTensors";
+ export * from "common/extractConvParamsFactory";
+ export * from "common/extractFCParamsFactory";
+ export * from "common/extractSeparableConvParamsFactory";
+ export * from "common/extractWeightEntryFactory";
+ export * from "common/extractWeightsFactory";
+ export * from "common/getModelUris";
+ export * from "common/types";
+}
+declare module "NeuralNetwork" {
+ import * as tf from '../dist/tfjs.esm.js';
+ import { ParamMapping } from "common/index";
+ export abstract class NeuralNetwork {
+ protected _name: string;
+ protected _params: TNetParams | undefined;
+ protected _paramMappings: ParamMapping[];
+ constructor(_name: string);
+ get params(): TNetParams | undefined;
+ get paramMappings(): ParamMapping[];
+ get isLoaded(): boolean;
+ getParamFromPath(paramPath: string): tf.Tensor;
+ reassignParamFromPath(paramPath: string, tensor: tf.Tensor): void;
+ getParamList(): {
+ path: string;
+ tensor: any;
+ }[];
+ getTrainableParams(): {
+ path: string;
+ tensor: any;
+ }[];
+ getFrozenParams(): {
+ path: string;
+ tensor: any;
+ }[];
+ variable(): void;
+ freeze(): void;
+ dispose(throwOnRedispose?: boolean): void;
+ serializeParams(): Float32Array;
+ load(weightsOrUrl: Float32Array | string | undefined): Promise;
+ loadFromUri(uri: string | undefined): Promise;
+ loadFromDisk(filePath: string | undefined): Promise;
+ loadFromWeightMap(weightMap: tf.NamedTensorMap): void;
+ extractWeights(weights: Float32Array): void;
+ private traversePropertyPath;
+ protected abstract getDefaultModelName(): string;
+ protected abstract extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): {
+ params: TNetParams;
+ paramMappings: ParamMapping[];
+ };
+ protected abstract extractParams(weights: Float32Array): {
+ params: TNetParams;
+ paramMappings: ParamMapping[];
+ };
+ }
+}
+declare module "faceFeatureExtractor/types" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { NetInput, TNetInput } from "index";
+ import { ConvParams, SeparableConvParams } from "common/index";
+ import { NeuralNetwork } from "NeuralNetwork";
+ export type ConvWithBatchNormParams = BatchNormParams & {
+ filter: tf.Tensor4D;
+ };
+ export type BatchNormParams = {
+ mean: tf.Tensor1D;
+ variance: tf.Tensor1D;
+ scale: tf.Tensor1D;
+ offset: tf.Tensor1D;
+ };
+ export type SeparableConvWithBatchNormParams = {
+ depthwise: ConvWithBatchNormParams;
+ pointwise: ConvWithBatchNormParams;
+ };
+ export type DenseBlock3Params = {
+ conv0: SeparableConvParams | ConvParams;
+ conv1: SeparableConvParams;
+ conv2: SeparableConvParams;
+ };
+ export type DenseBlock4Params = DenseBlock3Params & {
+ conv3: SeparableConvParams;
+ };
+ export type TinyFaceFeatureExtractorParams = {
+ dense0: DenseBlock3Params;
+ dense1: DenseBlock3Params;
+ dense2: DenseBlock3Params;
+ };
+ export type FaceFeatureExtractorParams = {
+ dense0: DenseBlock4Params;
+ dense1: DenseBlock4Params;
+ dense2: DenseBlock4Params;
+ dense3: DenseBlock4Params;
+ };
+ export interface IFaceFeatureExtractor extends NeuralNetwork {
+ forwardInput(input: NetInput): tf.Tensor4D;
+ forward(input: TNetInput): Promise;
+ }
+}
+declare module "faceFeatureExtractor/denseBlock" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { DenseBlock3Params, DenseBlock4Params } from "faceFeatureExtractor/types";
+ export function denseBlock3(x: tf.Tensor4D, denseBlockParams: DenseBlock3Params, isFirstLayer?: boolean): tf.Tensor4D;
+ export function denseBlock4(x: tf.Tensor4D, denseBlockParams: DenseBlock4Params, isFirstLayer?: boolean, isScaleDown?: boolean): tf.Tensor4D;
+}
+declare module "faceFeatureExtractor/extractorsFactory" {
+ import { ExtractWeightsFunction, ParamMapping } from "common/index";
+ import { DenseBlock3Params, DenseBlock4Params } from "faceFeatureExtractor/types";
+ export function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]): {
+ extractDenseBlock3Params: (channelsIn: number, channelsOut: number, mappedPrefix: string, isFirstLayer?: boolean) => DenseBlock3Params;
+ extractDenseBlock4Params: (channelsIn: number, channelsOut: number, mappedPrefix: string, isFirstLayer?: boolean) => DenseBlock4Params;
+ };
+}
+declare module "faceFeatureExtractor/extractParams" {
+ import { ParamMapping } from "common/index";
+ import { FaceFeatureExtractorParams } from "faceFeatureExtractor/types";
+ export function extractParams(weights: Float32Array): {
+ params: FaceFeatureExtractorParams;
+ paramMappings: ParamMapping[];
+ };
+}
+declare module "common/loadConvParamsFactory" {
+ import { ConvParams } from "common/types";
+ export function loadConvParamsFactory(extractWeightEntry: <T>(originalPath: string, paramRank: number) => T): (prefix: string) => ConvParams;
+}
+declare module "faceFeatureExtractor/loadParamsFactory" {
+ import { ParamMapping } from "common/index";
+ import { DenseBlock3Params, DenseBlock4Params } from "faceFeatureExtractor/types";
+ export function loadParamsFactory(weightMap: any, paramMappings: ParamMapping[]): {
+ extractDenseBlock3Params: (prefix: string, isFirstLayer?: boolean) => DenseBlock3Params;
+ extractDenseBlock4Params: (prefix: string, isFirstLayer?: boolean) => DenseBlock4Params;
+ };
+}
+declare module "faceFeatureExtractor/extractParamsFromWeigthMap" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { ParamMapping } from "common/index";
+ import { FaceFeatureExtractorParams } from "faceFeatureExtractor/types";
+ export function extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): {
+ params: FaceFeatureExtractorParams;
+ paramMappings: ParamMapping[];
+ };
+}
+declare module "faceFeatureExtractor/FaceFeatureExtractor" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { NetInput, TNetInput } from "dom/index";
+ import { NeuralNetwork } from "NeuralNetwork";
+ import { FaceFeatureExtractorParams, IFaceFeatureExtractor } from "faceFeatureExtractor/types";
+ export class FaceFeatureExtractor extends NeuralNetwork<FaceFeatureExtractorParams> implements IFaceFeatureExtractor<FaceFeatureExtractorParams> {
+ constructor();
+ forwardInput(input: NetInput): tf.Tensor4D;
+ forward(input: TNetInput): Promise<tf.Tensor4D>;
+ protected getDefaultModelName(): string;
+ protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): {
+ params: FaceFeatureExtractorParams;
+ paramMappings: import("common").ParamMapping[];
+ };
+ protected extractParams(weights: Float32Array): {
+ params: FaceFeatureExtractorParams;
+ paramMappings: import("common").ParamMapping[];
+ };
+ }
+}
+declare module "common/fullyConnectedLayer" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { FCParams } from "common/types";
+ export function fullyConnectedLayer(x: tf.Tensor2D, params: FCParams): tf.Tensor2D;
+}
+declare module "faceProcessor/types" {
+ import { FCParams } from "common/index";
+ export type NetParams = {
+ fc: FCParams;
+ };
+}
+declare module "faceProcessor/extractParams" {
+ import { ParamMapping } from "common/index";
+ import { NetParams } from "faceProcessor/types";
+ export function extractParams(weights: Float32Array, channelsIn: number, channelsOut: number): {
+ params: NetParams;
+ paramMappings: ParamMapping[];
+ };
+}
+declare module "faceProcessor/extractParamsFromWeigthMap" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { ParamMapping } from "common/index";
+ import { NetParams } from "faceProcessor/types";
+ export function extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): {
+ params: NetParams;
+ paramMappings: ParamMapping[];
+ };
+}
+declare module "faceProcessor/util" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ export function seperateWeightMaps(weightMap: tf.NamedTensorMap): {
+ featureExtractorMap: any;
+ classifierMap: any;
+ };
+}
+declare module "faceProcessor/FaceProcessor" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { NetInput } from "dom/index";
+ import { FaceFeatureExtractorParams, IFaceFeatureExtractor, TinyFaceFeatureExtractorParams } from "faceFeatureExtractor/types";
+ import { NeuralNetwork } from "NeuralNetwork";
+ import { NetParams } from "faceProcessor/types";
+ export abstract class FaceProcessor<TExtractorParams extends FaceFeatureExtractorParams | TinyFaceFeatureExtractorParams> extends NeuralNetwork<NetParams> {
+ protected _faceFeatureExtractor: IFaceFeatureExtractor<TExtractorParams>;
+ constructor(_name: string, faceFeatureExtractor: IFaceFeatureExtractor<TExtractorParams>);
+ get faceFeatureExtractor(): IFaceFeatureExtractor<TExtractorParams>;
+ protected abstract getDefaultModelName(): string;
+ protected abstract getClassifierChannelsIn(): number;
+ protected abstract getClassifierChannelsOut(): number;
+ runNet(input: NetInput | tf.Tensor4D): tf.Tensor2D;
+ dispose(throwOnRedispose?: boolean): void;
+ loadClassifierParams(weights: Float32Array): void;
+ extractClassifierParams(weights: Float32Array): {
+ params: NetParams;
+ paramMappings: import("common").ParamMapping[];
+ };
+ protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): {
+ params: NetParams;
+ paramMappings: import("common").ParamMapping[];
+ };
+ protected extractParams(weights: Float32Array): {
+ params: NetParams;
+ paramMappings: import("common").ParamMapping[];
+ };
+ }
+}
+declare module "faceExpressionNet/FaceExpressions" {
+ export const FACE_EXPRESSION_LABELS: string[];
+ export class FaceExpressions {
+ neutral: number;
+ happy: number;
+ sad: number;
+ angry: number;
+ fearful: number;
+ disgusted: number;
+ surprised: number;
+ constructor(probabilities: number[] | Float32Array);
+ asSortedArray(): {
+ expression: string;
+ probability: number;
+ }[];
+ }
+}
+declare module "faceExpressionNet/FaceExpressionNet" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { NetInput, TNetInput } from "dom/index";
+ import { FaceFeatureExtractor } from "faceFeatureExtractor/FaceFeatureExtractor";
+ import { FaceFeatureExtractorParams } from "faceFeatureExtractor/types";
+ import { FaceProcessor } from "faceProcessor/FaceProcessor";
+ import { FaceExpressions } from "faceExpressionNet/FaceExpressions";
+ export class FaceExpressionNet extends FaceProcessor<FaceFeatureExtractorParams> {
+ constructor(faceFeatureExtractor?: FaceFeatureExtractor);
+ forwardInput(input: NetInput | tf.Tensor4D): tf.Tensor2D;
+ forward(input: TNetInput): Promise<tf.Tensor2D>;
+ predictExpressions(input: TNetInput): Promise<FaceExpressions | FaceExpressions[]>;
+ protected getDefaultModelName(): string;
+ protected getClassifierChannelsIn(): number;
+ protected getClassifierChannelsOut(): number;
+ }
+}
+declare module "faceExpressionNet/index" {
+ export * from "faceExpressionNet/FaceExpressionNet";
+ export * from "faceExpressionNet/FaceExpressions";
+}
+declare module "factories/WithFaceExpressions" {
+ import { FaceExpressions } from "faceExpressionNet/FaceExpressions";
+ export type WithFaceExpressions<TSource> = TSource & {
+ expressions: FaceExpressions;
+ };
+ export function isWithFaceExpressions(obj: any): obj is WithFaceExpressions<{}>;
+ export function extendWithFaceExpressions<TSource>(sourceObj: TSource, expressions: FaceExpressions): WithFaceExpressions<TSource>;
+}
+declare module "draw/drawFaceExpressions" {
+ import { IPoint } from "classes/index";
+ import { FaceExpressions } from "faceExpressionNet/index";
+ import { WithFaceExpressions } from "factories/WithFaceExpressions";
+ export type DrawFaceExpressionsInput = FaceExpressions | WithFaceExpressions<{}>;
+ export function drawFaceExpressions(canvasArg: string | HTMLCanvasElement, faceExpressions: DrawFaceExpressionsInput | Array<DrawFaceExpressionsInput>, minConfidence?: number, textFieldAnchor?: IPoint): void;
+}
+declare module "factories/WithFaceLandmarks" {
+ import { FaceDetection } from "classes/FaceDetection";
+ import { FaceLandmarks } from "classes/FaceLandmarks";
+ import { FaceLandmarks68 } from "classes/FaceLandmarks68";
+ import { WithFaceDetection } from "factories/WithFaceDetection";
+ export type WithFaceLandmarks<TSource extends WithFaceDetection<{}>, TFaceLandmarks extends FaceLandmarks = FaceLandmarks68> = TSource & {
+ landmarks: TFaceLandmarks;
+ unshiftedLandmarks: TFaceLandmarks;
+ alignedRect: FaceDetection;
+ };
+ export function isWithFaceLandmarks(obj: any): obj is WithFaceLandmarks<WithFaceDetection<{}>, FaceLandmarks>;
+ export function extendWithFaceLandmarks<TSource extends WithFaceDetection<{}>, TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>(sourceObj: TSource, unshiftedLandmarks: TFaceLandmarks): WithFaceLandmarks<TSource, TFaceLandmarks>;
+}
+declare module "draw/DrawFaceLandmarks" {
+ import { FaceLandmarks } from "classes/FaceLandmarks";
+ import { WithFaceDetection } from "factories/WithFaceDetection";
+ import { WithFaceLandmarks } from "factories/WithFaceLandmarks";
+ export interface IDrawFaceLandmarksOptions {
+ drawLines?: boolean;
+ drawPoints?: boolean;
+ lineWidth?: number;
+ pointSize?: number;
+ lineColor?: string;
+ pointColor?: string;
+ }
+ export class DrawFaceLandmarksOptions {
+ drawLines: boolean;
+ drawPoints: boolean;
+ lineWidth: number;
+ pointSize: number;
+ lineColor: string;
+ pointColor: string;
+ constructor(options?: IDrawFaceLandmarksOptions);
+ }
+ export class DrawFaceLandmarks {
+ faceLandmarks: FaceLandmarks;
+ options: DrawFaceLandmarksOptions;
+ constructor(faceLandmarks: FaceLandmarks, options?: IDrawFaceLandmarksOptions);
+ draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): void;
+ }
+ export type DrawFaceLandmarksInput = FaceLandmarks | WithFaceLandmarks<WithFaceDetection<{}>>;
+ export function drawFaceLandmarks(canvasArg: string | HTMLCanvasElement, faceLandmarks: DrawFaceLandmarksInput | Array<DrawFaceLandmarksInput>): void;
+}
+declare module "draw/index" {
+ export * from "draw/drawContour";
+ export * from "draw/drawDetections";
+ export * from "draw/drawFaceExpressions";
+ export * from "draw/DrawBox";
+ export * from "draw/DrawFaceLandmarks";
+ export * from "draw/DrawTextField";
+}
+declare module "xception/types" {
+ import { ConvParams, SeparableConvParams } from "common/index";
+ export type ReductionBlockParams = {
+ separable_conv0: SeparableConvParams;
+ separable_conv1: SeparableConvParams;
+ expansion_conv: ConvParams;
+ };
+ export type MainBlockParams = {
+ separable_conv0: SeparableConvParams;
+ separable_conv1: SeparableConvParams;
+ separable_conv2: SeparableConvParams;
+ };
+ export type TinyXceptionParams = {
+ entry_flow: {
+ conv_in: ConvParams;
+ reduction_block_0: ReductionBlockParams;
+ reduction_block_1: ReductionBlockParams;
+ };
+ middle_flow: any;
+ exit_flow: {
+ reduction_block: ReductionBlockParams;
+ separable_conv: SeparableConvParams;
+ };
+ };
+}
+declare module "xception/extractParams" {
+ import { ParamMapping } from "common/types";
+ import { TinyXceptionParams } from "xception/types";
+ export function extractParams(weights: Float32Array, numMainBlocks: number): {
+ params: TinyXceptionParams;
+ paramMappings: ParamMapping[];
+ };
+}
+declare module "xception/extractParamsFromWeigthMap" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { ParamMapping } from "common/index";
+ import { TinyXceptionParams } from "xception/types";
+ export function extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap, numMainBlocks: number): {
+ params: TinyXceptionParams;
+ paramMappings: ParamMapping[];
+ };
+}
+declare module "xception/TinyXception" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { NetInput, TNetInput } from "dom/index";
+ import { NeuralNetwork } from "NeuralNetwork";
+ import { TinyXceptionParams } from "xception/types";
+ export class TinyXception extends NeuralNetwork<TinyXceptionParams> {
+ private _numMainBlocks;
+ constructor(numMainBlocks: number);
+ forwardInput(input: NetInput): tf.Tensor4D;
+ forward(input: TNetInput): Promise<tf.Tensor4D>;
+ protected getDefaultModelName(): string;
+ protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): {
+ params: TinyXceptionParams;
+ paramMappings: import("common").ParamMapping[];
+ };
+ protected extractParams(weights: Float32Array): {
+ params: TinyXceptionParams;
+ paramMappings: import("common").ParamMapping[];
+ };
+ }
+}
+declare module "ageGenderNet/types" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { FCParams } from "common/index";
+ export type AgeAndGenderPrediction = {
+ age: number;
+ gender: Gender;
+ genderProbability: number;
+ };
+ export enum Gender {
+ FEMALE = "female",
+ MALE = "male"
+ }
+ export type NetOutput = {
+ age: tf.Tensor1D;
+ gender: tf.Tensor2D;
+ };
+ export type NetParams = {
+ fc: {
+ age: FCParams;
+ gender: FCParams;
+ };
+ };
+}
+declare module "ageGenderNet/extractParams" {
+ import { ParamMapping } from "common/index";
+ import { NetParams } from "ageGenderNet/types";
+ export function extractParams(weights: Float32Array): {
+ params: NetParams;
+ paramMappings: ParamMapping[];
+ };
+}
+declare module "ageGenderNet/extractParamsFromWeigthMap" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { ParamMapping } from "common/index";
+ import { NetParams } from "ageGenderNet/types";
+ export function extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): {
+ params: NetParams;
+ paramMappings: ParamMapping[];
+ };
+}
+declare module "ageGenderNet/AgeGenderNet" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { TinyXception } from "xception/TinyXception";
+ import { AgeAndGenderPrediction, NetOutput, NetParams } from "ageGenderNet/types";
+ import { NeuralNetwork } from "NeuralNetwork";
+ import { NetInput, TNetInput } from "dom/index";
+ export class AgeGenderNet extends NeuralNetwork<NetParams> {
+ private _faceFeatureExtractor;
+ constructor(faceFeatureExtractor?: TinyXception);
+ get faceFeatureExtractor(): TinyXception;
+ runNet(input: NetInput | tf.Tensor4D): NetOutput;
+ forwardInput(input: NetInput | tf.Tensor4D): NetOutput;
+ forward(input: TNetInput): Promise<NetOutput>;
+ predictAgeAndGender(input: TNetInput): Promise<AgeAndGenderPrediction | AgeAndGenderPrediction[]>;
+ protected getDefaultModelName(): string;
+ dispose(throwOnRedispose?: boolean): void;
+ loadClassifierParams(weights: Float32Array): void;
+ extractClassifierParams(weights: Float32Array): {
+ params: NetParams;
+ paramMappings: import("common").ParamMapping[];
+ };
+ protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): {
+ params: NetParams;
+ paramMappings: import("common").ParamMapping[];
+ };
+ protected extractParams(weights: Float32Array): {
+ params: NetParams;
+ paramMappings: import("common").ParamMapping[];
+ };
+ }
+}
+declare module "ageGenderNet/index" {
+ export * from "ageGenderNet/AgeGenderNet";
+ export * from "ageGenderNet/types";
+}
+declare module "faceLandmarkNet/FaceLandmark68NetBase" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { IDimensions } from "classes/index";
+ import { FaceLandmarks68 } from "classes/FaceLandmarks68";
+ import { NetInput, TNetInput } from "dom/index";
+ import { FaceFeatureExtractorParams, TinyFaceFeatureExtractorParams } from "faceFeatureExtractor/types";
+ import { FaceProcessor } from "faceProcessor/FaceProcessor";
+ export abstract class FaceLandmark68NetBase<TExtractorParams extends FaceFeatureExtractorParams | TinyFaceFeatureExtractorParams> extends FaceProcessor<TExtractorParams> {
+ postProcess(output: tf.Tensor2D, inputSize: number, originalDimensions: IDimensions[]): tf.Tensor2D;
+ forwardInput(input: NetInput): tf.Tensor2D;
+ forward(input: TNetInput): Promise<tf.Tensor2D>;
+ detectLandmarks(input: TNetInput): Promise<FaceLandmarks68 | FaceLandmarks68[]>;
+ protected getClassifierChannelsOut(): number;
+ }
+}
+declare module "faceLandmarkNet/FaceLandmark68Net" {
+ import { FaceFeatureExtractor } from "faceFeatureExtractor/FaceFeatureExtractor";
+ import { FaceFeatureExtractorParams } from "faceFeatureExtractor/types";
+ import { FaceLandmark68NetBase } from "faceLandmarkNet/FaceLandmark68NetBase";
+ export class FaceLandmark68Net extends FaceLandmark68NetBase<FaceFeatureExtractorParams> {
+ constructor(faceFeatureExtractor?: FaceFeatureExtractor);
+ protected getDefaultModelName(): string;
+ protected getClassifierChannelsIn(): number;
+ }
+}
+declare module "faceFeatureExtractor/extractParamsFromWeigthMapTiny" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { ParamMapping } from "common/index";
+ import { TinyFaceFeatureExtractorParams } from "faceFeatureExtractor/types";
+ export function extractParamsFromWeigthMapTiny(weightMap: tf.NamedTensorMap): {
+ params: TinyFaceFeatureExtractorParams;
+ paramMappings: ParamMapping[];
+ };
+}
+declare module "faceFeatureExtractor/extractParamsTiny" {
+ import { ParamMapping } from "common/index";
+ import { TinyFaceFeatureExtractorParams } from "faceFeatureExtractor/types";
+ export function extractParamsTiny(weights: Float32Array): {
+ params: TinyFaceFeatureExtractorParams;
+ paramMappings: ParamMapping[];
+ };
+}
+declare module "faceFeatureExtractor/TinyFaceFeatureExtractor" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { NetInput, TNetInput } from "dom/index";
+ import { NeuralNetwork } from "NeuralNetwork";
+ import { IFaceFeatureExtractor, TinyFaceFeatureExtractorParams } from "faceFeatureExtractor/types";
+ export class TinyFaceFeatureExtractor extends NeuralNetwork<TinyFaceFeatureExtractorParams> implements IFaceFeatureExtractor<TinyFaceFeatureExtractorParams> {
+ constructor();
+ forwardInput(input: NetInput): tf.Tensor4D;
+ forward(input: TNetInput): Promise<tf.Tensor4D>;
+ protected getDefaultModelName(): string;
+ protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): {
+ params: TinyFaceFeatureExtractorParams;
+ paramMappings: import("common").ParamMapping[];
+ };
+ protected extractParams(weights: Float32Array): {
+ params: TinyFaceFeatureExtractorParams;
+ paramMappings: import("common").ParamMapping[];
+ };
+ }
+}
+declare module "faceLandmarkNet/FaceLandmark68TinyNet" {
+ import { TinyFaceFeatureExtractor } from "faceFeatureExtractor/TinyFaceFeatureExtractor";
+ import { TinyFaceFeatureExtractorParams } from "faceFeatureExtractor/types";
+ import { FaceLandmark68NetBase } from "faceLandmarkNet/FaceLandmark68NetBase";
+ export class FaceLandmark68TinyNet extends FaceLandmark68NetBase<TinyFaceFeatureExtractorParams> {
+ constructor(faceFeatureExtractor?: TinyFaceFeatureExtractor);
+ protected getDefaultModelName(): string;
+ protected getClassifierChannelsIn(): number;
+ }
+}
+declare module "faceLandmarkNet/index" {
+ import { FaceLandmark68Net } from "faceLandmarkNet/FaceLandmark68Net";
+ export * from "faceLandmarkNet/FaceLandmark68Net";
+ export * from "faceLandmarkNet/FaceLandmark68TinyNet";
+ export class FaceLandmarkNet extends FaceLandmark68Net {
+ }
+}
+declare module "faceRecognitionNet/types" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { ConvParams } from "common/index";
+ export type ScaleLayerParams = {
+ weights: tf.Tensor1D;
+ biases: tf.Tensor1D;
+ };
+ export type ResidualLayerParams = {
+ conv1: ConvLayerParams;
+ conv2: ConvLayerParams;
+ };
+ export type ConvLayerParams = {
+ conv: ConvParams;
+ scale: ScaleLayerParams;
+ };
+ export type NetParams = {
+ conv32_down: ConvLayerParams;
+ conv32_1: ResidualLayerParams;
+ conv32_2: ResidualLayerParams;
+ conv32_3: ResidualLayerParams;
+ conv64_down: ResidualLayerParams;
+ conv64_1: ResidualLayerParams;
+ conv64_2: ResidualLayerParams;
+ conv64_3: ResidualLayerParams;
+ conv128_down: ResidualLayerParams;
+ conv128_1: ResidualLayerParams;
+ conv128_2: ResidualLayerParams;
+ conv256_down: ResidualLayerParams;
+ conv256_1: ResidualLayerParams;
+ conv256_2: ResidualLayerParams;
+ conv256_down_out: ResidualLayerParams;
+ fc: tf.Tensor2D;
+ };
+}
+declare module "faceRecognitionNet/scaleLayer" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { ScaleLayerParams } from "faceRecognitionNet/types";
+ export function scale(x: tf.Tensor4D, params: ScaleLayerParams): tf.Tensor4D;
+}
+declare module "faceRecognitionNet/convLayer" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { ConvLayerParams } from "faceRecognitionNet/types";
+ export function conv(x: tf.Tensor4D, params: ConvLayerParams): any;
+ export function convNoRelu(x: tf.Tensor4D, params: ConvLayerParams): any;
+ export function convDown(x: tf.Tensor4D, params: ConvLayerParams): any;
+}
+declare module "faceRecognitionNet/extractParams" {
+ import { ParamMapping } from "common/index";
+ import { NetParams } from "faceRecognitionNet/types";
+ export function extractParams(weights: Float32Array): {
+ params: NetParams;
+ paramMappings: ParamMapping[];
+ };
+}
+declare module "faceRecognitionNet/extractParamsFromWeigthMap" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { ParamMapping } from "common/index";
+ import { NetParams } from "faceRecognitionNet/types";
+ export function extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): {
+ params: NetParams;
+ paramMappings: ParamMapping[];
+ };
+}
+declare module "faceRecognitionNet/residualLayer" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { ResidualLayerParams } from "faceRecognitionNet/types";
+ export function residual(x: tf.Tensor4D, params: ResidualLayerParams): tf.Tensor4D;
+ export function residualDown(x: tf.Tensor4D, params: ResidualLayerParams): tf.Tensor4D;
+}
+declare module "faceRecognitionNet/FaceRecognitionNet" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { NetInput, TNetInput } from "dom/index";
+ import { NeuralNetwork } from "NeuralNetwork";
+ import { NetParams } from "faceRecognitionNet/types";
+ export class FaceRecognitionNet extends NeuralNetwork<NetParams> {
+ constructor();
+ forwardInput(input: NetInput): tf.Tensor2D;
+ forward(input: TNetInput): Promise<tf.Tensor2D>;
+ computeFaceDescriptor(input: TNetInput): Promise<Float32Array | Float32Array[]>;
+ protected getDefaultModelName(): string;
+ protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): {
+ params: NetParams;
+ paramMappings: import("common").ParamMapping[];
+ };
+ protected extractParams(weights: Float32Array): {
+ params: NetParams;
+ paramMappings: import("common").ParamMapping[];
+ };
+ }
+}
+declare module "faceRecognitionNet/index" {
+ import { FaceRecognitionNet } from "faceRecognitionNet/FaceRecognitionNet";
+ export * from "faceRecognitionNet/FaceRecognitionNet";
+ export function createFaceRecognitionNet(weights: Float32Array): FaceRecognitionNet;
+}
+declare module "factories/WithFaceDescriptor" {
+ export type WithFaceDescriptor<TSource> = TSource & {
+ descriptor: Float32Array;
+ };
+ export function extendWithFaceDescriptor<TSource>(sourceObj: TSource, descriptor: Float32Array): WithFaceDescriptor<TSource>;
+}
+declare module "factories/WithAge" {
+ export type WithAge<TSource> = TSource & {
+ age: number;
+ };
+ export function isWithAge(obj: any): obj is WithAge<{}>;
+ export function extendWithAge<TSource>(sourceObj: TSource, age: number): WithAge<TSource>;
+}
+declare module "factories/WithGender" {
+ import { Gender } from "ageGenderNet/types";
+ export type WithGender<TSource> = TSource & {
+ gender: Gender;
+ genderProbability: number;
+ };
+ export function isWithGender(obj: any): obj is WithGender<{}>;
+ export function extendWithGender<TSource>(sourceObj: TSource, gender: Gender, genderProbability: number): WithGender<TSource>;
+}
+declare module "factories/index" {
+ export * from "factories/WithFaceDescriptor";
+ export * from "factories/WithFaceDetection";
+ export * from "factories/WithFaceExpressions";
+ export * from "factories/WithFaceLandmarks";
+ export * from "factories/WithAge";
+ export * from "factories/WithGender";
+}
+declare module "ssdMobilenetv1/types" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { ConvParams } from "common/index";
+ export type PointwiseConvParams = {
+ filters: tf.Tensor4D;
+ batch_norm_offset: tf.Tensor1D;
+ };
+ export namespace MobileNetV1 {
+ type DepthwiseConvParams = {
+ filters: tf.Tensor4D;
+ batch_norm_scale: tf.Tensor1D;
+ batch_norm_offset: tf.Tensor1D;
+ batch_norm_mean: tf.Tensor1D;
+ batch_norm_variance: tf.Tensor1D;
+ };
+ type ConvPairParams = {
+ depthwise_conv: DepthwiseConvParams;
+ pointwise_conv: PointwiseConvParams;
+ };
+ type Params = {
+ conv_0: PointwiseConvParams;
+ conv_1: ConvPairParams;
+ conv_2: ConvPairParams;
+ conv_3: ConvPairParams;
+ conv_4: ConvPairParams;
+ conv_5: ConvPairParams;
+ conv_6: ConvPairParams;
+ conv_7: ConvPairParams;
+ conv_8: ConvPairParams;
+ conv_9: ConvPairParams;
+ conv_10: ConvPairParams;
+ conv_11: ConvPairParams;
+ conv_12: ConvPairParams;
+ conv_13: ConvPairParams;
+ };
+ }
+ export type BoxPredictionParams = {
+ box_encoding_predictor: ConvParams;
+ class_predictor: ConvParams;
+ };
+ export type PredictionLayerParams = {
+ conv_0: PointwiseConvParams;
+ conv_1: PointwiseConvParams;
+ conv_2: PointwiseConvParams;
+ conv_3: PointwiseConvParams;
+ conv_4: PointwiseConvParams;
+ conv_5: PointwiseConvParams;
+ conv_6: PointwiseConvParams;
+ conv_7: PointwiseConvParams;
+ box_predictor_0: BoxPredictionParams;
+ box_predictor_1: BoxPredictionParams;
+ box_predictor_2: BoxPredictionParams;
+ box_predictor_3: BoxPredictionParams;
+ box_predictor_4: BoxPredictionParams;
+ box_predictor_5: BoxPredictionParams;
+ };
+ export type OutputLayerParams = {
+ extra_dim: tf.Tensor3D;
+ };
+ export type NetParams = {
+ mobilenetv1: MobileNetV1.Params;
+ prediction_layer: PredictionLayerParams;
+ output_layer: OutputLayerParams;
+ };
+}
+declare module "ssdMobilenetv1/extractParams" {
+ import { ParamMapping } from "common/index";
+ import { NetParams } from "ssdMobilenetv1/types";
+ export function extractParams(weights: Float32Array): {
+ params: NetParams;
+ paramMappings: ParamMapping[];
+ };
+}
+declare module "ssdMobilenetv1/extractParamsFromWeigthMap" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { ParamMapping } from "common/index";
+ import { NetParams } from "ssdMobilenetv1/types";
+ export function extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): {
+ params: NetParams;
+ paramMappings: ParamMapping[];
+ };
+}
+declare module "ssdMobilenetv1/pointwiseConvLayer" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { PointwiseConvParams } from "ssdMobilenetv1/types";
+ export function pointwiseConvLayer(x: tf.Tensor4D, params: PointwiseConvParams, strides: [number, number]): any;
+}
+declare module "ssdMobilenetv1/mobileNetV1" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { MobileNetV1 } from "ssdMobilenetv1/types";
+ export function mobileNetV1(x: tf.Tensor4D, params: MobileNetV1.Params): any;
+}
+declare module "ssdMobilenetv1/nonMaxSuppression" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ export function nonMaxSuppression(boxes: tf.Tensor2D, scores: number[], maxOutputSize: number, iouThreshold: number, scoreThreshold: number): number[];
+}
+declare module "ssdMobilenetv1/outputLayer" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { OutputLayerParams } from "ssdMobilenetv1/types";
+ export function outputLayer(boxPredictions: tf.Tensor4D, classPredictions: tf.Tensor4D, params: OutputLayerParams): any;
+}
+declare module "ssdMobilenetv1/boxPredictionLayer" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { BoxPredictionParams } from "ssdMobilenetv1/types";
+ export function boxPredictionLayer(x: tf.Tensor4D, params: BoxPredictionParams): any;
+}
+declare module "ssdMobilenetv1/predictionLayer" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { PredictionLayerParams } from "ssdMobilenetv1/types";
+ export function predictionLayer(x: tf.Tensor4D, conv11: tf.Tensor4D, params: PredictionLayerParams): any;
+}
+declare module "ssdMobilenetv1/SsdMobilenetv1Options" {
+ export interface ISsdMobilenetv1Options {
+ minConfidence?: number;
+ maxResults?: number;
+ }
+ export class SsdMobilenetv1Options {
+ protected _name: string;
+ private _minConfidence;
+ private _maxResults;
+ constructor({ minConfidence, maxResults }?: ISsdMobilenetv1Options);
+ get minConfidence(): number;
+ get maxResults(): number;
+ }
+}
+declare module "ssdMobilenetv1/SsdMobilenetv1" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { FaceDetection } from "classes/FaceDetection";
+ import { NetInput, TNetInput } from "dom/index";
+ import { NeuralNetwork } from "NeuralNetwork";
+ import { ISsdMobilenetv1Options } from "ssdMobilenetv1/SsdMobilenetv1Options";
+ import { NetParams } from "ssdMobilenetv1/types";
+ export class SsdMobilenetv1 extends NeuralNetwork<NetParams> {
+ constructor();
+ forwardInput(input: NetInput): any;
+ forward(input: TNetInput): Promise<any>;
+ locateFaces(input: TNetInput, options?: ISsdMobilenetv1Options): Promise<FaceDetection[]>;
+ protected getDefaultModelName(): string;
+ protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): {
+ params: NetParams;
+ paramMappings: import("common").ParamMapping[];
+ };
+ protected extractParams(weights: Float32Array): {
+ params: NetParams;
+ paramMappings: import("common").ParamMapping[];
+ };
+ }
+}
+declare module "ssdMobilenetv1/index" {
+ import { SsdMobilenetv1 } from "ssdMobilenetv1/SsdMobilenetv1";
+ export * from "ssdMobilenetv1/SsdMobilenetv1";
+ export * from "ssdMobilenetv1/SsdMobilenetv1Options";
+ export function createSsdMobilenetv1(weights: Float32Array): SsdMobilenetv1;
+ export function createFaceDetectionNet(weights: Float32Array): SsdMobilenetv1;
+ export class FaceDetectionNet extends SsdMobilenetv1 {
+ }
+}
+declare module "tinyYolov2/const" {
+ import { Point } from "classes/index";
+ export const IOU_THRESHOLD = 0.4;
+ export const BOX_ANCHORS: Point[];
+ export const BOX_ANCHORS_SEPARABLE: Point[];
+ export const MEAN_RGB_SEPARABLE: [number, number, number];
+ export const DEFAULT_MODEL_NAME = "tiny_yolov2_model";
+ export const DEFAULT_MODEL_NAME_SEPARABLE_CONV = "tiny_yolov2_separable_conv_model";
+}
+declare module "tinyYolov2/config" {
+ import { Point } from "classes/Point";
+ export type TinyYolov2Config = {
+ withSeparableConvs: boolean;
+ iouThreshold: number;
+ anchors: Point[];
+ classes: string[];
+ meanRgb?: [number, number, number];
+ withClassScores?: boolean;
+ filterSizes?: number[];
+ isFirstLayerConv2d?: boolean;
+ };
+ export function validateConfig(config: any): void;
+}
+declare module "tinyYolov2/leaky" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ export function leaky(x: tf.Tensor4D): tf.Tensor4D;
+}
+declare module "tinyYolov2/types" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { ConvParams } from "common/index";
+ import { SeparableConvParams } from "common/types";
+ export type BatchNorm = {
+ sub: tf.Tensor1D;
+ truediv: tf.Tensor1D;
+ };
+ export type ConvWithBatchNorm = {
+ conv: ConvParams;
+ bn: BatchNorm;
+ };
+ export type MobilenetParams = {
+ conv0: SeparableConvParams | ConvParams;
+ conv1: SeparableConvParams;
+ conv2: SeparableConvParams;
+ conv3: SeparableConvParams;
+ conv4: SeparableConvParams;
+ conv5: SeparableConvParams;
+ conv6?: SeparableConvParams;
+ conv7?: SeparableConvParams;
+ conv8: ConvParams;
+ };
+ export type DefaultTinyYolov2NetParams = {
+ conv0: ConvWithBatchNorm;
+ conv1: ConvWithBatchNorm;
+ conv2: ConvWithBatchNorm;
+ conv3: ConvWithBatchNorm;
+ conv4: ConvWithBatchNorm;
+ conv5: ConvWithBatchNorm;
+ conv6: ConvWithBatchNorm;
+ conv7: ConvWithBatchNorm;
+ conv8: ConvParams;
+ };
+ export type TinyYolov2NetParams = DefaultTinyYolov2NetParams | MobilenetParams;
+}
+declare module "tinyYolov2/convWithBatchNorm" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { ConvWithBatchNorm } from "tinyYolov2/types";
+ export function convWithBatchNorm(x: tf.Tensor4D, params: ConvWithBatchNorm): tf.Tensor4D;
+}
+declare module "tinyYolov2/depthwiseSeparableConv" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { SeparableConvParams } from "common/types";
+ export function depthwiseSeparableConv(x: tf.Tensor4D, params: SeparableConvParams): tf.Tensor4D;
+}
+declare module "tinyYolov2/extractParams" {
+ import { ParamMapping } from "common/types";
+ import { TinyYolov2Config } from "tinyYolov2/config";
+ import { TinyYolov2NetParams } from "tinyYolov2/types";
+ export function extractParams(weights: Float32Array, config: TinyYolov2Config, boxEncodingSize: number, filterSizes: number[]): {
+ params: TinyYolov2NetParams;
+ paramMappings: ParamMapping[];
+ };
+}
+declare module "tinyYolov2/extractParamsFromWeigthMap" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { ParamMapping } from "common/types";
+ import { TinyYolov2Config } from "tinyYolov2/config";
+ import { TinyYolov2NetParams } from "tinyYolov2/types";
+ export function extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap, config: TinyYolov2Config): {
+ params: TinyYolov2NetParams;
+ paramMappings: ParamMapping[];
+ };
+}
+declare module "tinyYolov2/TinyYolov2Options" {
+ export enum TinyYolov2SizeType {
+ XS = 224,
+ SM = 320,
+ MD = 416,
+ LG = 608
+ }
+ export interface ITinyYolov2Options {
+ inputSize?: number;
+ scoreThreshold?: number;
+ }
+ export class TinyYolov2Options {
+ protected _name: string;
+ private _inputSize;
+ private _scoreThreshold;
+ constructor({ inputSize, scoreThreshold }?: ITinyYolov2Options);
+ get inputSize(): number;
+ get scoreThreshold(): number;
+ }
+}
+declare module "tinyYolov2/TinyYolov2Base" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { Dimensions } from "classes/Dimensions";
+ import { ObjectDetection } from "classes/ObjectDetection";
+ import { NetInput } from "dom/NetInput";
+ import { TNetInput } from "dom/types";
+ import { NeuralNetwork } from "NeuralNetwork";
+ import { TinyYolov2Config } from "tinyYolov2/config";
+ import { ITinyYolov2Options } from "tinyYolov2/TinyYolov2Options";
+ import { DefaultTinyYolov2NetParams, MobilenetParams, TinyYolov2NetParams } from "tinyYolov2/types";
+ export class TinyYolov2Base extends NeuralNetwork {
+ static DEFAULT_FILTER_SIZES: number[];
+ private _config;
+ constructor(config: TinyYolov2Config);
+ get config(): TinyYolov2Config;
+ get withClassScores(): boolean;
+ get boxEncodingSize(): number;
+ runTinyYolov2(x: tf.Tensor4D, params: DefaultTinyYolov2NetParams): tf.Tensor4D;
+ runMobilenet(x: tf.Tensor4D, params: MobilenetParams): tf.Tensor4D;
+ forwardInput(input: NetInput, inputSize: number): tf.Tensor4D;
+ forward(input: TNetInput, inputSize: number): Promise;
+ detect(input: TNetInput, forwardParams?: ITinyYolov2Options): Promise;
+ protected getDefaultModelName(): string;
+ protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): {
+ params: TinyYolov2NetParams;
+ paramMappings: import("common").ParamMapping[];
+ };
+ protected extractParams(weights: Float32Array): {
+ params: TinyYolov2NetParams;
+ paramMappings: import("common").ParamMapping[];
+ };
+ protected extractBoxes(outputTensor: tf.Tensor4D, inputBlobDimensions: Dimensions, scoreThreshold?: number): Promise;
+ private extractPredictedClass;
+ }
+}
+declare module "tinyYolov2/TinyYolov2" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { FaceDetection, Point } from "classes/index";
+ import { ParamMapping } from "common/types";
+ import { TNetInput } from "dom/types";
+ import { TinyYolov2Base } from "tinyYolov2/TinyYolov2Base";
+ import { ITinyYolov2Options } from "tinyYolov2/TinyYolov2Options";
+ import { TinyYolov2NetParams } from "tinyYolov2/types";
+ export class TinyYolov2 extends TinyYolov2Base {
+ constructor(withSeparableConvs?: boolean);
+ get withSeparableConvs(): boolean;
+ get anchors(): Point[];
+ locateFaces(input: TNetInput, forwardParams: ITinyYolov2Options): Promise;
+ protected getDefaultModelName(): string;
+ protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): {
+ params: TinyYolov2NetParams;
+ paramMappings: ParamMapping[];
+ };
+ }
+}
+declare module "tinyYolov2/index" {
+ import { TinyYolov2 } from "tinyYolov2/TinyYolov2";
+ export * from "tinyYolov2/TinyYolov2Options";
+ export * from "tinyYolov2/config";
+ export * from "tinyYolov2/types";
+ export { TinyYolov2 };
+ export function createTinyYolov2(weights: Float32Array, withSeparableConvs?: boolean): TinyYolov2;
+}
+declare module "tinyFaceDetector/TinyFaceDetectorOptions" {
+ import { ITinyYolov2Options, TinyYolov2Options } from "tinyYolov2/index";
+ export interface ITinyFaceDetectorOptions extends ITinyYolov2Options {
+ }
+ export class TinyFaceDetectorOptions extends TinyYolov2Options {
+ protected _name: string;
+ }
+}
+declare module "globalApi/ComposableTask" {
+ export class ComposableTask {
+ then(onfulfilled: (value: T) => T | PromiseLike): Promise;
+ run(): Promise;
+ }
+}
+declare module "globalApi/extractFacesAndComputeResults" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { FaceDetection } from "classes/FaceDetection";
+ import { TNetInput } from "dom/index";
+ import { WithFaceDetection } from "factories/WithFaceDetection";
+ import { WithFaceLandmarks } from "factories/WithFaceLandmarks";
+ export function extractAllFacesAndComputeResults, TResult>(parentResults: TSource[], input: TNetInput, computeResults: (faces: Array) => Promise, extractedFaces?: Array | null, getRectForAlignment?: (parentResult: WithFaceLandmarks) => FaceDetection): Promise;
+ export function extractSingleFaceAndComputeResult, TResult>(parentResult: TSource, input: TNetInput, computeResult: (face: HTMLCanvasElement | tf.Tensor3D) => Promise, extractedFaces?: Array | null, getRectForAlignment?: (parentResult: WithFaceLandmarks) => FaceDetection): Promise;
+}
+declare module "tinyFaceDetector/const" {
+ import { Point } from "classes/index";
+ export const IOU_THRESHOLD = 0.4;
+ export const BOX_ANCHORS: Point[];
+ export const MEAN_RGB: [number, number, number];
+}
+declare module "tinyFaceDetector/TinyFaceDetector" {
+ import * as tf from '../../dist/tfjs.esm.js';
+ import { FaceDetection, Point } from "classes/index";
+ import { ParamMapping } from "common/index";
+ import { TNetInput } from "dom/index";
+ import { ITinyYolov2Options } from "tinyYolov2/index";
+ import { TinyYolov2Base } from "tinyYolov2/TinyYolov2Base";
+ import { TinyYolov2NetParams } from "tinyYolov2/types";
+ export class TinyFaceDetector extends TinyYolov2Base {
+ constructor();
+ get anchors(): Point[];
+ locateFaces(input: TNetInput, forwardParams: ITinyYolov2Options): Promise;
+ protected getDefaultModelName(): string;
+ protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): {
+ params: TinyYolov2NetParams;
+ paramMappings: ParamMapping[];
+ };
+ }
+}
+declare module "globalApi/nets" {
+ import { AgeGenderNet } from "ageGenderNet/AgeGenderNet";
+ import { AgeAndGenderPrediction } from "ageGenderNet/types";
+ import { FaceDetection } from "classes/FaceDetection";
+ import { FaceLandmarks68 } from "classes/FaceLandmarks68";
+ import { TNetInput } from "dom/index";
+ import { FaceExpressionNet } from "faceExpressionNet/FaceExpressionNet";
+ import { FaceExpressions } from "faceExpressionNet/FaceExpressions";
+ import { FaceLandmark68Net } from "faceLandmarkNet/FaceLandmark68Net";
+ import { FaceLandmark68TinyNet } from "faceLandmarkNet/FaceLandmark68TinyNet";
+ import { FaceRecognitionNet } from "faceRecognitionNet/FaceRecognitionNet";
+ import { SsdMobilenetv1 } from "ssdMobilenetv1/SsdMobilenetv1";
+ import { SsdMobilenetv1Options } from "ssdMobilenetv1/SsdMobilenetv1Options";
+ import { TinyFaceDetector } from "tinyFaceDetector/TinyFaceDetector";
+ import { TinyFaceDetectorOptions } from "tinyFaceDetector/TinyFaceDetectorOptions";
+ import { ITinyYolov2Options, TinyYolov2 } from "tinyYolov2/index";
+ export const nets: {
+ ssdMobilenetv1: SsdMobilenetv1;
+ tinyFaceDetector: TinyFaceDetector;
+ tinyYolov2: TinyYolov2;
+ faceLandmark68Net: FaceLandmark68Net;
+ faceLandmark68TinyNet: FaceLandmark68TinyNet;
+ faceRecognitionNet: FaceRecognitionNet;
+ faceExpressionNet: FaceExpressionNet;
+ ageGenderNet: AgeGenderNet;
+ };
+ /**
+ * Attempts to detect all faces in an image using SSD Mobilenetv1 Network.
+ *
+ * @param input The input image.
+ * @param options (optional, default: see SsdMobilenetv1Options constructor for default parameters).
+ * @returns Bounding box of each face with score.
+ */
+ export const ssdMobilenetv1: (input: TNetInput, options: SsdMobilenetv1Options) => Promise;
+ /**
+ * Attempts to detect all faces in an image using the Tiny Face Detector.
+ *
+ * @param input The input image.
+ * @param options (optional, default: see TinyFaceDetectorOptions constructor for default parameters).
+ * @returns Bounding box of each face with score.
+ */
+ export const tinyFaceDetector: (input: TNetInput, options: TinyFaceDetectorOptions) => Promise;
+ /**
+ * Attempts to detect all faces in an image using the Tiny Yolov2 Network.
+ *
+ * @param input The input image.
+ * @param options (optional, default: see TinyYolov2Options constructor for default parameters).
+ * @returns Bounding box of each face with score.
+ */
+ export const tinyYolov2: (input: TNetInput, options: ITinyYolov2Options) => Promise;
+ /**
+ * Detects the 68 point face landmark positions of the face shown in an image.
+ *
+ * @param inputs The face image extracted from the bounding box of a face. Can
+ * also be an array of input images, which will be batch processed.
+ * @returns 68 point face landmarks or array thereof in case of batch input.
+ */
+ export const detectFaceLandmarks: (input: TNetInput) => Promise;
+ /**
+ * Detects the 68 point face landmark positions of the face shown in an image
+ * using a tinier version of the 68 point face landmark model, which is slightly
+ * faster at inference, but also slightly less accurate.
+ *
+ * @param inputs The face image extracted from the bounding box of a face. Can
+ * also be an array of input images, which will be batch processed.
+ * @returns 68 point face landmarks or array thereof in case of batch input.
+ */
+ export const detectFaceLandmarksTiny: (input: TNetInput) => Promise;
+ /**
+ * Computes a 128 entry vector (face descriptor / face embeddings) from the face shown in an image,
+ * which uniquely represents the features of that persons face. The computed face descriptor can
+ * be used to measure the similarity between faces, by computing the euclidean distance of two
+ * face descriptors.
+ *
+ * @param inputs The face image extracted from the aligned bounding box of a face. Can
+ * also be an array of input images, which will be batch processed.
+ * @returns Face descriptor with 128 entries or array thereof in case of batch input.
+ */
+ export const computeFaceDescriptor: (input: TNetInput) => Promise;
+ /**
+ * Recognizes the facial expressions from a face image.
+ *
+ * @param inputs The face image extracted from the bounding box of a face. Can
+ * also be an array of input images, which will be batch processed.
+ * @returns Facial expressions with corresponding probabilities or array thereof in case of batch input.
+ */
+ export const recognizeFaceExpressions: (input: TNetInput) => Promise;
+ /**
+ * Predicts age and gender from a face image.
+ *
+ * @param inputs The face image extracted from the bounding box of a face. Can
+ * also be an array of input images, which will be batch processed.
+ * @returns Predictions with age, gender and gender probability or array thereof in case of batch input.
+ */
+ export const predictAgeAndGender: (input: TNetInput) => Promise;
+ export const loadSsdMobilenetv1Model: (url: string) => Promise;
+ export const loadTinyFaceDetectorModel: (url: string) => Promise;
+ export const loadTinyYolov2Model: (url: string) => Promise;
+ export const loadFaceLandmarkModel: (url: string) => Promise;
+ export const loadFaceLandmarkTinyModel: (url: string) => Promise;
+ export const loadFaceRecognitionModel: (url: string) => Promise;
+ export const loadFaceExpressionModel: (url: string) => Promise;
+ export const loadAgeGenderModel: (url: string) => Promise;
+ export const loadFaceDetectionModel: (url: string) => Promise;
+ export const locateFaces: (input: TNetInput, options: SsdMobilenetv1Options) => Promise;
+ export const detectLandmarks: (input: TNetInput) => Promise;
+}
+declare module "globalApi/PredictFaceExpressionsTask" {
+ import { TNetInput } from "dom/index";
+ import { WithFaceDetection } from "factories/WithFaceDetection";
+ import { WithFaceExpressions } from "factories/WithFaceExpressions";
+ import { WithFaceLandmarks } from "factories/WithFaceLandmarks";
+ import { ComposableTask } from "globalApi/ComposableTask";
+ import { ComputeAllFaceDescriptorsTask, ComputeSingleFaceDescriptorTask } from "globalApi/ComputeFaceDescriptorsTasks";
+ import { PredictAllAgeAndGenderTask, PredictAllAgeAndGenderWithFaceAlignmentTask, PredictSingleAgeAndGenderTask, PredictSingleAgeAndGenderWithFaceAlignmentTask } from "globalApi/PredictAgeAndGenderTask";
+ export class PredictFaceExpressionsTaskBase extends ComposableTask {
+ protected parentTask: ComposableTask | Promise;
+ protected input: TNetInput;
+ protected extractedFaces?: any[] | undefined;
+ constructor(parentTask: ComposableTask | Promise, input: TNetInput, extractedFaces?: any[] | undefined);
+ }
+ export class PredictAllFaceExpressionsTask> extends PredictFaceExpressionsTaskBase[], TSource[]> {
+ run(): Promise[]>;
+ withAgeAndGender(): PredictAllAgeAndGenderTask>;
+ }
+ export class PredictSingleFaceExpressionsTask> extends PredictFaceExpressionsTaskBase | undefined, TSource | undefined> {
+ run(): Promise | undefined>;
+ withAgeAndGender(): PredictSingleAgeAndGenderTask>;
+ }
+ export class PredictAllFaceExpressionsWithFaceAlignmentTask>> extends PredictAllFaceExpressionsTask {
+ withAgeAndGender(): PredictAllAgeAndGenderWithFaceAlignmentTask>;
+ withFaceDescriptors(): ComputeAllFaceDescriptorsTask>;
+ }
+ export class PredictSingleFaceExpressionsWithFaceAlignmentTask>> extends PredictSingleFaceExpressionsTask {
+ withAgeAndGender(): PredictSingleAgeAndGenderWithFaceAlignmentTask>;
+ withFaceDescriptor(): ComputeSingleFaceDescriptorTask>;
+ }
+}
+declare module "globalApi/PredictAgeAndGenderTask" {
+ import { TNetInput } from "dom/index";
+ import { WithAge } from "factories/WithAge";
+ import { WithFaceDetection } from "factories/WithFaceDetection";
+ import { WithFaceLandmarks } from "factories/WithFaceLandmarks";
+ import { WithGender } from "factories/WithGender";
+ import { ComposableTask } from "globalApi/ComposableTask";
+ import { ComputeAllFaceDescriptorsTask, ComputeSingleFaceDescriptorTask } from "globalApi/ComputeFaceDescriptorsTasks";
+ import { PredictAllFaceExpressionsTask, PredictAllFaceExpressionsWithFaceAlignmentTask, PredictSingleFaceExpressionsTask, PredictSingleFaceExpressionsWithFaceAlignmentTask } from "globalApi/PredictFaceExpressionsTask";
+ export class PredictAgeAndGenderTaskBase extends ComposableTask {
+ protected parentTask: ComposableTask | Promise;
+ protected input: TNetInput;
+ protected extractedFaces?: any[] | undefined;
+ constructor(parentTask: ComposableTask | Promise, input: TNetInput, extractedFaces?: any[] | undefined);
+ }
+ export class PredictAllAgeAndGenderTask> extends PredictAgeAndGenderTaskBase>[], TSource[]> {
+ run(): Promise>[]>;
+ withFaceExpressions(): PredictAllFaceExpressionsTask>>;
+ }
+ export class PredictSingleAgeAndGenderTask> extends PredictAgeAndGenderTaskBase> | undefined, TSource | undefined> {
+ run(): Promise> | undefined>;
+ withFaceExpressions(): PredictSingleFaceExpressionsTask>>;
+ }
+ export class PredictAllAgeAndGenderWithFaceAlignmentTask>> extends PredictAllAgeAndGenderTask {
+ withFaceExpressions(): PredictAllFaceExpressionsWithFaceAlignmentTask>>;
+ withFaceDescriptors(): ComputeAllFaceDescriptorsTask>>;
+ }
+ export class PredictSingleAgeAndGenderWithFaceAlignmentTask>> extends PredictSingleAgeAndGenderTask {
+ withFaceExpressions(): PredictSingleFaceExpressionsWithFaceAlignmentTask>>;
+ withFaceDescriptor(): ComputeSingleFaceDescriptorTask>>;
+ }
+}
+declare module "globalApi/ComputeFaceDescriptorsTasks" {
+ import { TNetInput } from "dom/index";
+ import { WithFaceDescriptor } from "factories/WithFaceDescriptor";
+ import { WithFaceDetection } from "factories/WithFaceDetection";
+ import { WithFaceLandmarks } from "factories/WithFaceLandmarks";
+ import { ComposableTask } from "globalApi/ComposableTask";
+ import { PredictAllAgeAndGenderWithFaceAlignmentTask, PredictSingleAgeAndGenderWithFaceAlignmentTask } from "globalApi/PredictAgeAndGenderTask";
+ import { PredictAllFaceExpressionsWithFaceAlignmentTask, PredictSingleFaceExpressionsWithFaceAlignmentTask } from "globalApi/PredictFaceExpressionsTask";
+ export class ComputeFaceDescriptorsTaskBase extends ComposableTask {
+ protected parentTask: ComposableTask | Promise;
+ protected input: TNetInput;
+ constructor(parentTask: ComposableTask | Promise, input: TNetInput);
+ }
+ export class ComputeAllFaceDescriptorsTask>> extends ComputeFaceDescriptorsTaskBase[], TSource[]> {
+ run(): Promise[]>;
+ withFaceExpressions(): PredictAllFaceExpressionsWithFaceAlignmentTask>;
+ withAgeAndGender(): PredictAllAgeAndGenderWithFaceAlignmentTask>;
+ }
+ export class ComputeSingleFaceDescriptorTask>> extends ComputeFaceDescriptorsTaskBase | undefined, TSource | undefined> {
+ run(): Promise | undefined>;
+ withFaceExpressions(): PredictSingleFaceExpressionsWithFaceAlignmentTask>;
+ withAgeAndGender(): PredictSingleAgeAndGenderWithFaceAlignmentTask>;
+ }
+}
+declare module "globalApi/DetectFaceLandmarksTasks" {
+ import { FaceLandmarks68 } from "classes/FaceLandmarks68";
+ import { TNetInput } from "dom/index";
+ import { FaceLandmark68Net } from "faceLandmarkNet/FaceLandmark68Net";
+ import { FaceLandmark68TinyNet } from "faceLandmarkNet/FaceLandmark68TinyNet";
+ import { WithFaceDetection } from "factories/WithFaceDetection";
+ import { WithFaceLandmarks } from "factories/WithFaceLandmarks";
+ import { ComposableTask } from "globalApi/ComposableTask";
+ import { ComputeAllFaceDescriptorsTask, ComputeSingleFaceDescriptorTask } from "globalApi/ComputeFaceDescriptorsTasks";
+ import { PredictAllAgeAndGenderWithFaceAlignmentTask, PredictSingleAgeAndGenderWithFaceAlignmentTask } from "globalApi/PredictAgeAndGenderTask";
+ import { PredictAllFaceExpressionsWithFaceAlignmentTask, PredictSingleFaceExpressionsWithFaceAlignmentTask } from "globalApi/PredictFaceExpressionsTask";
+ export class DetectFaceLandmarksTaskBase extends ComposableTask {
+ protected parentTask: ComposableTask | Promise;
+ protected input: TNetInput;
+ protected useTinyLandmarkNet: boolean;
+ constructor(parentTask: ComposableTask | Promise, input: TNetInput, useTinyLandmarkNet: boolean);
+ protected get landmarkNet(): FaceLandmark68Net | FaceLandmark68TinyNet;
+ }
+ export class DetectAllFaceLandmarksTask> extends DetectFaceLandmarksTaskBase[], TSource[]> {
+ run(): Promise[]>;
+ withFaceExpressions(): PredictAllFaceExpressionsWithFaceAlignmentTask>;
+ withAgeAndGender(): PredictAllAgeAndGenderWithFaceAlignmentTask>;
+ withFaceDescriptors(): ComputeAllFaceDescriptorsTask>;
+ }
+ export class DetectSingleFaceLandmarksTask> extends DetectFaceLandmarksTaskBase | undefined, TSource | undefined> {
+ run(): Promise | undefined>;
+ withFaceExpressions(): PredictSingleFaceExpressionsWithFaceAlignmentTask>;
+ withAgeAndGender(): PredictSingleAgeAndGenderWithFaceAlignmentTask>;
+ withFaceDescriptor(): ComputeSingleFaceDescriptorTask>;
+ }
+}
+declare module "globalApi/types" {
+ import { FaceDetection } from "classes/FaceDetection";
+ import { TNetInput } from "dom/index";
+ import { SsdMobilenetv1Options } from "ssdMobilenetv1/SsdMobilenetv1Options";
+ import { TinyFaceDetectorOptions } from "tinyFaceDetector/TinyFaceDetectorOptions";
+ import { TinyYolov2Options } from "tinyYolov2/index";
+ export type FaceDetectionOptions = TinyFaceDetectorOptions | SsdMobilenetv1Options | TinyYolov2Options;
+ export type FaceDetectionFunction = (input: TNetInput) => Promise;
+}
+declare module "globalApi/DetectFacesTasks" {
+ import { FaceDetection } from "classes/FaceDetection";
+ import { TNetInput } from "dom/index";
+ import { ComposableTask } from "globalApi/ComposableTask";
+ import { DetectAllFaceLandmarksTask, DetectSingleFaceLandmarksTask } from "globalApi/DetectFaceLandmarksTasks";
+ import { PredictAllAgeAndGenderTask, PredictSingleAgeAndGenderTask } from "globalApi/PredictAgeAndGenderTask";
+ import { PredictAllFaceExpressionsTask, PredictSingleFaceExpressionsTask } from "globalApi/PredictFaceExpressionsTask";
+ import { FaceDetectionOptions } from "globalApi/types";
+ export class DetectFacesTaskBase extends ComposableTask {
+ protected input: TNetInput;
+ protected options: FaceDetectionOptions;
+ constructor(input: TNetInput, options?: FaceDetectionOptions);
+ }
+ export class DetectAllFacesTask extends DetectFacesTaskBase {
+ run(): Promise;
+ private runAndExtendWithFaceDetections;
+ withFaceLandmarks(useTinyLandmarkNet?: boolean): DetectAllFaceLandmarksTask<{
+ detection: FaceDetection;
+ }>;
+ withFaceExpressions(): PredictAllFaceExpressionsTask<{
+ detection: FaceDetection;
+ }>;
+ withAgeAndGender(): PredictAllAgeAndGenderTask<{
+ detection: FaceDetection;
+ }>;
+ }
+ export class DetectSingleFaceTask extends DetectFacesTaskBase {
+ run(): Promise;
+ private runAndExtendWithFaceDetection;
+ withFaceLandmarks(useTinyLandmarkNet?: boolean): DetectSingleFaceLandmarksTask<{
+ detection: FaceDetection;
+ }>;
+ withFaceExpressions(): PredictSingleFaceExpressionsTask<{
+ detection: FaceDetection;
+ }>;
+ withAgeAndGender(): PredictSingleAgeAndGenderTask<{
+ detection: FaceDetection;
+ }>;
+ }
+}
+declare module "globalApi/detectFaces" {
+ import { TNetInput } from "dom/index";
+ import { DetectAllFacesTask, DetectSingleFaceTask } from "globalApi/DetectFacesTasks";
+ import { FaceDetectionOptions } from "globalApi/types";
+ export function detectSingleFace(input: TNetInput, options?: FaceDetectionOptions): DetectSingleFaceTask;
+ export function detectAllFaces(input: TNetInput, options?: FaceDetectionOptions): DetectAllFacesTask;
+}
+declare module "globalApi/allFaces" {
+ import { TNetInput } from "dom/index";
+ import { WithFaceDescriptor, WithFaceDetection, WithFaceLandmarks } from "factories/index";
+ import { ITinyYolov2Options } from "tinyYolov2/index";
+ export function allFacesSsdMobilenetv1(input: TNetInput, minConfidence?: number): Promise>>[]>;
+ export function allFacesTinyYolov2(input: TNetInput, forwardParams?: ITinyYolov2Options): Promise>>[]>;
+ export const allFaces: typeof allFacesSsdMobilenetv1;
+}
+declare module "euclideanDistance" {
+ export function euclideanDistance(arr1: number[] | Float32Array, arr2: number[] | Float32Array): number;
+}
+declare module "globalApi/FaceMatcher" {
+ import { FaceMatch } from "classes/FaceMatch";
+ import { LabeledFaceDescriptors } from "classes/LabeledFaceDescriptors";
+ import { WithFaceDescriptor } from "factories/index";
+ export class FaceMatcher {
+ private _labeledDescriptors;
+ private _distanceThreshold;
+ constructor(inputs: LabeledFaceDescriptors | WithFaceDescriptor | Float32Array | Array | Float32Array>, distanceThreshold?: number);
+ get labeledDescriptors(): LabeledFaceDescriptors[];
+ get distanceThreshold(): number;
+ computeMeanDistance(queryDescriptor: Float32Array, descriptors: Float32Array[]): number;
+ matchDescriptor(queryDescriptor: Float32Array): FaceMatch;
+ findBestMatch(queryDescriptor: Float32Array): FaceMatch;
+ toJSON(): any;
+ static fromJSON(json: any): FaceMatcher;
+ }
+}
+declare module "globalApi/index" {
+ export * from "globalApi/allFaces";
+ export * from "globalApi/ComposableTask";
+ export * from "globalApi/ComputeFaceDescriptorsTasks";
+ export * from "globalApi/detectFaces";
+ export * from "globalApi/DetectFacesTasks";
+ export * from "globalApi/DetectFaceLandmarksTasks";
+ export * from "globalApi/FaceMatcher";
+ export * from "globalApi/nets";
+ export * from "globalApi/types";
+}
+declare module "tinyFaceDetector/index" {
+ import { TinyFaceDetector } from "tinyFaceDetector/TinyFaceDetector";
+ export * from "tinyFaceDetector/TinyFaceDetector";
+ export * from "tinyFaceDetector/TinyFaceDetectorOptions";
+ export function createTinyFaceDetector(weights: Float32Array): TinyFaceDetector;
+}
+declare module "resizeResults" {
+ import { IDimensions } from "classes/index";
+ export function resizeResults(results: T, dimensions: IDimensions): T;
+}
+declare module "index" {
+ import * as tf from '../dist/tfjs.esm.js';
+ import * as draw from "draw/index";
+ import * as utils from "utils/index";
+ export { tf, draw, utils };
+ export * from "ageGenderNet/index";
+ export * from "classes/index";
+ export * from "dom/index";
+ export * from "env/index";
+ export * from "faceExpressionNet/index";
+ export * from "faceLandmarkNet/index";
+ export * from "faceRecognitionNet/index";
+ export * from "factories/index";
+ export * from "globalApi/index";
+ export * from "ops/index";
+ export * from "ssdMobilenetv1/index";
+ export * from "tinyFaceDetector/index";
+ export * from "tinyYolov2/index";
+ export * from "euclideanDistance";
+ export * from "NeuralNetwork";
+ export * from "resizeResults";
+ export const version: {
+ faceapi: string;
+ node: boolean;
+ browser: boolean;
+ };
+}
diff --git a/dist/face-api.esm-nobundle.js b/dist/face-api.esm-nobundle.js
index 516bfaf..311c30a 100644
--- a/dist/face-api.esm-nobundle.js
+++ b/dist/face-api.esm-nobundle.js
@@ -5,5 +5,5 @@
author: '
*/
-var __create=Object.create,__defProp=Object.defineProperty,__getProtoOf=Object.getPrototypeOf,__hasOwnProp=Object.prototype.hasOwnProperty,__getOwnPropNames=Object.getOwnPropertyNames,__getOwnPropDesc=Object.getOwnPropertyDescriptor,__markAsModule=target=>__defProp(target,"__esModule",{value:!0}),__commonJS=(callback,module)=>()=>(module||(module={exports:{}},callback(module.exports,module)),module.exports),__export=(target,all)=>{__markAsModule(target);for(var name in all)__defProp(target,name,{get:all[name],enumerable:!0})},__exportStar=(target,module,desc)=>{if(__markAsModule(target),module&&typeof module=="object"||typeof module=="function")for(let key of __getOwnPropNames(module))!__hasOwnProp.call(target,key)&&key!=="default"&&__defProp(target,key,{get:()=>module[key],enumerable:!(desc=__getOwnPropDesc(module,key))||desc.enumerable});return target},__toModule=module=>module&&module.__esModule?module:__exportStar(__defProp(module!=null?__create(__getProtoOf(module)):{},"default",{value:module,enumerable:!0}),module);import*as dist_star from"@tensorflow/tfjs/dist/index.js";import*as tfjs_backend_wasm_star from"@tensorflow/tfjs-backend-wasm";var require_tfjs_esm=__commonJS(exports=>{__exportStar(exports,dist_star);__exportStar(exports,tfjs_backend_wasm_star)}),require_isNodejs=__commonJS((exports,module)=>{__export(exports,{isNodejs:()=>isNodejs3});function isNodejs3(){return typeof global=="object"&&!0&&typeof module!="undefined"&&typeof 
process!="undefined"&&!!process.version}}),tf42=__toModule(require_tfjs_esm()),draw_exports={};__export(draw_exports,{AnchorPosition:()=>AnchorPosition,DrawBox:()=>DrawBox,DrawBoxOptions:()=>DrawBoxOptions,DrawFaceLandmarks:()=>DrawFaceLandmarks,DrawFaceLandmarksOptions:()=>DrawFaceLandmarksOptions,DrawTextField:()=>DrawTextField,DrawTextFieldOptions:()=>DrawTextFieldOptions,drawContour:()=>drawContour,drawDetections:()=>drawDetections,drawFaceExpressions:()=>drawFaceExpressions,drawFaceLandmarks:()=>drawFaceLandmarks});function drawContour(ctx,points,isClosed=!1){if(ctx.beginPath(),points.slice(1).forEach(({x,y},prevIdx)=>{let from=points[prevIdx];ctx.moveTo(from.x,from.y),ctx.lineTo(x,y)}),isClosed){let from=points[points.length-1],to=points[0];if(!from||!to)return;ctx.moveTo(from.x,from.y),ctx.lineTo(to.x,to.y)}ctx.stroke()}var utils_exports={};__export(utils_exports,{computeReshapedDimensions:()=>computeReshapedDimensions,getCenterPoint:()=>getCenterPoint,isDimensions:()=>isDimensions,isEven:()=>isEven,isFloat:()=>isFloat,isTensor:()=>isTensor,isTensor1D:()=>isTensor1D,isTensor2D:()=>isTensor2D,isTensor3D:()=>isTensor3D,isTensor4D:()=>isTensor4D,isValidNumber:()=>isValidNumber,isValidProbablitiy:()=>isValidProbablitiy,range:()=>range,round:()=>round});var tf=__toModule(require_tfjs_esm()),Dimensions=class{constructor(width,height){if(!isValidNumber(width)||!isValidNumber(height))throw new Error(`Dimensions.constructor - expected width and height to be valid numbers, instead have ${JSON.stringify({width,height})}`);this._width=width,this._height=height}get width(){return this._width}get height(){return this._height}reverse(){return new Dimensions(1/this.width,1/this.height)}};function isTensor(tensor2,dim){return tensor2 instanceof tf.Tensor&&tensor2.shape.length===dim}function isTensor1D(tensor2){return isTensor(tensor2,1)}function isTensor2D(tensor2){return isTensor(tensor2,2)}function isTensor3D(tensor2){return isTensor(tensor2,3)}function 
isTensor4D(tensor2){return isTensor(tensor2,4)}function isFloat(num){return num%1!==0}function isEven(num){return num%2===0}function round(num,prec=2){let f=Math.pow(10,prec);return Math.floor(num*f)/f}function isDimensions(obj){return obj&&obj.width&&obj.height}function computeReshapedDimensions({width,height},inputSize){let scale2=inputSize/Math.max(height,width);return new Dimensions(Math.round(width*scale2),Math.round(height*scale2))}function getCenterPoint(pts){return pts.reduce((sum,pt)=>sum.add(pt),new Point(0,0)).div(new Point(pts.length,pts.length))}function range(num,start,step){return Array(num).fill(0).map((_,i)=>start+i*step)}function isValidNumber(num){return!!num&&num!==Infinity&&num!==-Infinity&&!isNaN(num)||num===0}function isValidProbablitiy(num){return isValidNumber(num)&&0<=num&&num<=1}var Point=class{constructor(x,y){this._x=x,this._y=y}get x(){return this._x}get y(){return this._y}add(pt){return new Point(this.x+pt.x,this.y+pt.y)}sub(pt){return new Point(this.x-pt.x,this.y-pt.y)}mul(pt){return new Point(this.x*pt.x,this.y*pt.y)}div(pt){return new Point(this.x/pt.x,this.y/pt.y)}abs(){return new Point(Math.abs(this.x),Math.abs(this.y))}magnitude(){return Math.sqrt(Math.pow(this.x,2)+Math.pow(this.y,2))}floor(){return new Point(Math.floor(this.x),Math.floor(this.y))}},Box=class{static isRect(rect){return!!rect&&[rect.x,rect.y,rect.width,rect.height].every(isValidNumber)}static assertIsValidBox(box,callee,allowNegativeDimensions=!1){if(!Box.isRect(box))throw new Error(`${callee} - invalid box: ${JSON.stringify(box)}, expected object with properties x, y, width, height`);if(!allowNegativeDimensions&&(box.width<0||box.height<0))throw new Error(`${callee} - width (${box.width}) and height (${box.height}) must be positive numbers`)}constructor(_box,allowNegativeDimensions=!0){let box=_box||{},isBbox=[box.left,box.top,box.right,box.bottom].every(isValidNumber),isRect=[box.x,box.y,box.width,box.height].every(isValidNumber);if(!isRect&&!isBbox)throw new 
Error(`Box.constructor - expected box to be IBoundingBox | IRect, instead have ${JSON.stringify(box)}`);let[x,y,width,height]=isRect?[box.x,box.y,box.width,box.height]:[box.left,box.top,box.right-box.left,box.bottom-box.top];Box.assertIsValidBox({x,y,width,height},"Box.constructor",allowNegativeDimensions),this._x=x,this._y=y,this._width=width,this._height=height}get x(){return this._x}get y(){return this._y}get width(){return this._width}get height(){return this._height}get left(){return this.x}get top(){return this.y}get right(){return this.x+this.width}get bottom(){return this.y+this.height}get area(){return this.width*this.height}get topLeft(){return new Point(this.left,this.top)}get topRight(){return new Point(this.right,this.top)}get bottomLeft(){return new Point(this.left,this.bottom)}get bottomRight(){return new Point(this.right,this.bottom)}round(){let[x,y,width,height]=[this.x,this.y,this.width,this.height].map(val=>Math.round(val));return new Box({x,y,width,height})}floor(){let[x,y,width,height]=[this.x,this.y,this.width,this.height].map(val=>Math.floor(val));return new Box({x,y,width,height})}toSquare(){let{x,y,width,height}=this,diff=Math.abs(width-height);return widthimageWidth&&(edx=-ex+imageWidth+w,ex=imageWidth),ey>imageHeight&&(edy=-ey+imageHeight+h,ey=imageHeight),x<1&&(edy=2-x,x=1),y<1&&(edy=2-y,y=1),{dy,edy,dx,edx,y,ey,x,ex,w,h}}calibrate(region){return new Box({left:this.left+region.left*this.width,top:this.top+region.top*this.height,right:this.right+region.right*this.width,bottom:this.bottom+region.bottom*this.height}).toSquare().round()}},BoundingBox=class extends Box{constructor(left,top,right,bottom,allowNegativeDimensions=!1){super({left,top,right,bottom},allowNegativeDimensions)}};var ObjectDetection=class{constructor(score,classScore,className,relativeBox,imageDims){this._imageDims=new Dimensions(imageDims.width,imageDims.height),this._score=score,this._classScore=classScore,this._className=className,this._box=new 
Box(relativeBox).rescale(this._imageDims)}get score(){return this._score}get classScore(){return this._classScore}get className(){return this._className}get box(){return this._box}get imageDims(){return this._imageDims}get imageWidth(){return this.imageDims.width}get imageHeight(){return this.imageDims.height}get relativeBox(){return new Box(this._box).rescale(this.imageDims.reverse())}forSize(width,height){return new ObjectDetection(this.score,this.classScore,this.className,this.relativeBox,{width,height})}},FaceDetection=class extends ObjectDetection{constructor(score,relativeBox,imageDims){super(score,score,"",relativeBox,imageDims)}forSize(width,height){let{score,relativeBox,imageDims}=super.forSize(width,height);return new FaceDetection(score,relativeBox,imageDims)}};function iou(box1,box2,isIOU=!0){let width=Math.max(0,Math.min(box1.right,box2.right)-Math.max(box1.left,box2.left)),height=Math.max(0,Math.min(box1.bottom,box2.bottom)-Math.max(box1.top,box2.top)),interSection=width*height;return isIOU?interSection/(box1.area+box2.area-interSection):interSection/Math.min(box1.area,box2.area)}function minBbox(pts){let xs=pts.map(pt=>pt.x),ys=pts.map(pt=>pt.y),minX=xs.reduce((min,x)=>xymaxmax({score,boxIndex})).sort((c1,c2)=>c1.score-c2.score).map(c=>c.boxIndex),pick=[];for(;indicesSortedByScore.length>0;){let curr=indicesSortedByScore.pop();pick.push(curr);let indices=indicesSortedByScore,outputs=[];for(let i=0;ioutputs[j]<=iouThreshold)}return pick}var tf2=__toModule(require_tfjs_esm());function normalize(x,meanRgb){return tf2.tidy(()=>{let[r,g,b]=meanRgb,avg_r=tf2.fill([...x.shape.slice(0,3),1],r,"float32"),avg_g=tf2.fill([...x.shape.slice(0,3),1],g,"float32"),avg_b=tf2.fill([...x.shape.slice(0,3),1],b,"float32"),avg_rgb=tf2.concat([avg_r,avg_g,avg_b],3);return tf2.sub(x,avg_rgb)})}var tf3=__toModule(require_tfjs_esm());function padToSquare(imgTensor,isCenterImage=!1){return tf3.tidy(()=>{let[height,width]=imgTensor.shape.slice(1);if(height===width)return 
imgTensor;let dimDiff=Math.abs(height-width),paddingAmount=Math.round(dimDiff*(isCenterImage?.5:1)),paddingAxis=height>width?2:1,createPaddingTensor=paddingAmount2=>{let paddingTensorShape=imgTensor.shape.slice();return paddingTensorShape[paddingAxis]=paddingAmount2,tf3.fill(paddingTensorShape,0,"float32")},paddingTensorAppend=createPaddingTensor(paddingAmount),remainingPaddingAmount=dimDiff-paddingTensorAppend.shape[paddingAxis],paddingTensorPrepend=isCenterImage&&remainingPaddingAmount?createPaddingTensor(remainingPaddingAmount):null,tensorsToStack=[paddingTensorPrepend,imgTensor,paddingTensorAppend].filter(t=>!!t).map(t=>tf3.cast(t,"float32"));return tf3.concat(tensorsToStack,paddingAxis)})}function shuffleArray(inputArray){let array=inputArray.slice();for(let i=array.length-1;i>0;i--){let j=Math.floor(Math.random()*(i+1)),x=array[i];array[i]=array[j],array[j]=x}return array}function sigmoid(x){return 1/(1+Math.exp(-x))}function inverseSigmoid(x){return Math.log(x/(1-x))}var Rect=class extends Box{constructor(x,y,width,height,allowNegativeDimensions=!1){super({x,y,width,height},allowNegativeDimensions)}},relX=.5,relY=.43,relScale=.45,FaceLandmarks=class{constructor(relativeFaceLandmarkPositions,imgDims,shift=new Point(0,0)){let{width,height}=imgDims;this._imgDims=new Dimensions(width,height),this._shift=shift,this._positions=relativeFaceLandmarkPositions.map(pt=>pt.mul(new Point(width,height)).add(shift))}get shift(){return new Point(this._shift.x,this._shift.y)}get imageWidth(){return this._imgDims.width}get imageHeight(){return this._imgDims.height}get positions(){return this._positions}get relativePositions(){return this._positions.map(pt=>pt.sub(this._shift).div(new Point(this.imageWidth,this.imageHeight)))}forSize(width,height){return new this.constructor(this.relativePositions,{width,height})}shiftBy(x,y){return new this.constructor(this.relativePositions,this._imgDims,new Point(x,y))}shiftByPoint(pt){return 
this.shiftBy(pt.x,pt.y)}align(detection,options={}){if(detection){let box=detection instanceof FaceDetection?detection.box.floor():new Box(detection);return this.shiftBy(box.x,box.y).align(null,options)}let{useDlibAlignment,minBoxPadding}=Object.assign({},{useDlibAlignment:!1,minBoxPadding:.2},options);return useDlibAlignment?this.alignDlib():this.alignMinBbox(minBoxPadding)}alignDlib(){let centers=this.getRefPointsForAlignment(),[leftEyeCenter,rightEyeCenter,mouthCenter]=centers,distToMouth=pt=>mouthCenter.sub(pt).magnitude(),eyeToMouthDist=(distToMouth(leftEyeCenter)+distToMouth(rightEyeCenter))/2,size=Math.floor(eyeToMouthDist/relScale),refPoint=getCenterPoint(centers),x=Math.floor(Math.max(0,refPoint.x-relX*size)),y=Math.floor(Math.max(0,refPoint.y-relY*size));return new Rect(x,y,Math.min(size,this.imageWidth+x),Math.min(size,this.imageHeight+y))}alignMinBbox(padding){let box=minBbox(this.positions);return box.pad(box.width*padding,box.height*padding)}getRefPointsForAlignment(){throw new Error("getRefPointsForAlignment not implemented by base class")}};var FaceLandmarks5=class extends FaceLandmarks{getRefPointsForAlignment(){let pts=this.positions;return[pts[0],pts[1],getCenterPoint([pts[3],pts[4]])]}};var FaceLandmarks68=class extends FaceLandmarks{getJawOutline(){return this.positions.slice(0,17)}getLeftEyeBrow(){return this.positions.slice(17,22)}getRightEyeBrow(){return this.positions.slice(22,27)}getNose(){return this.positions.slice(27,36)}getLeftEye(){return this.positions.slice(36,42)}getRightEye(){return this.positions.slice(42,48)}getMouth(){return this.positions.slice(48,68)}getRefPointsForAlignment(){return[this.getLeftEye(),this.getRightEye(),this.getMouth()].map(getCenterPoint)}};var FaceMatch=class{constructor(label,distance){this._label=label,this._distance=distance}get label(){return this._label}get distance(){return this._distance}toString(withDistance=!0){return`${this.label}${withDistance?` (${round(this.distance)})`:""}`}};var 
LabeledBox=class extends Box{static assertIsValidLabeledBox(box,callee){if(Box.assertIsValidBox(box,callee),!isValidNumber(box.label))throw new Error(`${callee} - expected property label (${box.label}) to be a number`)}constructor(box,label){super(box);this._label=label}get label(){return this._label}};var LabeledFaceDescriptors=class{constructor(label,descriptors){if(!(typeof label=="string"))throw new Error("LabeledFaceDescriptors - constructor expected label to be a string");if(!Array.isArray(descriptors)||descriptors.some(desc=>!(desc instanceof Float32Array)))throw new Error("LabeledFaceDescriptors - constructor expected descriptors to be an array of Float32Array");this._label=label,this._descriptors=descriptors}get label(){return this._label}get descriptors(){return this._descriptors}toJSON(){return{label:this.label,descriptors:this.descriptors.map(d=>Array.from(d))}}static fromJSON(json){let descriptors=json.descriptors.map(d=>new Float32Array(d));return new LabeledFaceDescriptors(json.label,descriptors)}};var PredictedBox=class extends LabeledBox{static assertIsValidPredictedBox(box,callee){if(LabeledBox.assertIsValidLabeledBox(box,callee),!isValidProbablitiy(box.score)||!isValidProbablitiy(box.classScore))throw new Error(`${callee} - expected properties score (${box.score}) and (${box.classScore}) to be a number between [0, 1]`)}constructor(box,label,score,classScore){super(box,label);this._score=score,this._classScore=classScore}get score(){return this._score}get classScore(){return this._classScore}};function isWithFaceDetection(obj){return obj.detection instanceof FaceDetection}function extendWithFaceDetection(sourceObj,detection){let extension={detection};return Object.assign({},sourceObj,extension)}function createBrowserEnv(){let fetch=window.fetch||function(){throw new Error("fetch - missing fetch implementation for browser environment")},readFile=function(){throw new Error("readFile - filesystem not available for browser 
environment")};return{Canvas:HTMLCanvasElement,CanvasRenderingContext2D,Image:HTMLImageElement,ImageData,Video:HTMLVideoElement,createCanvasElement:()=>document.createElement("canvas"),createImageElement:()=>document.createElement("img"),fetch,readFile}}function createFileSystem(fs){let requireFsError="";if(!fs)try{fs=require("fs")}catch(err){requireFsError=err.toString()}let readFile=fs?function(filePath){return new Promise((res,rej)=>{fs.readFile(filePath,function(err,buffer){return err?rej(err):res(buffer)})})}:function(){throw new Error(`readFile - failed to require fs in nodejs environment with error: ${requireFsError}`)};return{readFile}}function createNodejsEnv(){let Canvas=global.Canvas||global.HTMLCanvasElement,Image=global.Image||global.HTMLImageElement,createCanvasElement=function(){if(Canvas)return new Canvas;throw new Error("createCanvasElement - missing Canvas implementation for nodejs environment")},createImageElement=function(){if(Image)return new Image;throw new Error("createImageElement - missing Image implementation for nodejs environment")},fetch=global.fetch||function(){throw new Error("fetch - missing fetch implementation for nodejs environment")},fileSystem=createFileSystem();return{Canvas:Canvas||class{},CanvasRenderingContext2D:global.CanvasRenderingContext2D||class{},Image:Image||class{},ImageData:global.ImageData||class{},Video:global.HTMLVideoElement||class{},createCanvasElement,createImageElement,fetch,...fileSystem}}function isBrowser(){return typeof window=="object"&&typeof document!="undefined"&&typeof HTMLImageElement!="undefined"&&typeof HTMLCanvasElement!="undefined"&&typeof HTMLVideoElement!="undefined"&&typeof ImageData!="undefined"&&typeof CanvasRenderingContext2D!="undefined"}var isNodejs=__toModule(require_isNodejs()),environment;function getEnv(){if(!environment)throw new Error("getEnv - environment is not defined, check isNodejs() and isBrowser()");return environment}function setEnv(env16){environment=env16}function 
initialize(){if(isBrowser())return setEnv(createBrowserEnv());if(isNodejs.isNodejs())return setEnv(createNodejsEnv())}function monkeyPatch(env16){if(environment||initialize(),!environment)throw new Error("monkeyPatch - environment is not defined, check isNodejs() and isBrowser()");let{Canvas=environment.Canvas,Image=environment.Image}=env16;environment.Canvas=Canvas,environment.Image=Image,environment.createCanvasElement=env16.createCanvasElement||(()=>new Canvas),environment.createImageElement=env16.createImageElement||(()=>new Image),environment.ImageData=env16.ImageData||environment.ImageData,environment.Video=env16.Video||environment.Video,environment.fetch=env16.fetch||environment.fetch,environment.readFile=env16.readFile||environment.readFile}var env={getEnv,setEnv,initialize,createBrowserEnv,createFileSystem,createNodejsEnv,monkeyPatch,isBrowser,isNodejs:isNodejs.isNodejs};initialize();function resolveInput(arg){return!env.isNodejs()&&typeof arg=="string"?document.getElementById(arg):arg}function getContext2dOrThrow(canvasArg){let{Canvas,CanvasRenderingContext2D:CanvasRenderingContext2D2}=env.getEnv();if(canvasArg instanceof CanvasRenderingContext2D2)return canvasArg;let canvas=resolveInput(canvasArg);if(!(canvas instanceof Canvas))throw new Error("resolveContext2d - expected canvas to be of instance of Canvas");let ctx=canvas.getContext("2d");if(!ctx)throw new Error("resolveContext2d - canvas 2d context is null");return ctx}var AnchorPosition;(function(AnchorPosition2){AnchorPosition2.TOP_LEFT="TOP_LEFT",AnchorPosition2.TOP_RIGHT="TOP_RIGHT",AnchorPosition2.BOTTOM_LEFT="BOTTOM_LEFT",AnchorPosition2.BOTTOM_RIGHT="BOTTOM_RIGHT"})(AnchorPosition||(AnchorPosition={}));var DrawTextFieldOptions=class{constructor(options={}){let{anchorPosition,backgroundColor,fontColor,fontSize,fontStyle,padding}=options;this.anchorPosition=anchorPosition||AnchorPosition.TOP_LEFT,this.backgroundColor=backgroundColor||"rgba(0, 0, 0, 0.5)",this.fontColor=fontColor||"rgba(255, 255, 
255, 1)",this.fontSize=fontSize||14,this.fontStyle=fontStyle||"Georgia",this.padding=padding||4}},DrawTextField=class{constructor(text,anchor,options={}){this.text=typeof text=="string"?[text]:text instanceof DrawTextField?text.text:text,this.anchor=anchor,this.options=new DrawTextFieldOptions(options)}measureWidth(ctx){let{padding}=this.options;return this.text.map(l=>ctx.measureText(l).width).reduce((w0,w1)=>w0{let x=padding+upperLeft.x,y=padding+upperLeft.y+(i+1)*fontSize;ctx.fillText(textLine,x,y)})}},DrawBoxOptions=class{constructor(options={}){let{boxColor,lineWidth,label,drawLabelOptions}=options;this.boxColor=boxColor||"rgba(0, 0, 255, 1)",this.lineWidth=lineWidth||2,this.label=label;let defaultDrawLabelOptions={anchorPosition:AnchorPosition.BOTTOM_LEFT,backgroundColor:this.boxColor};this.drawLabelOptions=new DrawTextFieldOptions(Object.assign({},defaultDrawLabelOptions,drawLabelOptions))}},DrawBox=class{constructor(box,options={}){this.box=new Box(box),this.options=new DrawBoxOptions(options)}draw(canvasArg){let ctx=getContext2dOrThrow(canvasArg),{boxColor,lineWidth}=this.options,{x,y,width,height}=this.box;ctx.strokeStyle=boxColor,ctx.lineWidth=lineWidth,ctx.strokeRect(x,y,width,height);let{label}=this.options;label&&new DrawTextField([label],{x:x-lineWidth/2,y},this.options.drawLabelOptions).draw(canvasArg)}};function drawDetections(canvasArg,detections){let detectionsArray=Array.isArray(detections)?detections:[detections];detectionsArray.forEach(det=>{let score=det instanceof FaceDetection?det.score:isWithFaceDetection(det)?det.detection.score:void 0,box=det instanceof FaceDetection?det.box:isWithFaceDetection(det)?det.detection.box:new Box(det),label=score?`${round(score)}`:void 0;new DrawBox(box,{label}).draw(canvasArg)})}var tf18=__toModule(require_tfjs_esm());function isMediaLoaded(media){let{Image,Video}=env.getEnv();return media instanceof Image&&media.complete||media instanceof Video&&media.readyState>=3}function awaitMediaLoaded(media){return 
new Promise((resolve,reject)=>{if(media instanceof env.getEnv().Canvas||isMediaLoaded(media))return resolve(null);function onLoad(e){if(!e.currentTarget)return;e.currentTarget.removeEventListener("load",onLoad),e.currentTarget.removeEventListener("error",onError),resolve(e)}function onError(e){if(!e.currentTarget)return;e.currentTarget.removeEventListener("load",onLoad),e.currentTarget.removeEventListener("error",onError),reject(e)}media.addEventListener("load",onLoad),media.addEventListener("error",onError)})}function bufferToImage(buf){return new Promise((resolve,reject)=>{if(!(buf instanceof Blob))return reject("bufferToImage - expected buf to be of type: Blob");let reader=new FileReader;reader.onload=()=>{if(typeof reader.result!="string")return reject("bufferToImage - expected reader.result to be a string, in onload");let img=env.getEnv().createImageElement();img.onload=()=>resolve(img),img.onerror=reject,img.src=reader.result},reader.onerror=reject,reader.readAsDataURL(buf)})}function getMediaDimensions(input){let{Image,Video}=env.getEnv();return input instanceof Image?new Dimensions(input.naturalWidth,input.naturalHeight):input instanceof Video?new Dimensions(input.videoWidth,input.videoHeight):new Dimensions(input.width,input.height)}function createCanvas({width,height}){let{createCanvasElement}=env.getEnv(),canvas=createCanvasElement();return canvas.width=width,canvas.height=height,canvas}function createCanvasFromMedia(media,dims){let{ImageData:ImageData2}=env.getEnv();if(!(media instanceof ImageData2)&&!isMediaLoaded(media))throw new Error("createCanvasFromMedia - media has not finished loading yet");let{width,height}=dims||getMediaDimensions(media),canvas=createCanvas({width,height});return media instanceof ImageData2?getContext2dOrThrow(canvas).putImageData(media,0,0):getContext2dOrThrow(canvas).drawImage(media,0,0,width,height),canvas}var tf4=__toModule(require_tfjs_esm());async function imageTensorToCanvas(imgTensor,canvas){let 
targetCanvas=canvas||env.getEnv().createCanvasElement(),[height,width,numChannels]=imgTensor.shape.slice(isTensor4D(imgTensor)?1:0),imgTensor3D=tf4.tidy(()=>imgTensor.as3D(height,width,numChannels).toInt());return await tf4.browser.toPixels(imgTensor3D,targetCanvas),imgTensor3D.dispose(),targetCanvas}function isMediaElement(input){let{Image,Canvas,Video}=env.getEnv();return input instanceof Image||input instanceof Canvas||input instanceof Video}var tf5=__toModule(require_tfjs_esm());function imageToSquare(input,inputSize,centerImage=!1){let{Image,Canvas}=env.getEnv();if(!(input instanceof Image||input instanceof Canvas))throw new Error("imageToSquare - expected arg0 to be HTMLImageElement | HTMLCanvasElement");let dims=getMediaDimensions(input),scale2=inputSize/Math.max(dims.height,dims.width),width=scale2*dims.width,height=scale2*dims.height,targetCanvas=createCanvas({width:inputSize,height:inputSize}),inputCanvas=input instanceof Canvas?input:createCanvasFromMedia(input),offset=Math.abs(width-height)/2,dx=centerImage&&width{if(isTensor3D(input)){this._imageTensors[idx]=input,this._inputDimensions[idx]=input.shape;return}if(isTensor4D(input)){let batchSize=input.shape[0];if(batchSize!==1)throw new Error(`NetInput - tf.Tensor4D with batchSize ${batchSize} passed, but not supported in input array`);this._imageTensors[idx]=input,this._inputDimensions[idx]=input.shape.slice(1);return}let canvas=input instanceof env.getEnv().Canvas?input:createCanvasFromMedia(input);this._canvases[idx]=canvas,this._inputDimensions[idx]=[canvas.height,canvas.width,3]})}get imageTensors(){return this._imageTensors}get canvases(){return this._canvases}get isBatchInput(){return this.batchSize>1||this._treatAsBatchInput}get batchSize(){return this._batchSize}get inputDimensions(){return this._inputDimensions}get inputSize(){return this._inputSize}get reshapedInputDimensions(){return 
range(this.batchSize,0,1).map((_,batchIdx)=>this.getReshapedInputDimensions(batchIdx))}getInput(batchIdx){return this.canvases[batchIdx]||this.imageTensors[batchIdx]}getInputDimensions(batchIdx){return this._inputDimensions[batchIdx]}getInputHeight(batchIdx){return this._inputDimensions[batchIdx][0]}getInputWidth(batchIdx){return this._inputDimensions[batchIdx][1]}getReshapedInputDimensions(batchIdx){if(typeof this.inputSize!="number")throw new Error("getReshapedInputDimensions - inputSize not set, toBatchTensor has not been called yet");let width=this.getInputWidth(batchIdx),height=this.getInputHeight(batchIdx);return computeReshapedDimensions({width,height},this.inputSize)}toBatchTensor(inputSize,isCenterInputs=!0){return this._inputSize=inputSize,tf5.tidy(()=>{let inputTensors=range(this.batchSize,0,1).map(batchIdx=>{let input=this.getInput(batchIdx);if(input instanceof tf5.Tensor){let imgTensor=isTensor4D(input)?input:input.expandDims();return imgTensor=padToSquare(imgTensor,isCenterInputs),(imgTensor.shape[1]!==inputSize||imgTensor.shape[2]!==inputSize)&&(imgTensor=tf5.image.resizeBilinear(imgTensor,[inputSize,inputSize])),imgTensor.as3D(inputSize,inputSize,3)}if(input instanceof env.getEnv().Canvas)return tf5.browser.fromPixels(imageToSquare(input,inputSize,isCenterInputs));throw new Error(`toBatchTensor - at batchIdx ${batchIdx}, expected input to be instanceof tf.Tensor or instanceof HTMLCanvasElement, instead have ${input}`)}),batchTensor=tf5.stack(inputTensors.map(t=>tf5.cast(t,"float32"))).as4D(this.batchSize,inputSize,inputSize,3);return batchTensor})}};async function toNetInput(inputs){if(inputs instanceof NetInput)return inputs;let inputArgArray=Array.isArray(inputs)?inputs:[inputs];if(!inputArgArray.length)throw new Error("toNetInput - empty array passed as input");let getIdxHint=idx=>Array.isArray(inputs)?` at input index ${idx}:`:"",inputArray=inputArgArray.map(resolveInput);return 
inputArray.forEach((input,i)=>{if(!isMediaElement(input)&&!isTensor3D(input)&&!isTensor4D(input))throw typeof inputArgArray[i]=="string"?new Error(`toNetInput -${getIdxHint(i)} string passed, but could not resolve HTMLElement for element id ${inputArgArray[i]}`):new Error(`toNetInput -${getIdxHint(i)} expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id`);if(isTensor4D(input)){let batchSize=input.shape[0];if(batchSize!==1)throw new Error(`toNetInput -${getIdxHint(i)} tf.Tensor4D with batchSize ${batchSize} passed, but not supported in input array`)}}),await Promise.all(inputArray.map(input=>isMediaElement(input)&&awaitMediaLoaded(input))),new NetInput(inputArray,Array.isArray(inputs))}async function extractFaces(input,detections){let{Canvas}=env.getEnv(),canvas=input;if(!(input instanceof Canvas)){let netInput=await toNetInput(input);if(netInput.batchSize>1)throw new Error("extractFaces - batchSize > 1 not supported");let tensorOrCanvas=netInput.getInput(0);canvas=tensorOrCanvas instanceof Canvas?tensorOrCanvas:await imageTensorToCanvas(tensorOrCanvas)}let ctx=getContext2dOrThrow(canvas),boxes=detections.map(det=>det instanceof FaceDetection?det.forSize(canvas.width,canvas.height).box.floor():det).map(box=>box.clipAtImageBorders(canvas.width,canvas.height));return boxes.map(({x,y,width,height})=>{let faceImg=createCanvas({width,height});return getContext2dOrThrow(faceImg).putImageData(ctx.getImageData(x,y,width,height),0,0),faceImg})}var tf6=__toModule(require_tfjs_esm());async function extractFaceTensors(imageTensor,detections){if(!isTensor3D(imageTensor)&&!isTensor4D(imageTensor))throw new Error("extractFaceTensors - expected image tensor to be 3D or 4D");if(isTensor4D(imageTensor)&&imageTensor.shape[0]>1)throw new Error("extractFaceTensors - batchSize > 1 not supported");return 
tf6.tidy(()=>{let[imgHeight,imgWidth,numChannels]=imageTensor.shape.slice(isTensor4D(imageTensor)?1:0),boxes=detections.map(det=>det instanceof FaceDetection?det.forSize(imgWidth,imgHeight).box:det).map(box=>box.clipAtImageBorders(imgWidth,imgHeight)),faceTensors=boxes.map(({x,y,width,height})=>tf6.slice3d(imageTensor.as3D(imgHeight,imgWidth,numChannels),[y,x,0],[height,width,numChannels]));return faceTensors})}async function fetchOrThrow(url,init){let fetch=env.getEnv().fetch,res=await fetch(url,init);if(!(res.status<400))throw new Error(`failed to fetch: (${res.status}) ${res.statusText}, from url: ${res.url}`);return res}async function fetchImage(uri){let res=await fetchOrThrow(uri),blob=await res.blob();if(!blob.type.startsWith("image/"))throw new Error(`fetchImage - expected blob type to be of type image/*, instead have: ${blob.type}, for url: ${res.url}`);return bufferToImage(blob)}async function fetchJson(uri){return(await fetchOrThrow(uri)).json()}async function fetchNetWeights(uri){return new Float32Array(await(await fetchOrThrow(uri)).arrayBuffer())}var tf7=__toModule(require_tfjs_esm());function getModelUris(uri,defaultModelName){let defaultManifestFilename=`${defaultModelName}-weights_manifest.json`;if(!uri)return{modelBaseUri:"",manifestUri:defaultManifestFilename};if(uri==="/")return{modelBaseUri:"/",manifestUri:`/${defaultManifestFilename}`};let protocol=uri.startsWith("http://")?"http://":uri.startsWith("https://")?"https://":"";uri=uri.replace(protocol,"");let parts=uri.split("/").filter(s=>s),manifestFile=uri.endsWith(".json")?parts[parts.length-1]:defaultManifestFilename,modelBaseUri=protocol+(uri.endsWith(".json")?parts.slice(0,parts.length-1):parts).join("/");return modelBaseUri=uri.startsWith("/")?`/${modelBaseUri}`:modelBaseUri,{modelBaseUri,manifestUri:modelBaseUri==="/"?`/${manifestFile}`:`${modelBaseUri}/${manifestFile}`}}async function 
loadWeightMap(uri,defaultModelName){let{manifestUri,modelBaseUri}=getModelUris(uri,defaultModelName),manifest=await fetchJson(manifestUri);return tf7.io.loadWeights(manifest,modelBaseUri)}function matchDimensions(input,reference,useMediaDimensions=!1){let{width,height}=useMediaDimensions?getMediaDimensions(reference):reference;return input.width=width,input.height=height,{width,height}}var tf15=__toModule(require_tfjs_esm()),tf8=__toModule(require_tfjs_esm()),NeuralNetwork=class{constructor(_name){this._name=_name;this._params=void 0;this._paramMappings=[]}get params(){return this._params}get paramMappings(){return this._paramMappings}get isLoaded(){return!!this.params}getParamFromPath(paramPath){let{obj,objProp}=this.traversePropertyPath(paramPath);return obj[objProp]}reassignParamFromPath(paramPath,tensor2){let{obj,objProp}=this.traversePropertyPath(paramPath);obj[objProp].dispose(),obj[objProp]=tensor2}getParamList(){return this._paramMappings.map(({paramPath})=>({path:paramPath,tensor:this.getParamFromPath(paramPath)}))}getTrainableParams(){return this.getParamList().filter(param=>param.tensor instanceof tf8.Variable)}getFrozenParams(){return this.getParamList().filter(param=>!(param.tensor instanceof tf8.Variable))}variable(){this.getFrozenParams().forEach(({path,tensor:tensor2})=>{this.reassignParamFromPath(path,tensor2.variable())})}freeze(){this.getTrainableParams().forEach(({path,tensor:variable})=>{let tensor2=tf8.tensor(variable.dataSync());variable.dispose(),this.reassignParamFromPath(path,tensor2)})}dispose(throwOnRedispose=!0){this.getParamList().forEach(param=>{if(throwOnRedispose&¶m.tensor.isDisposed)throw new Error(`param tensor has already been disposed for path ${param.path}`);param.tensor.dispose()}),this._params=void 0}serializeParams(){return new Float32Array(this.getParamList().map(({tensor:tensor2})=>Array.from(tensor2.dataSync())).reduce((flat,arr)=>flat.concat(arr)))}async load(weightsOrUrl){if(weightsOrUrl instanceof 
Float32Array){this.extractWeights(weightsOrUrl);return}await this.loadFromUri(weightsOrUrl)}async loadFromUri(uri){if(uri&&typeof uri!="string")throw new Error(`${this._name}.loadFromUri - expected model uri`);let weightMap=await loadWeightMap(uri,this.getDefaultModelName());this.loadFromWeightMap(weightMap)}async loadFromDisk(filePath){if(filePath&&typeof filePath!="string")throw new Error(`${this._name}.loadFromDisk - expected model file path`);let{readFile}=env.getEnv(),{manifestUri,modelBaseUri}=getModelUris(filePath,this.getDefaultModelName()),fetchWeightsFromDisk=filePaths=>Promise.all(filePaths.map(filePath2=>readFile(filePath2).then(buf=>buf.buffer))),loadWeights=tf8.io.weightsLoaderFactory(fetchWeightsFromDisk),manifest=JSON.parse((await readFile(manifestUri)).toString()),weightMap=await loadWeights(manifest,modelBaseUri);this.loadFromWeightMap(weightMap)}loadFromWeightMap(weightMap){let{paramMappings,params}=this.extractParamsFromWeigthMap(weightMap);this._paramMappings=paramMappings,this._params=params}extractWeights(weights){let{paramMappings,params}=this.extractParams(weights);this._paramMappings=paramMappings,this._params=params}traversePropertyPath(paramPath){if(!this.params)throw new Error("traversePropertyPath - model has no loaded params");let result=paramPath.split("/").reduce((res,objProp2)=>{if(!res.nextObj.hasOwnProperty(objProp2))throw new Error(`traversePropertyPath - object does not have property ${objProp2}, for path ${paramPath}`);return{obj:res.nextObj,objProp:objProp2,nextObj:res.nextObj[objProp2]}},{nextObj:this.params}),{obj,objProp}=result;if(!obj||!objProp||!(obj[objProp]instanceof tf8.Tensor))throw new Error(`traversePropertyPath - parameter is not a tensor, for path ${paramPath}`);return{obj,objProp}}},tf10=__toModule(require_tfjs_esm()),tf9=__toModule(require_tfjs_esm());function depthwiseSeparableConv(x,params,stride){return tf9.tidy(()=>{let 
out=tf9.separableConv2d(x,params.depthwise_filter,params.pointwise_filter,stride,"same");return out=tf9.add(out,params.bias),out})}function denseBlock3(x,denseBlockParams,isFirstLayer=!1){return tf10.tidy(()=>{let out1=tf10.relu(isFirstLayer?tf10.add(tf10.conv2d(x,denseBlockParams.conv0.filters,[2,2],"same"),denseBlockParams.conv0.bias):depthwiseSeparableConv(x,denseBlockParams.conv0,[2,2])),out2=depthwiseSeparableConv(out1,denseBlockParams.conv1,[1,1]),in3=tf10.relu(tf10.add(out1,out2)),out3=depthwiseSeparableConv(in3,denseBlockParams.conv2,[1,1]);return tf10.relu(tf10.add(out1,tf10.add(out2,out3)))})}function denseBlock4(x,denseBlockParams,isFirstLayer=!1,isScaleDown=!0){return tf10.tidy(()=>{let out1=tf10.relu(isFirstLayer?tf10.add(tf10.conv2d(x,denseBlockParams.conv0.filters,isScaleDown?[2,2]:[1,1],"same"),denseBlockParams.conv0.bias):depthwiseSeparableConv(x,denseBlockParams.conv0,isScaleDown?[2,2]:[1,1])),out2=depthwiseSeparableConv(out1,denseBlockParams.conv1,[1,1]),in3=tf10.relu(tf10.add(out1,out2)),out3=depthwiseSeparableConv(in3,denseBlockParams.conv2,[1,1]),in4=tf10.relu(tf10.add(out1,tf10.add(out2,out3))),out4=depthwiseSeparableConv(in4,denseBlockParams.conv3,[1,1]);return tf10.relu(tf10.add(out1,tf10.add(out2,tf10.add(out3,out4))))})}var tf11=__toModule(require_tfjs_esm());function convLayer(x,params,padding="same",withRelu=!1){return tf11.tidy(()=>{let out=tf11.add(tf11.conv2d(x,params.filters,[1,1],padding),params.bias);return withRelu?tf11.relu(out):out})}function disposeUnusedWeightTensors(weightMap,paramMappings){Object.keys(weightMap).forEach(path=>{paramMappings.some(pm=>pm.originalPath===path)||weightMap[path].dispose()})}var tf12=__toModule(require_tfjs_esm());function extractConvParamsFactory(extractWeights,paramMappings){return function(channelsIn,channelsOut,filterSize,mappedPrefix){let 
filters=tf12.tensor4d(extractWeights(channelsIn*channelsOut*filterSize*filterSize),[filterSize,filterSize,channelsIn,channelsOut]),bias=tf12.tensor1d(extractWeights(channelsOut));return paramMappings.push({paramPath:`${mappedPrefix}/filters`},{paramPath:`${mappedPrefix}/bias`}),{filters,bias}}}var tf13=__toModule(require_tfjs_esm());function extractFCParamsFactory(extractWeights,paramMappings){return function(channelsIn,channelsOut,mappedPrefix){let fc_weights=tf13.tensor2d(extractWeights(channelsIn*channelsOut),[channelsIn,channelsOut]),fc_bias=tf13.tensor1d(extractWeights(channelsOut));return paramMappings.push({paramPath:`${mappedPrefix}/weights`},{paramPath:`${mappedPrefix}/bias`}),{weights:fc_weights,bias:fc_bias}}}var tf14=__toModule(require_tfjs_esm()),SeparableConvParams=class{constructor(depthwise_filter,pointwise_filter,bias){this.depthwise_filter=depthwise_filter;this.pointwise_filter=pointwise_filter;this.bias=bias}};function extractSeparableConvParamsFactory(extractWeights,paramMappings){return function(channelsIn,channelsOut,mappedPrefix){let depthwise_filter=tf14.tensor4d(extractWeights(3*3*channelsIn),[3,3,channelsIn,1]),pointwise_filter=tf14.tensor4d(extractWeights(channelsIn*channelsOut),[1,1,channelsIn,channelsOut]),bias=tf14.tensor1d(extractWeights(channelsOut));return paramMappings.push({paramPath:`${mappedPrefix}/depthwise_filter`},{paramPath:`${mappedPrefix}/pointwise_filter`},{paramPath:`${mappedPrefix}/bias`}),new SeparableConvParams(depthwise_filter,pointwise_filter,bias)}}function loadSeparableConvParamsFactory(extractWeightEntry){return function(prefix){let depthwise_filter=extractWeightEntry(`${prefix}/depthwise_filter`,4),pointwise_filter=extractWeightEntry(`${prefix}/pointwise_filter`,4),bias=extractWeightEntry(`${prefix}/bias`,1);return new SeparableConvParams(depthwise_filter,pointwise_filter,bias)}}function extractWeightEntryFactory(weightMap,paramMappings){return function(originalPath,paramRank,mappedPath){let 
tensor2=weightMap[originalPath];if(!isTensor(tensor2,paramRank))throw new Error(`expected weightMap[${originalPath}] to be a Tensor${paramRank}D, instead have ${tensor2}`);return paramMappings.push({originalPath,paramPath:mappedPath||originalPath}),tensor2}}function extractWeightsFactory(weights){let remainingWeights=weights;function extractWeights(numWeights){let ret=remainingWeights.slice(0,numWeights);return remainingWeights=remainingWeights.slice(numWeights),ret}function getRemainingWeights(){return remainingWeights}return{extractWeights,getRemainingWeights}}function extractorsFactory(extractWeights,paramMappings){let extractConvParams=extractConvParamsFactory(extractWeights,paramMappings),extractSeparableConvParams=extractSeparableConvParamsFactory(extractWeights,paramMappings);function extractDenseBlock3Params(channelsIn,channelsOut,mappedPrefix,isFirstLayer=!1){let conv0=isFirstLayer?extractConvParams(channelsIn,channelsOut,3,`${mappedPrefix}/conv0`):extractSeparableConvParams(channelsIn,channelsOut,`${mappedPrefix}/conv0`),conv1=extractSeparableConvParams(channelsOut,channelsOut,`${mappedPrefix}/conv1`),conv22=extractSeparableConvParams(channelsOut,channelsOut,`${mappedPrefix}/conv2`);return{conv0,conv1,conv2:conv22}}function extractDenseBlock4Params(channelsIn,channelsOut,mappedPrefix,isFirstLayer=!1){let{conv0,conv1,conv2:conv22}=extractDenseBlock3Params(channelsIn,channelsOut,mappedPrefix,isFirstLayer),conv3=extractSeparableConvParams(channelsOut,channelsOut,`${mappedPrefix}/conv3`);return{conv0,conv1,conv2:conv22,conv3}}return{extractDenseBlock3Params,extractDenseBlock4Params}}function extractParams(weights){let 
paramMappings=[],{extractWeights,getRemainingWeights}=extractWeightsFactory(weights),{extractDenseBlock4Params}=extractorsFactory(extractWeights,paramMappings),dense0=extractDenseBlock4Params(3,32,"dense0",!0),dense1=extractDenseBlock4Params(32,64,"dense1"),dense2=extractDenseBlock4Params(64,128,"dense2"),dense3=extractDenseBlock4Params(128,256,"dense3");if(getRemainingWeights().length!==0)throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);return{paramMappings,params:{dense0,dense1,dense2,dense3}}}function loadConvParamsFactory(extractWeightEntry){return function(prefix){let filters=extractWeightEntry(`${prefix}/filters`,4),bias=extractWeightEntry(`${prefix}/bias`,1);return{filters,bias}}}function loadParamsFactory(weightMap,paramMappings){let extractWeightEntry=extractWeightEntryFactory(weightMap,paramMappings),extractConvParams=loadConvParamsFactory(extractWeightEntry),extractSeparableConvParams=loadSeparableConvParamsFactory(extractWeightEntry);function extractDenseBlock3Params(prefix,isFirstLayer=!1){let conv0=isFirstLayer?extractConvParams(`${prefix}/conv0`):extractSeparableConvParams(`${prefix}/conv0`),conv1=extractSeparableConvParams(`${prefix}/conv1`),conv22=extractSeparableConvParams(`${prefix}/conv2`);return{conv0,conv1,conv2:conv22}}function extractDenseBlock4Params(prefix,isFirstLayer=!1){let conv0=isFirstLayer?extractConvParams(`${prefix}/conv0`):extractSeparableConvParams(`${prefix}/conv0`),conv1=extractSeparableConvParams(`${prefix}/conv1`),conv22=extractSeparableConvParams(`${prefix}/conv2`),conv3=extractSeparableConvParams(`${prefix}/conv3`);return{conv0,conv1,conv2:conv22,conv3}}return{extractDenseBlock3Params,extractDenseBlock4Params}}function extractParamsFromWeigthMap(weightMap){let 
paramMappings=[],{extractDenseBlock4Params}=loadParamsFactory(weightMap,paramMappings),params={dense0:extractDenseBlock4Params("dense0",!0),dense1:extractDenseBlock4Params("dense1"),dense2:extractDenseBlock4Params("dense2"),dense3:extractDenseBlock4Params("dense3")};return disposeUnusedWeightTensors(weightMap,paramMappings),{params,paramMappings}}var FaceFeatureExtractor=class extends NeuralNetwork{constructor(){super("FaceFeatureExtractor")}forwardInput(input){let{params}=this;if(!params)throw new Error("FaceFeatureExtractor - load model before inference");return tf15.tidy(()=>{let batchTensor=tf15.cast(input.toBatchTensor(112,!0),"float32"),meanRgb=[122.782,117.001,104.298],normalized=normalize(batchTensor,meanRgb).div(tf15.scalar(255)),out=denseBlock4(normalized,params.dense0,!0);return out=denseBlock4(out,params.dense1),out=denseBlock4(out,params.dense2),out=denseBlock4(out,params.dense3),out=tf15.avgPool(out,[7,7],[2,2],"valid"),out})}async forward(input){return this.forwardInput(await toNetInput(input))}getDefaultModelName(){return"face_feature_extractor_model"}extractParamsFromWeigthMap(weightMap){return extractParamsFromWeigthMap(weightMap)}extractParams(weights){return extractParams(weights)}},tf17=__toModule(require_tfjs_esm()),tf16=__toModule(require_tfjs_esm());function fullyConnectedLayer(x,params){return tf16.tidy(()=>tf16.add(tf16.matMul(x,params.weights),params.bias))}function extractParams3(weights,channelsIn,channelsOut){let paramMappings=[],{extractWeights,getRemainingWeights}=extractWeightsFactory(weights),extractFCParams=extractFCParamsFactory(extractWeights,paramMappings),fc=extractFCParams(channelsIn,channelsOut,"fc");if(getRemainingWeights().length!==0)throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);return{paramMappings,params:{fc}}}function extractParamsFromWeigthMap3(weightMap){let paramMappings=[],extractWeightEntry=extractWeightEntryFactory(weightMap,paramMappings);function extractFcParams(prefix){let 
weights=extractWeightEntry(`${prefix}/weights`,2),bias=extractWeightEntry(`${prefix}/bias`,1);return{weights,bias}}let params={fc:extractFcParams("fc")};return disposeUnusedWeightTensors(weightMap,paramMappings),{params,paramMappings}}function seperateWeightMaps(weightMap){let featureExtractorMap={},classifierMap={};return Object.keys(weightMap).forEach(key=>{let map=key.startsWith("fc")?classifierMap:featureExtractorMap;map[key]=weightMap[key]}),{featureExtractorMap,classifierMap}}var FaceProcessor=class extends NeuralNetwork{constructor(_name,faceFeatureExtractor){super(_name);this._faceFeatureExtractor=faceFeatureExtractor}get faceFeatureExtractor(){return this._faceFeatureExtractor}runNet(input){let{params}=this;if(!params)throw new Error(`${this._name} - load model before inference`);return tf17.tidy(()=>{let bottleneckFeatures=input instanceof NetInput?this.faceFeatureExtractor.forwardInput(input):input;return fullyConnectedLayer(bottleneckFeatures.as2D(bottleneckFeatures.shape[0],-1),params.fc)})}dispose(throwOnRedispose=!0){this.faceFeatureExtractor.dispose(throwOnRedispose),super.dispose(throwOnRedispose)}loadClassifierParams(weights){let{params,paramMappings}=this.extractClassifierParams(weights);this._params=params,this._paramMappings=paramMappings}extractClassifierParams(weights){return extractParams3(weights,this.getClassifierChannelsIn(),this.getClassifierChannelsOut())}extractParamsFromWeigthMap(weightMap){let{featureExtractorMap,classifierMap}=seperateWeightMaps(weightMap);return this.faceFeatureExtractor.loadFromWeightMap(featureExtractorMap),extractParamsFromWeigthMap3(classifierMap)}extractParams(weights){let cIn=this.getClassifierChannelsIn(),cOut=this.getClassifierChannelsOut(),classifierWeightSize=cOut*cIn+cOut,featureExtractorWeights=weights.slice(0,weights.length-classifierWeightSize),classifierWeights=weights.slice(weights.length-classifierWeightSize);return 
this.faceFeatureExtractor.extractWeights(featureExtractorWeights),this.extractClassifierParams(classifierWeights)}},FACE_EXPRESSION_LABELS=["neutral","happy","sad","angry","fearful","disgusted","surprised"],FaceExpressions=class{constructor(probabilities){if(probabilities.length!==7)throw new Error(`FaceExpressions.constructor - expected probabilities.length to be 7, have: ${probabilities.length}`);FACE_EXPRESSION_LABELS.forEach((expression,idx)=>{this[expression]=probabilities[idx]})}asSortedArray(){return FACE_EXPRESSION_LABELS.map(expression=>({expression,probability:this[expression]})).sort((e0,e1)=>e1.probability-e0.probability)}},FaceExpressionNet=class extends FaceProcessor{constructor(faceFeatureExtractor=new FaceFeatureExtractor){super("FaceExpressionNet",faceFeatureExtractor)}forwardInput(input){return tf18.tidy(()=>tf18.softmax(this.runNet(input)))}async forward(input){return this.forwardInput(await toNetInput(input))}async predictExpressions(input){let netInput=await toNetInput(input),out=await this.forwardInput(netInput),probabilitesByBatch=await Promise.all(tf18.unstack(out).map(async t=>{let data=await t.data();return t.dispose(),data}));out.dispose();let predictionsByBatch=probabilitesByBatch.map(probabilites=>new FaceExpressions(probabilites));return netInput.isBatchInput?predictionsByBatch:predictionsByBatch[0]}getDefaultModelName(){return"face_expression_model"}getClassifierChannelsIn(){return 256}getClassifierChannelsOut(){return 7}};function isWithFaceExpressions(obj){return obj.expressions instanceof FaceExpressions}function extendWithFaceExpressions(sourceObj,expressions){let extension={expressions};return Object.assign({},sourceObj,extension)}function drawFaceExpressions(canvasArg,faceExpressions,minConfidence=.1,textFieldAnchor){let faceExpressionsArray=Array.isArray(faceExpressions)?faceExpressions:[faceExpressions];faceExpressionsArray.forEach(e=>{let expr=e instanceof FaceExpressions?e:isWithFaceExpressions(e)?e.expressions:void 
0;if(!expr)throw new Error("drawFaceExpressions - expected faceExpressions to be FaceExpressions | WithFaceExpressions<{}> or array thereof");let sorted=expr.asSortedArray(),resultsToDisplay=sorted.filter(expr2=>expr2.probability>minConfidence),anchor=isWithFaceDetection(e)?e.detection.box.bottomLeft:textFieldAnchor||new Point(0,0),drawTextField=new DrawTextField(resultsToDisplay.map(expr2=>`${expr2.expression} (${round(expr2.probability)})`),anchor);drawTextField.draw(canvasArg)})}function isWithFaceLandmarks(obj){return isWithFaceDetection(obj)&&obj.landmarks instanceof FaceLandmarks&&obj.unshiftedLandmarks instanceof FaceLandmarks&&obj.alignedRect instanceof FaceDetection}function extendWithFaceLandmarks(sourceObj,unshiftedLandmarks){let{box:shift}=sourceObj.detection,landmarks=unshiftedLandmarks.shiftBy(shift.x,shift.y),rect=landmarks.align(),{imageDims}=sourceObj.detection,alignedRect=new FaceDetection(sourceObj.detection.score,rect.rescale(imageDims.reverse()),imageDims),extension={landmarks,unshiftedLandmarks,alignedRect};return Object.assign({},sourceObj,extension)}var DrawFaceLandmarksOptions=class{constructor(options={}){let{drawLines=!0,drawPoints=!0,lineWidth,lineColor,pointSize,pointColor}=options;this.drawLines=drawLines,this.drawPoints=drawPoints,this.lineWidth=lineWidth||1,this.pointSize=pointSize||2,this.lineColor=lineColor||"rgba(0, 255, 255, 1)",this.pointColor=pointColor||"rgba(255, 0, 255, 1)"}},DrawFaceLandmarks=class{constructor(faceLandmarks,options={}){this.faceLandmarks=faceLandmarks,this.options=new DrawFaceLandmarksOptions(options)}draw(canvasArg){let ctx=getContext2dOrThrow(canvasArg),{drawLines,drawPoints,lineWidth,lineColor,pointSize,pointColor}=this.options;if(drawLines&&this.faceLandmarks instanceof 
FaceLandmarks68&&(ctx.strokeStyle=lineColor,ctx.lineWidth=lineWidth,drawContour(ctx,this.faceLandmarks.getJawOutline()),drawContour(ctx,this.faceLandmarks.getLeftEyeBrow()),drawContour(ctx,this.faceLandmarks.getRightEyeBrow()),drawContour(ctx,this.faceLandmarks.getNose()),drawContour(ctx,this.faceLandmarks.getLeftEye(),!0),drawContour(ctx,this.faceLandmarks.getRightEye(),!0),drawContour(ctx,this.faceLandmarks.getMouth(),!0)),drawPoints){ctx.strokeStyle=pointColor,ctx.fillStyle=pointColor;let drawPoint=pt=>{ctx.beginPath(),ctx.arc(pt.x,pt.y,pointSize,0,2*Math.PI),ctx.fill()};this.faceLandmarks.positions.forEach(drawPoint)}}};function drawFaceLandmarks(canvasArg,faceLandmarks){let faceLandmarksArray=Array.isArray(faceLandmarks)?faceLandmarks:[faceLandmarks];faceLandmarksArray.forEach(f=>{let landmarks=f instanceof FaceLandmarks?f:isWithFaceLandmarks(f)?f.landmarks:void 0;if(!landmarks)throw new Error("drawFaceLandmarks - expected faceExpressions to be FaceLandmarks | WithFaceLandmarks> or array thereof");new DrawFaceLandmarks(landmarks).draw(canvasArg)})}var tf20=__toModule(require_tfjs_esm()),tf19=__toModule(require_tfjs_esm());function extractorsFactory3(extractWeights,paramMappings){let extractConvParams=extractConvParamsFactory(extractWeights,paramMappings),extractSeparableConvParams=extractSeparableConvParamsFactory(extractWeights,paramMappings);function extractReductionBlockParams(channelsIn,channelsOut,mappedPrefix){let separable_conv0=extractSeparableConvParams(channelsIn,channelsOut,`${mappedPrefix}/separable_conv0`),separable_conv1=extractSeparableConvParams(channelsOut,channelsOut,`${mappedPrefix}/separable_conv1`),expansion_conv=extractConvParams(channelsIn,channelsOut,1,`${mappedPrefix}/expansion_conv`);return{separable_conv0,separable_conv1,expansion_conv}}function extractMainBlockParams(channels,mappedPrefix){let 
separable_conv0=extractSeparableConvParams(channels,channels,`${mappedPrefix}/separable_conv0`),separable_conv1=extractSeparableConvParams(channels,channels,`${mappedPrefix}/separable_conv1`),separable_conv2=extractSeparableConvParams(channels,channels,`${mappedPrefix}/separable_conv2`);return{separable_conv0,separable_conv1,separable_conv2}}return{extractConvParams,extractSeparableConvParams,extractReductionBlockParams,extractMainBlockParams}}function extractParams5(weights,numMainBlocks){let paramMappings=[],{extractWeights,getRemainingWeights}=extractWeightsFactory(weights),{extractConvParams,extractSeparableConvParams,extractReductionBlockParams,extractMainBlockParams}=extractorsFactory3(extractWeights,paramMappings),entry_flow_conv_in=extractConvParams(3,32,3,"entry_flow/conv_in"),entry_flow_reduction_block_0=extractReductionBlockParams(32,64,"entry_flow/reduction_block_0"),entry_flow_reduction_block_1=extractReductionBlockParams(64,128,"entry_flow/reduction_block_1"),entry_flow={conv_in:entry_flow_conv_in,reduction_block_0:entry_flow_reduction_block_0,reduction_block_1:entry_flow_reduction_block_1},middle_flow={};range(numMainBlocks,0,1).forEach(idx=>{middle_flow[`main_block_${idx}`]=extractMainBlockParams(128,`middle_flow/main_block_${idx}`)});let exit_flow_reduction_block=extractReductionBlockParams(128,256,"exit_flow/reduction_block"),exit_flow_separable_conv=extractSeparableConvParams(256,512,"exit_flow/separable_conv"),exit_flow={reduction_block:exit_flow_reduction_block,separable_conv:exit_flow_separable_conv};if(getRemainingWeights().length!==0)throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);return{paramMappings,params:{entry_flow,middle_flow,exit_flow}}}function loadParamsFactory3(weightMap,paramMappings){let 
extractWeightEntry=extractWeightEntryFactory(weightMap,paramMappings),extractConvParams=loadConvParamsFactory(extractWeightEntry),extractSeparableConvParams=loadSeparableConvParamsFactory(extractWeightEntry);function extractReductionBlockParams(mappedPrefix){let separable_conv0=extractSeparableConvParams(`${mappedPrefix}/separable_conv0`),separable_conv1=extractSeparableConvParams(`${mappedPrefix}/separable_conv1`),expansion_conv=extractConvParams(`${mappedPrefix}/expansion_conv`);return{separable_conv0,separable_conv1,expansion_conv}}function extractMainBlockParams(mappedPrefix){let separable_conv0=extractSeparableConvParams(`${mappedPrefix}/separable_conv0`),separable_conv1=extractSeparableConvParams(`${mappedPrefix}/separable_conv1`),separable_conv2=extractSeparableConvParams(`${mappedPrefix}/separable_conv2`);return{separable_conv0,separable_conv1,separable_conv2}}return{extractConvParams,extractSeparableConvParams,extractReductionBlockParams,extractMainBlockParams}}function extractParamsFromWeigthMap5(weightMap,numMainBlocks){let paramMappings=[],{extractConvParams,extractSeparableConvParams,extractReductionBlockParams,extractMainBlockParams}=loadParamsFactory3(weightMap,paramMappings),entry_flow_conv_in=extractConvParams("entry_flow/conv_in"),entry_flow_reduction_block_0=extractReductionBlockParams("entry_flow/reduction_block_0"),entry_flow_reduction_block_1=extractReductionBlockParams("entry_flow/reduction_block_1"),entry_flow={conv_in:entry_flow_conv_in,reduction_block_0:entry_flow_reduction_block_0,reduction_block_1:entry_flow_reduction_block_1},middle_flow={};range(numMainBlocks,0,1).forEach(idx=>{middle_flow[`main_block_${idx}`]=extractMainBlockParams(`middle_flow/main_block_${idx}`)});let exit_flow_reduction_block=extractReductionBlockParams("exit_flow/reduction_block"),exit_flow_separable_conv=extractSeparableConvParams("exit_flow/separable_conv"),exit_flow={reduction_block:exit_flow_reduction_block,separable_conv:exit_flow_separable_conv};return 
disposeUnusedWeightTensors(weightMap,paramMappings),{params:{entry_flow,middle_flow,exit_flow},paramMappings}}function conv(x,params,stride){return tf19.add(tf19.conv2d(x,params.filters,stride,"same"),params.bias)}function reductionBlock(x,params,isActivateInput=!0){let out=isActivateInput?tf19.relu(x):x;return out=depthwiseSeparableConv(out,params.separable_conv0,[1,1]),out=depthwiseSeparableConv(tf19.relu(out),params.separable_conv1,[1,1]),out=tf19.maxPool(out,[3,3],[2,2],"same"),out=tf19.add(out,conv(x,params.expansion_conv,[2,2])),out}function mainBlock(x,params){let out=depthwiseSeparableConv(tf19.relu(x),params.separable_conv0,[1,1]);return out=depthwiseSeparableConv(tf19.relu(out),params.separable_conv1,[1,1]),out=depthwiseSeparableConv(tf19.relu(out),params.separable_conv2,[1,1]),out=tf19.add(out,x),out}var TinyXception=class extends NeuralNetwork{constructor(numMainBlocks){super("TinyXception");this._numMainBlocks=numMainBlocks}forwardInput(input){let{params}=this;if(!params)throw new Error("TinyXception - load model before inference");return tf19.tidy(()=>{let batchTensor=tf19.cast(input.toBatchTensor(112,!0),"float32"),meanRgb=[122.782,117.001,104.298],normalized=normalize(batchTensor,meanRgb).div(tf19.scalar(256)),out=tf19.relu(conv(normalized,params.entry_flow.conv_in,[2,2]));return out=reductionBlock(out,params.entry_flow.reduction_block_0,!1),out=reductionBlock(out,params.entry_flow.reduction_block_1),range(this._numMainBlocks,0,1).forEach(idx=>{out=mainBlock(out,params.middle_flow[`main_block_${idx}`])}),out=reductionBlock(out,params.exit_flow.reduction_block),out=tf19.relu(depthwiseSeparableConv(out,params.exit_flow.separable_conv,[1,1])),out})}async forward(input){return this.forwardInput(await toNetInput(input))}getDefaultModelName(){return"tiny_xception_model"}extractParamsFromWeigthMap(weightMap){return extractParamsFromWeigthMap5(weightMap,this._numMainBlocks)}extractParams(weights){return extractParams5(weights,this._numMainBlocks)}};function 
extractParams7(weights){let paramMappings=[],{extractWeights,getRemainingWeights}=extractWeightsFactory(weights),extractFCParams=extractFCParamsFactory(extractWeights,paramMappings),age=extractFCParams(512,1,"fc/age"),gender=extractFCParams(512,2,"fc/gender");if(getRemainingWeights().length!==0)throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);return{paramMappings,params:{fc:{age,gender}}}}function extractParamsFromWeigthMap7(weightMap){let paramMappings=[],extractWeightEntry=extractWeightEntryFactory(weightMap,paramMappings);function extractFcParams(prefix){let weights=extractWeightEntry(`${prefix}/weights`,2),bias=extractWeightEntry(`${prefix}/bias`,1);return{weights,bias}}let params={fc:{age:extractFcParams("fc/age"),gender:extractFcParams("fc/gender")}};return disposeUnusedWeightTensors(weightMap,paramMappings),{params,paramMappings}}var Gender;(function(Gender2){Gender2.FEMALE="female",Gender2.MALE="male"})(Gender||(Gender={}));var AgeGenderNet=class extends NeuralNetwork{constructor(faceFeatureExtractor=new TinyXception(2)){super("AgeGenderNet");this._faceFeatureExtractor=faceFeatureExtractor}get faceFeatureExtractor(){return this._faceFeatureExtractor}runNet(input){let{params}=this;if(!params)throw new Error(`${this._name} - load model before inference`);return tf20.tidy(()=>{let bottleneckFeatures=input instanceof NetInput?this.faceFeatureExtractor.forwardInput(input):input,pooled=tf20.avgPool(bottleneckFeatures,[7,7],[2,2],"valid").as2D(bottleneckFeatures.shape[0],-1),age=fullyConnectedLayer(pooled,params.fc.age).as1D(),gender=fullyConnectedLayer(pooled,params.fc.gender);return{age,gender}})}forwardInput(input){return tf20.tidy(()=>{let{age,gender}=this.runNet(input);return{age,gender:tf20.softmax(gender)}})}async forward(input){return this.forwardInput(await toNetInput(input))}async predictAgeAndGender(input){let netInput=await toNetInput(input),out=await 
this.forwardInput(netInput),ages=tf20.unstack(out.age),genders=tf20.unstack(out.gender),ageAndGenderTensors=ages.map((ageTensor,i)=>({ageTensor,genderTensor:genders[i]})),predictionsByBatch=await Promise.all(ageAndGenderTensors.map(async({ageTensor,genderTensor})=>{let age=(await ageTensor.data())[0],probMale=(await genderTensor.data())[0],isMale=probMale>.5,gender=isMale?Gender.MALE:Gender.FEMALE,genderProbability=isMale?probMale:1-probMale;return ageTensor.dispose(),genderTensor.dispose(),{age,gender,genderProbability}}));return out.age.dispose(),out.gender.dispose(),netInput.isBatchInput?predictionsByBatch:predictionsByBatch[0]}getDefaultModelName(){return"age_gender_model"}dispose(throwOnRedispose=!0){this.faceFeatureExtractor.dispose(throwOnRedispose),super.dispose(throwOnRedispose)}loadClassifierParams(weights){let{params,paramMappings}=this.extractClassifierParams(weights);this._params=params,this._paramMappings=paramMappings}extractClassifierParams(weights){return extractParams7(weights)}extractParamsFromWeigthMap(weightMap){let{featureExtractorMap,classifierMap}=seperateWeightMaps(weightMap);return this.faceFeatureExtractor.loadFromWeightMap(featureExtractorMap),extractParamsFromWeigthMap7(classifierMap)}extractParams(weights){let classifierWeightSize=512*1+1+(512*2+2),featureExtractorWeights=weights.slice(0,weights.length-classifierWeightSize),classifierWeights=weights.slice(weights.length-classifierWeightSize);return this.faceFeatureExtractor.extractWeights(featureExtractorWeights),this.extractClassifierParams(classifierWeights)}};var tf21=__toModule(require_tfjs_esm()),FaceLandmark68NetBase=class extends FaceProcessor{postProcess(output,inputSize,originalDimensions){let inputDimensions=originalDimensions.map(({width,height})=>{let scale2=inputSize/Math.max(height,width);return{width:width*scale2,height:height*scale2}}),batchSize=inputDimensions.length;return tf21.tidy(()=>{let 
/* FaceLandmark68NetBase (continued — class header precedes this chunk): helpers that map normalized landmark output back to the original (padded) input dimensions. NOTE(review): the getPaddingX/getPaddingY comparator expressions below look garbled (probable "<"-stripping during extraction of this minified bundle) — verify against the unminified source before relying on this text. */createInterleavedTensor=(fillX,fillY)=>tf21.stack([tf21.fill([68],fillX,"float32"),tf21.fill([68],fillY,"float32")],1).as2D(1,136).as1D(),getPadding=(batchIdx,cond)=>{let{width,height}=inputDimensions[batchIdx];return cond(width,height)?Math.abs(width-height)/2:0},getPaddingX=batchIdx=>getPadding(batchIdx,(w,h)=>wgetPadding(batchIdx,(w,h)=>hcreateInterleavedTensor(getPaddingX(batchIdx),getPaddingY(batchIdx))))).div(tf21.stack(Array.from(Array(batchSize),(_,batchIdx)=>createInterleavedTensor(inputDimensions[batchIdx].width,inputDimensions[batchIdx].height))));return landmarkTensors})}forwardInput(input){return tf21.tidy(()=>{let out=this.runNet(input);return this.postProcess(out,input.inputSize,input.inputDimensions.map(([height,width])=>({height,width})))})}async forward(input){return this.forwardInput(await toNetInput(input))}async detectLandmarks(input){let netInput=await toNetInput(input),landmarkTensors=tf21.tidy(()=>tf21.unstack(this.forwardInput(netInput))),landmarksForBatch=await Promise.all(landmarkTensors.map(async(landmarkTensor,batchIdx)=>{let landmarksArray=Array.from(await landmarkTensor.data()),xCoords=landmarksArray.filter((_,i)=>isEven(i)),yCoords=landmarksArray.filter((_,i)=>!isEven(i));return new FaceLandmarks68(Array(68).fill(0).map((_,i)=>new Point(xCoords[i],yCoords[i])),{height:netInput.getInputHeight(batchIdx),width:netInput.getInputWidth(batchIdx)})}));return landmarkTensors.forEach(t=>t.dispose()),netInput.isBatchInput?landmarksForBatch:landmarksForBatch[0]}getClassifierChannelsOut(){return 136}},FaceLandmark68Net=class extends FaceLandmark68NetBase{constructor(faceFeatureExtractor=new FaceFeatureExtractor){super("FaceLandmark68Net",faceFeatureExtractor)}getDefaultModelName(){return"face_landmark_68_model"}getClassifierChannelsIn(){return 256}};var tf22=__toModule(require_tfjs_esm());/* extractParamsFromWeigthMapTiny: assembles TinyFaceFeatureExtractor params (dense0..dense2) from a loaded weight map; unused map tensors are disposed */function extractParamsFromWeigthMapTiny(weightMap){let 
paramMappings=[],{extractDenseBlock3Params}=loadParamsFactory(weightMap,paramMappings),params={dense0:extractDenseBlock3Params("dense0",!0),dense1:extractDenseBlock3Params("dense1"),dense2:extractDenseBlock3Params("dense2")};return disposeUnusedWeightTensors(weightMap,paramMappings),{params,paramMappings}}/* extractParamsTiny: slices a flat Float32 weight array into three dense-block params; throws if any weights remain unconsumed */function extractParamsTiny(weights){let paramMappings=[],{extractWeights,getRemainingWeights}=extractWeightsFactory(weights),{extractDenseBlock3Params}=extractorsFactory(extractWeights,paramMappings),dense0=extractDenseBlock3Params(3,32,"dense0",!0),dense1=extractDenseBlock3Params(32,64,"dense1"),dense2=extractDenseBlock3Params(64,128,"dense2");if(getRemainingWeights().length!==0)throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);return{paramMappings,params:{dense0,dense1,dense2}}}/* TinyFaceFeatureExtractor: 112x112 input, mean-RGB normalization then /255, three dense blocks + 14x14 avg-pool */var TinyFaceFeatureExtractor=class extends NeuralNetwork{constructor(){super("TinyFaceFeatureExtractor")}forwardInput(input){let{params}=this;if(!params)throw new Error("TinyFaceFeatureExtractor - load model before inference");return tf22.tidy(()=>{let batchTensor=tf22.cast(input.toBatchTensor(112,!0),"float32"),meanRgb=[122.782,117.001,104.298],normalized=normalize(batchTensor,meanRgb).div(tf22.scalar(255)),out=denseBlock3(normalized,params.dense0,!0);return out=denseBlock3(out,params.dense1),out=denseBlock3(out,params.dense2),out=tf22.avgPool(out,[14,14],[2,2],"valid"),out})}async forward(input){return this.forwardInput(await toNetInput(input))}getDefaultModelName(){return"face_feature_extractor_tiny_model"}extractParamsFromWeigthMap(weightMap){return extractParamsFromWeigthMapTiny(weightMap)}extractParams(weights){return extractParamsTiny(weights)}},/* FaceLandmark68TinyNet / FaceLandmarkNet: thin subclasses choosing model name and classifier channel counts */FaceLandmark68TinyNet=class extends FaceLandmark68NetBase{constructor(faceFeatureExtractor=new TinyFaceFeatureExtractor){super("FaceLandmark68TinyNet",faceFeatureExtractor)}getDefaultModelName(){return"face_landmark_68_tiny_model"}getClassifierChannelsIn(){return 128}},FaceLandmarkNet=class extends 
FaceLandmark68Net{};/* FaceRecognitionNet building blocks: conv + scale(+ReLU) layers used by the ResNet-style architecture below */var tf27=__toModule(require_tfjs_esm()),tf24=__toModule(require_tfjs_esm()),tf23=__toModule(require_tfjs_esm());function scale(x,params){return tf23.add(tf23.mul(x,params.weights),params.biases)}function convLayer2(x,params,strides,withRelu,padding="same"){let{filters,bias}=params.conv,out=tf24.conv2d(x,filters,strides,padding);return out=tf24.add(out,bias),out=scale(out,params.scale),withRelu?tf24.relu(out):out}function conv2(x,params){return convLayer2(x,params,[1,1],!0)}function convNoRelu(x,params){return convLayer2(x,params,[1,1],!1)}function convDown(x,params){return convLayer2(x,params,[2,2],!0,"valid")}var tf25=__toModule(require_tfjs_esm());/* extractorsFactory5: conv/residual layer extractors over a flat weight array (filters transposed to [H,W,in,out]) */function extractorsFactory5(extractWeights,paramMappings){function extractFilterValues(numFilterValues,numFilters,filterSize){let weights=extractWeights(numFilterValues),depth=weights.length/(numFilters*filterSize*filterSize);if(isFloat(depth))throw new Error(`depth has to be an integer: ${depth}, weights.length: ${weights.length}, numFilters: ${numFilters}, filterSize: ${filterSize}`);return tf25.tidy(()=>tf25.transpose(tf25.tensor4d(weights,[numFilters,depth,filterSize,filterSize]),[2,3,1,0]))}function extractConvParams(numFilterValues,numFilters,filterSize,mappedPrefix){let filters=extractFilterValues(numFilterValues,numFilters,filterSize),bias=tf25.tensor1d(extractWeights(numFilters));return paramMappings.push({paramPath:`${mappedPrefix}/filters`},{paramPath:`${mappedPrefix}/bias`}),{filters,bias}}function extractScaleLayerParams(numWeights,mappedPrefix){let weights=tf25.tensor1d(extractWeights(numWeights)),biases=tf25.tensor1d(extractWeights(numWeights));return paramMappings.push({paramPath:`${mappedPrefix}/weights`},{paramPath:`${mappedPrefix}/biases`}),{weights,biases}}function extractConvLayerParams(numFilterValues,numFilters,filterSize,mappedPrefix){let 
conv3=extractConvParams(numFilterValues,numFilters,filterSize,`${mappedPrefix}/conv`),scale2=extractScaleLayerParams(numFilters,`${mappedPrefix}/scale`);return{conv:conv3,scale:scale2}}function extractResidualLayerParams(numFilterValues,numFilters,filterSize,mappedPrefix,isDown=!1){let conv1=extractConvLayerParams((isDown?.5:1)*numFilterValues,numFilters,filterSize,`${mappedPrefix}/conv1`),conv22=extractConvLayerParams(numFilterValues,numFilters,filterSize,`${mappedPrefix}/conv2`);return{conv1,conv2:conv22}}return{extractConvLayerParams,extractResidualLayerParams}}/* extractParams9: extracts the full face-recognition net (conv32..conv256 residual stacks + final 256x128 fc) from flat weights */function extractParams9(weights){let{extractWeights,getRemainingWeights}=extractWeightsFactory(weights),paramMappings=[],{extractConvLayerParams,extractResidualLayerParams}=extractorsFactory5(extractWeights,paramMappings),conv32_down=extractConvLayerParams(4704,32,7,"conv32_down"),conv32_1=extractResidualLayerParams(9216,32,3,"conv32_1"),conv32_2=extractResidualLayerParams(9216,32,3,"conv32_2"),conv32_3=extractResidualLayerParams(9216,32,3,"conv32_3"),conv64_down=extractResidualLayerParams(36864,64,3,"conv64_down",!0),conv64_1=extractResidualLayerParams(36864,64,3,"conv64_1"),conv64_2=extractResidualLayerParams(36864,64,3,"conv64_2"),conv64_3=extractResidualLayerParams(36864,64,3,"conv64_3"),conv128_down=extractResidualLayerParams(147456,128,3,"conv128_down",!0),conv128_1=extractResidualLayerParams(147456,128,3,"conv128_1"),conv128_2=extractResidualLayerParams(147456,128,3,"conv128_2"),conv256_down=extractResidualLayerParams(589824,256,3,"conv256_down",!0),conv256_1=extractResidualLayerParams(589824,256,3,"conv256_1"),conv256_2=extractResidualLayerParams(589824,256,3,"conv256_2"),conv256_down_out=extractResidualLayerParams(589824,256,3,"conv256_down_out"),fc=tf25.tidy(()=>tf25.transpose(tf25.tensor2d(extractWeights(256*128),[128,256]),[1,0]));if(paramMappings.push({paramPath:"fc"}),getRemainingWeights().length!==0)throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);let 
params={conv32_down,conv32_1,conv32_2,conv32_3,conv64_down,conv64_1,conv64_2,conv64_3,conv128_down,conv128_1,conv128_2,conv256_down,conv256_1,conv256_2,conv256_down_out,fc};return{params,paramMappings}}/* extractorsFactory6 / extractParamsFromWeigthMap9: same parameter layout as above, but sourced from a named weight map */function extractorsFactory6(weightMap,paramMappings){let extractWeightEntry=extractWeightEntryFactory(weightMap,paramMappings);function extractScaleLayerParams(prefix){let weights=extractWeightEntry(`${prefix}/scale/weights`,1),biases=extractWeightEntry(`${prefix}/scale/biases`,1);return{weights,biases}}function extractConvLayerParams(prefix){let filters=extractWeightEntry(`${prefix}/conv/filters`,4),bias=extractWeightEntry(`${prefix}/conv/bias`,1),scale2=extractScaleLayerParams(prefix);return{conv:{filters,bias},scale:scale2}}function extractResidualLayerParams(prefix){return{conv1:extractConvLayerParams(`${prefix}/conv1`),conv2:extractConvLayerParams(`${prefix}/conv2`)}}return{extractConvLayerParams,extractResidualLayerParams}}function extractParamsFromWeigthMap9(weightMap){let paramMappings=[],{extractConvLayerParams,extractResidualLayerParams}=extractorsFactory6(weightMap,paramMappings),conv32_down=extractConvLayerParams("conv32_down"),conv32_1=extractResidualLayerParams("conv32_1"),conv32_2=extractResidualLayerParams("conv32_2"),conv32_3=extractResidualLayerParams("conv32_3"),conv64_down=extractResidualLayerParams("conv64_down"),conv64_1=extractResidualLayerParams("conv64_1"),conv64_2=extractResidualLayerParams("conv64_2"),conv64_3=extractResidualLayerParams("conv64_3"),conv128_down=extractResidualLayerParams("conv128_down"),conv128_1=extractResidualLayerParams("conv128_1"),conv128_2=extractResidualLayerParams("conv128_2"),conv256_down=extractResidualLayerParams("conv256_down"),conv256_1=extractResidualLayerParams("conv256_1"),conv256_2=extractResidualLayerParams("conv256_2"),conv256_down_out=extractResidualLayerParams("conv256_down_out"),fc=weightMap.fc;if(paramMappings.push({originalPath:"fc",paramPath:"fc"}),!isTensor2D(fc))throw new Error(`expected 
weightMap[fc] to be a Tensor2D, instead have ${fc}`);let params={conv32_down,conv32_1,conv32_2,conv32_3,conv64_down,conv64_1,conv64_2,conv64_3,conv128_down,conv128_1,conv128_2,conv256_down,conv256_1,conv256_2,conv256_down_out,fc};return disposeUnusedWeightTensors(weightMap,paramMappings),{params,paramMappings}}var tf26=__toModule(require_tfjs_esm());/* residual / residualDown: standard and downsampling residual blocks; residualDown zero-pads spatial/channel mismatches before the skip-add */function residual(x,params){let out=conv2(x,params.conv1);return out=convNoRelu(out,params.conv2),out=tf26.add(out,x),out=tf26.relu(out),out}function residualDown(x,params){let out=convDown(x,params.conv1);out=convNoRelu(out,params.conv2);let pooled=tf26.avgPool(x,2,2,"valid"),zeros2=tf26.zeros(pooled.shape),isPad=pooled.shape[3]!==out.shape[3],isAdjustShape=pooled.shape[1]!==out.shape[1]||pooled.shape[2]!==out.shape[2];if(isAdjustShape){let padShapeX=[...out.shape];padShapeX[1]=1;let zerosW=tf26.zeros(padShapeX);out=tf26.concat([out,zerosW],1);let padShapeY=[...out.shape];padShapeY[2]=1;let zerosH=tf26.zeros(padShapeY);out=tf26.concat([out,zerosH],2)}return pooled=isPad?tf26.concat([pooled,zeros2],3):pooled,out=tf26.add(pooled,out),out=tf26.relu(out),out}/* FaceRecognitionNet: 150x150 input -> 128-D face descriptor via the residual stacks above */var FaceRecognitionNet=class extends NeuralNetwork{constructor(){super("FaceRecognitionNet")}forwardInput(input){let{params}=this;if(!params)throw new Error("FaceRecognitionNet - load model before inference");return tf27.tidy(()=>{let 
batchTensor=tf27.cast(input.toBatchTensor(150,!0),"float32"),meanRgb=[122.782,117.001,104.298],normalized=normalize(batchTensor,meanRgb).div(tf27.scalar(256)),out=convDown(normalized,params.conv32_down);out=tf27.maxPool(out,3,2,"valid"),out=residual(out,params.conv32_1),out=residual(out,params.conv32_2),out=residual(out,params.conv32_3),out=residualDown(out,params.conv64_down),out=residual(out,params.conv64_1),out=residual(out,params.conv64_2),out=residual(out,params.conv64_3),out=residualDown(out,params.conv128_down),out=residual(out,params.conv128_1),out=residual(out,params.conv128_2),out=residualDown(out,params.conv256_down),out=residual(out,params.conv256_1),out=residual(out,params.conv256_2),out=residualDown(out,params.conv256_down_out);let globalAvg=out.mean([1,2]),fullyConnected=tf27.matMul(globalAvg,params.fc);return fullyConnected})}async forward(input){return this.forwardInput(await toNetInput(input))}async computeFaceDescriptor(input){let netInput=await toNetInput(input),faceDescriptorTensors=tf27.tidy(()=>tf27.unstack(this.forwardInput(netInput))),faceDescriptorsForBatch=await Promise.all(faceDescriptorTensors.map(t=>t.data()));return faceDescriptorTensors.forEach(t=>t.dispose()),netInput.isBatchInput?faceDescriptorsForBatch:faceDescriptorsForBatch[0]}getDefaultModelName(){return"face_recognition_model"}extractParamsFromWeigthMap(weightMap){return extractParamsFromWeigthMap9(weightMap)}extractParams(weights){return extractParams9(weights)}};/* factory + WithFaceDescriptor/WithAge/WithGender result-extension helpers */function createFaceRecognitionNet(weights){let net=new FaceRecognitionNet;return net.extractWeights(weights),net}function extendWithFaceDescriptor(sourceObj,descriptor){let extension={descriptor};return Object.assign({},sourceObj,extension)}function isWithAge(obj){return typeof obj.age=="number"}function extendWithAge(sourceObj,age){let extension={age};return Object.assign({},sourceObj,extension)}function 
isWithGender(obj){return(obj.gender===Gender.MALE||obj.gender===Gender.FEMALE)&&isValidProbablitiy(obj.genderProbability)}function extendWithGender(sourceObj,gender,genderProbability){let extension={gender,genderProbability};return Object.assign({},sourceObj,extension)}var tf34=__toModule(require_tfjs_esm()),tf28=__toModule(require_tfjs_esm());/* SSD MobilenetV1 face detector: extractorsFactory7 pulls depthwise/pointwise conv params from flat weights */function extractorsFactory7(extractWeights,paramMappings){function extractDepthwiseConvParams(numChannels,mappedPrefix){let filters=tf28.tensor4d(extractWeights(3*3*numChannels),[3,3,numChannels,1]),batch_norm_scale=tf28.tensor1d(extractWeights(numChannels)),batch_norm_offset=tf28.tensor1d(extractWeights(numChannels)),batch_norm_mean=tf28.tensor1d(extractWeights(numChannels)),batch_norm_variance=tf28.tensor1d(extractWeights(numChannels));return paramMappings.push({paramPath:`${mappedPrefix}/filters`},{paramPath:`${mappedPrefix}/batch_norm_scale`},{paramPath:`${mappedPrefix}/batch_norm_offset`},{paramPath:`${mappedPrefix}/batch_norm_mean`},{paramPath:`${mappedPrefix}/batch_norm_variance`}),{filters,batch_norm_scale,batch_norm_offset,batch_norm_mean,batch_norm_variance}}function extractConvParams(channelsIn,channelsOut,filterSize,mappedPrefix,isPointwiseConv){let filters=tf28.tensor4d(extractWeights(channelsIn*channelsOut*filterSize*filterSize),[filterSize,filterSize,channelsIn,channelsOut]),bias=tf28.tensor1d(extractWeights(channelsOut));return paramMappings.push({paramPath:`${mappedPrefix}/filters`},{paramPath:`${mappedPrefix}/${isPointwiseConv?"batch_norm_offset":"bias"}`}),{filters,bias}}function extractPointwiseConvParams(channelsIn,channelsOut,filterSize,mappedPrefix){let{filters,bias}=extractConvParams(channelsIn,channelsOut,filterSize,mappedPrefix,!0);return{filters,batch_norm_offset:bias}}function extractConvPairParams(channelsIn,channelsOut,mappedPrefix){let 
depthwise_conv=extractDepthwiseConvParams(channelsIn,`${mappedPrefix}/depthwise_conv`),pointwise_conv=extractPointwiseConvParams(channelsIn,channelsOut,1,`${mappedPrefix}/pointwise_conv`);return{depthwise_conv,pointwise_conv}}function extractMobilenetV1Params(){let conv_0=extractPointwiseConvParams(3,32,3,"mobilenetv1/conv_0"),conv_1=extractConvPairParams(32,64,"mobilenetv1/conv_1"),conv_2=extractConvPairParams(64,128,"mobilenetv1/conv_2"),conv_3=extractConvPairParams(128,128,"mobilenetv1/conv_3"),conv_4=extractConvPairParams(128,256,"mobilenetv1/conv_4"),conv_5=extractConvPairParams(256,256,"mobilenetv1/conv_5"),conv_6=extractConvPairParams(256,512,"mobilenetv1/conv_6"),conv_7=extractConvPairParams(512,512,"mobilenetv1/conv_7"),conv_8=extractConvPairParams(512,512,"mobilenetv1/conv_8"),conv_9=extractConvPairParams(512,512,"mobilenetv1/conv_9"),conv_10=extractConvPairParams(512,512,"mobilenetv1/conv_10"),conv_11=extractConvPairParams(512,512,"mobilenetv1/conv_11"),conv_12=extractConvPairParams(512,1024,"mobilenetv1/conv_12"),conv_13=extractConvPairParams(1024,1024,"mobilenetv1/conv_13");return{conv_0,conv_1,conv_2,conv_3,conv_4,conv_5,conv_6,conv_7,conv_8,conv_9,conv_10,conv_11,conv_12,conv_13}}function extractPredictionLayerParams(){let 
/* prediction_layer: feature-pyramid pointwise convs + 6 box/class predictor heads (channel counts fixed by the SSD anchor layout) */conv_0=extractPointwiseConvParams(1024,256,1,"prediction_layer/conv_0"),conv_1=extractPointwiseConvParams(256,512,3,"prediction_layer/conv_1"),conv_2=extractPointwiseConvParams(512,128,1,"prediction_layer/conv_2"),conv_3=extractPointwiseConvParams(128,256,3,"prediction_layer/conv_3"),conv_4=extractPointwiseConvParams(256,128,1,"prediction_layer/conv_4"),conv_5=extractPointwiseConvParams(128,256,3,"prediction_layer/conv_5"),conv_6=extractPointwiseConvParams(256,64,1,"prediction_layer/conv_6"),conv_7=extractPointwiseConvParams(64,128,3,"prediction_layer/conv_7"),box_encoding_0_predictor=extractConvParams(512,12,1,"prediction_layer/box_predictor_0/box_encoding_predictor"),class_predictor_0=extractConvParams(512,9,1,"prediction_layer/box_predictor_0/class_predictor"),box_encoding_1_predictor=extractConvParams(1024,24,1,"prediction_layer/box_predictor_1/box_encoding_predictor"),class_predictor_1=extractConvParams(1024,18,1,"prediction_layer/box_predictor_1/class_predictor"),box_encoding_2_predictor=extractConvParams(512,24,1,"prediction_layer/box_predictor_2/box_encoding_predictor"),class_predictor_2=extractConvParams(512,18,1,"prediction_layer/box_predictor_2/class_predictor"),box_encoding_3_predictor=extractConvParams(256,24,1,"prediction_layer/box_predictor_3/box_encoding_predictor"),class_predictor_3=extractConvParams(256,18,1,"prediction_layer/box_predictor_3/class_predictor"),box_encoding_4_predictor=extractConvParams(256,24,1,"prediction_layer/box_predictor_4/box_encoding_predictor"),class_predictor_4=extractConvParams(256,18,1,"prediction_layer/box_predictor_4/class_predictor"),box_encoding_5_predictor=extractConvParams(128,24,1,"prediction_layer/box_predictor_5/box_encoding_predictor"),class_predictor_5=extractConvParams(128,18,1,"prediction_layer/box_predictor_5/class_predictor"),box_predictor_0={box_encoding_predictor:box_encoding_0_predictor,class_predictor:class_predictor_0},box_predictor_1={box_encoding_predictor:box_encoding_1_predictor,class_predictor:cla
ss_predictor_1},box_predictor_2={box_encoding_predictor:box_encoding_2_predictor,class_predictor:class_predictor_2},box_predictor_3={box_encoding_predictor:box_encoding_3_predictor,class_predictor:class_predictor_3},box_predictor_4={box_encoding_predictor:box_encoding_4_predictor,class_predictor:class_predictor_4},box_predictor_5={box_encoding_predictor:box_encoding_5_predictor,class_predictor:class_predictor_5};return{conv_0,conv_1,conv_2,conv_3,conv_4,conv_5,conv_6,conv_7,box_predictor_0,box_predictor_1,box_predictor_2,box_predictor_3,box_predictor_4,box_predictor_5}}return{extractMobilenetV1Params,extractPredictionLayerParams}}/* extractParams11: mobilenetv1 + prediction_layer + output_layer/extra_dim ([1,5118,4] anchor tensor) from flat weights */function extractParams11(weights){let paramMappings=[],{extractWeights,getRemainingWeights}=extractWeightsFactory(weights),{extractMobilenetV1Params,extractPredictionLayerParams}=extractorsFactory7(extractWeights,paramMappings),mobilenetv1=extractMobilenetV1Params(),prediction_layer=extractPredictionLayerParams(),extra_dim=tf28.tensor3d(extractWeights(5118*4),[1,5118,4]),output_layer={extra_dim};if(paramMappings.push({paramPath:"output_layer/extra_dim"}),getRemainingWeights().length!==0)throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);return{params:{mobilenetv1,prediction_layer,output_layer},paramMappings}}/* extractorsFactory8: weight-map variant mapping TF checkpoint names (MobilenetV1/Conv2d_*) onto the same structure */function extractorsFactory8(weightMap,paramMappings){let extractWeightEntry=extractWeightEntryFactory(weightMap,paramMappings);function extractPointwiseConvParams(prefix,idx,mappedPrefix){let filters=extractWeightEntry(`${prefix}/Conv2d_${idx}_pointwise/weights`,4,`${mappedPrefix}/filters`),batch_norm_offset=extractWeightEntry(`${prefix}/Conv2d_${idx}_pointwise/convolution_bn_offset`,1,`${mappedPrefix}/batch_norm_offset`);return{filters,batch_norm_offset}}function extractConvPairParams(idx){let 
mappedPrefix=`mobilenetv1/conv_${idx}`,prefixDepthwiseConv=`MobilenetV1/Conv2d_${idx}_depthwise`,mappedPrefixDepthwiseConv=`${mappedPrefix}/depthwise_conv`,mappedPrefixPointwiseConv=`${mappedPrefix}/pointwise_conv`,filters=extractWeightEntry(`${prefixDepthwiseConv}/depthwise_weights`,4,`${mappedPrefixDepthwiseConv}/filters`),batch_norm_scale=extractWeightEntry(`${prefixDepthwiseConv}/BatchNorm/gamma`,1,`${mappedPrefixDepthwiseConv}/batch_norm_scale`),batch_norm_offset=extractWeightEntry(`${prefixDepthwiseConv}/BatchNorm/beta`,1,`${mappedPrefixDepthwiseConv}/batch_norm_offset`),batch_norm_mean=extractWeightEntry(`${prefixDepthwiseConv}/BatchNorm/moving_mean`,1,`${mappedPrefixDepthwiseConv}/batch_norm_mean`),batch_norm_variance=extractWeightEntry(`${prefixDepthwiseConv}/BatchNorm/moving_variance`,1,`${mappedPrefixDepthwiseConv}/batch_norm_variance`);return{depthwise_conv:{filters,batch_norm_scale,batch_norm_offset,batch_norm_mean,batch_norm_variance},pointwise_conv:extractPointwiseConvParams("MobilenetV1",idx,mappedPrefixPointwiseConv)}}function extractMobilenetV1Params(){return{conv_0:extractPointwiseConvParams("MobilenetV1",0,"mobilenetv1/conv_0"),conv_1:extractConvPairParams(1),conv_2:extractConvPairParams(2),conv_3:extractConvPairParams(3),conv_4:extractConvPairParams(4),conv_5:extractConvPairParams(5),conv_6:extractConvPairParams(6),conv_7:extractConvPairParams(7),conv_8:extractConvPairParams(8),conv_9:extractConvPairParams(9),conv_10:extractConvPairParams(10),conv_11:extractConvPairParams(11),conv_12:extractConvPairParams(12),conv_13:extractConvPairParams(13)}}function extractConvParams(prefix,mappedPrefix){let filters=extractWeightEntry(`${prefix}/weights`,4,`${mappedPrefix}/filters`),bias=extractWeightEntry(`${prefix}/biases`,1,`${mappedPrefix}/bias`);return{filters,bias}}/* box predictor heads read from Prediction/BoxPredictor_<idx> checkpoint entries */function extractBoxPredictorParams(idx){let 
box_encoding_predictor=extractConvParams(`Prediction/BoxPredictor_${idx}/BoxEncodingPredictor`,`prediction_layer/box_predictor_${idx}/box_encoding_predictor`),class_predictor=extractConvParams(`Prediction/BoxPredictor_${idx}/ClassPredictor`,`prediction_layer/box_predictor_${idx}/class_predictor`);return{box_encoding_predictor,class_predictor}}function extractPredictionLayerParams(){return{conv_0:extractPointwiseConvParams("Prediction",0,"prediction_layer/conv_0"),conv_1:extractPointwiseConvParams("Prediction",1,"prediction_layer/conv_1"),conv_2:extractPointwiseConvParams("Prediction",2,"prediction_layer/conv_2"),conv_3:extractPointwiseConvParams("Prediction",3,"prediction_layer/conv_3"),conv_4:extractPointwiseConvParams("Prediction",4,"prediction_layer/conv_4"),conv_5:extractPointwiseConvParams("Prediction",5,"prediction_layer/conv_5"),conv_6:extractPointwiseConvParams("Prediction",6,"prediction_layer/conv_6"),conv_7:extractPointwiseConvParams("Prediction",7,"prediction_layer/conv_7"),box_predictor_0:extractBoxPredictorParams(0),box_predictor_1:extractBoxPredictorParams(1),box_predictor_2:extractBoxPredictorParams(2),box_predictor_3:extractBoxPredictorParams(3),box_predictor_4:extractBoxPredictorParams(4),box_predictor_5:extractBoxPredictorParams(5)}}return{extractMobilenetV1Params,extractPredictionLayerParams}}/* extractParamsFromWeigthMap11: validates Output/extra_dim is a Tensor3D and disposes unused map entries */function extractParamsFromWeigthMap11(weightMap){let paramMappings=[],{extractMobilenetV1Params,extractPredictionLayerParams}=extractorsFactory8(weightMap,paramMappings),extra_dim=weightMap["Output/extra_dim"];if(paramMappings.push({originalPath:"Output/extra_dim",paramPath:"output_layer/extra_dim"}),!isTensor3D(extra_dim))throw new Error(`expected weightMap['Output/extra_dim'] to be a Tensor3D, instead have ${extra_dim}`);let params={mobilenetv1:extractMobilenetV1Params(),prediction_layer:extractPredictionLayerParams(),output_layer:{extra_dim}};return disposeUnusedWeightTensors(weightMap,paramMappings),{params,paramMappings}}var 
tf30=__toModule(require_tfjs_esm()),tf29=__toModule(require_tfjs_esm());/* pointwise/depthwise conv layers with batch-norm and ReLU6 clipping */function pointwiseConvLayer(x,params,strides){return tf29.tidy(()=>{let out=tf29.conv2d(x,params.filters,strides,"same");return out=tf29.add(out,params.batch_norm_offset),tf29.clipByValue(out,0,6)})}var epsilon=.0010000000474974513;function depthwiseConvLayer(x,params,strides){return tf30.tidy(()=>{let out=tf30.depthwiseConv2d(x,params.filters,strides,"same");return out=tf30.batchNorm(out,params.batch_norm_mean,params.batch_norm_variance,params.batch_norm_offset,params.batch_norm_scale,epsilon),tf30.clipByValue(out,0,6)})}function getStridesForLayerIdx(layerIdx){return[2,4,6,12].some(idx=>idx===layerIdx)?[2,2]:[1,1]}/* mobileNetV1 backbone; stride-2 at layers 2/4/6/12, conv11 output captured for the prediction layer. NOTE(review): conv11 is initialized undefined, so the ===null guard can never fire — verify intent against the unminified source */function mobileNetV1(x,params){return tf30.tidy(()=>{let conv11,out=pointwiseConvLayer(x,params.conv_0,[2,2]),convPairParams=[params.conv_1,params.conv_2,params.conv_3,params.conv_4,params.conv_5,params.conv_6,params.conv_7,params.conv_8,params.conv_9,params.conv_10,params.conv_11,params.conv_12,params.conv_13];if(convPairParams.forEach((param,i)=>{let layerIdx=i+1,depthwiseConvStrides=getStridesForLayerIdx(layerIdx);out=depthwiseConvLayer(out,param.depthwise_conv,depthwiseConvStrides),out=pointwiseConvLayer(out,param.pointwise_conv,[1,1]),layerIdx===11&&(conv11=out)}),conv11===null)throw new Error("mobileNetV1 - output of conv layer 11 is null");return{out,conv11}})}/* nonMaxSuppression2: greedy NMS with hard IOU suppression (suppressFunc is a step function at iouThreshold) */function nonMaxSuppression2(boxes,scores,maxOutputSize,iouThreshold,scoreThreshold){let numBoxes=boxes.shape[0],outputSize=Math.min(maxOutputSize,numBoxes),candidates=scores.map((score,boxIndex)=>({score,boxIndex})).filter(c=>c.score>scoreThreshold).sort((c1,c2)=>c2.score-c1.score),suppressFunc=x=>x<=iouThreshold?1:0,selected=[];return candidates.forEach(c=>{if(selected.length>=outputSize)return;let originalScore=c.score;for(let j=selected.length-1;j>=0;--j){let 
iou3=IOU(boxes,c.boxIndex,selected[j]);if(iou3===0)continue;if(c.score*=suppressFunc(iou3),c.score<=scoreThreshold)break}originalScore===c.score&&selected.push(c.boxIndex)}),selected}/* IOU over rows of a boxes tensor (arraySync on each call — presumably small candidate sets; verify cost on large inputs) */function IOU(boxes,i,j){let boxesData=boxes.arraySync(),yminI=Math.min(boxesData[i][0],boxesData[i][2]),xminI=Math.min(boxesData[i][1],boxesData[i][3]),ymaxI=Math.max(boxesData[i][0],boxesData[i][2]),xmaxI=Math.max(boxesData[i][1],boxesData[i][3]),yminJ=Math.min(boxesData[j][0],boxesData[j][2]),xminJ=Math.min(boxesData[j][1],boxesData[j][3]),ymaxJ=Math.max(boxesData[j][0],boxesData[j][2]),xmaxJ=Math.max(boxesData[j][1],boxesData[j][3]),areaI=(ymaxI-yminI)*(xmaxI-xminI),areaJ=(ymaxJ-yminJ)*(xmaxJ-xminJ);if(areaI<=0||areaJ<=0)return 0;let intersectionYmin=Math.max(yminI,yminJ),intersectionXmin=Math.max(xminI,xminJ),intersectionYmax=Math.min(ymaxI,ymaxJ),intersectionXmax=Math.min(xmaxI,xmaxJ),intersectionArea=Math.max(intersectionYmax-intersectionYmin,0)*Math.max(intersectionXmax-intersectionXmin,0);return intersectionArea/(areaI+areaJ-intersectionArea)}var tf31=__toModule(require_tfjs_esm());/* box decoding: convert corner coords to centers/sizes, then apply SSD variance-scaled offsets (10 for centers, 5 for sizes) */function getCenterCoordinatesAndSizesLayer(x){let vec=tf31.unstack(tf31.transpose(x,[1,0])),sizes=[tf31.sub(vec[2],vec[0]),tf31.sub(vec[3],vec[1])],centers=[tf31.add(vec[0],tf31.div(sizes[0],tf31.scalar(2))),tf31.add(vec[1],tf31.div(sizes[1],tf31.scalar(2)))];return{sizes,centers}}function decodeBoxesLayer(x0,x1){let{sizes,centers}=getCenterCoordinatesAndSizesLayer(x0),vec=tf31.unstack(tf31.transpose(x1,[1,0])),div0_out=tf31.div(tf31.mul(tf31.exp(tf31.div(vec[2],tf31.scalar(5))),sizes[0]),tf31.scalar(2)),add0_out=tf31.add(tf31.mul(tf31.div(vec[0],tf31.scalar(10)),sizes[0]),centers[0]),div1_out=tf31.div(tf31.mul(tf31.exp(tf31.div(vec[3],tf31.scalar(5))),sizes[1]),tf31.scalar(2)),add1_out=tf31.add(tf31.mul(tf31.div(vec[1],tf31.scalar(10)),sizes[1]),centers[1]);return 
tf31.transpose(tf31.stack([tf31.sub(add0_out,div0_out),tf31.sub(add1_out,div1_out),tf31.add(add0_out,div0_out),tf31.add(add1_out,div1_out)]),[1,0])}/* outputLayer: tiles the anchor extra_dim per batch, decodes boxes, sigmoids class scores (slice skips background class 0) */function outputLayer(boxPredictions,classPredictions,params){return tf31.tidy(()=>{let batchSize=boxPredictions.shape[0],boxes=decodeBoxesLayer(tf31.reshape(tf31.tile(params.extra_dim,[batchSize,1,1]),[-1,4]),tf31.reshape(boxPredictions,[-1,4]));boxes=tf31.reshape(boxes,[batchSize,boxes.shape[0]/batchSize,4]);let scoresAndClasses=tf31.sigmoid(tf31.slice(classPredictions,[0,0,1],[-1,-1,-1])),scores=tf31.slice(scoresAndClasses,[0,0,0],[-1,-1,1]);scores=tf31.reshape(scores,[batchSize,scores.shape[1]]);let boxesByBatch=tf31.unstack(boxes),scoresByBatch=tf31.unstack(scores);return{boxes:boxesByBatch,scores:scoresByBatch}})}var tf33=__toModule(require_tfjs_esm()),tf32=__toModule(require_tfjs_esm());/* boxPredictionLayer: reshapes conv outputs into [batch,-1,1,4] box encodings and [batch,-1,3] class scores */function boxPredictionLayer(x,params){return tf32.tidy(()=>{let batchSize=x.shape[0],boxPredictionEncoding=tf32.reshape(convLayer(x,params.box_encoding_predictor),[batchSize,-1,1,4]),classPrediction=tf32.reshape(convLayer(x,params.class_predictor),[batchSize,-1,3]);return{boxPredictionEncoding,classPrediction}})}/* predictionLayer: feature-pyramid convs + 6 prediction heads concatenated along the anchor axis */function predictionLayer(x,conv11,params){return tf33.tidy(()=>{let 
conv0=pointwiseConvLayer(x,params.conv_0,[1,1]),conv1=pointwiseConvLayer(conv0,params.conv_1,[2,2]),conv22=pointwiseConvLayer(conv1,params.conv_2,[1,1]),conv3=pointwiseConvLayer(conv22,params.conv_3,[2,2]),conv4=pointwiseConvLayer(conv3,params.conv_4,[1,1]),conv5=pointwiseConvLayer(conv4,params.conv_5,[2,2]),conv6=pointwiseConvLayer(conv5,params.conv_6,[1,1]),conv7=pointwiseConvLayer(conv6,params.conv_7,[2,2]),boxPrediction0=boxPredictionLayer(conv11,params.box_predictor_0),boxPrediction1=boxPredictionLayer(x,params.box_predictor_1),boxPrediction2=boxPredictionLayer(conv1,params.box_predictor_2),boxPrediction3=boxPredictionLayer(conv3,params.box_predictor_3),boxPrediction4=boxPredictionLayer(conv5,params.box_predictor_4),boxPrediction5=boxPredictionLayer(conv7,params.box_predictor_5),boxPredictions=tf33.concat([boxPrediction0.boxPredictionEncoding,boxPrediction1.boxPredictionEncoding,boxPrediction2.boxPredictionEncoding,boxPrediction3.boxPredictionEncoding,boxPrediction4.boxPredictionEncoding,boxPrediction5.boxPredictionEncoding],1),classPredictions=tf33.concat([boxPrediction0.classPrediction,boxPrediction1.classPrediction,boxPrediction2.classPrediction,boxPrediction3.classPrediction,boxPrediction4.classPrediction,boxPrediction5.classPrediction],1);return{boxPredictions,classPredictions}})}/* SsdMobilenetv1Options: validates minConfidence in (0,1) (default .5) and maxResults (default 100) */var SsdMobilenetv1Options=class{constructor({minConfidence,maxResults}={}){this._name="SsdMobilenetv1Options";if(this._minConfidence=minConfidence||.5,this._maxResults=maxResults||100,typeof this._minConfidence!="number"||this._minConfidence<=0||this._minConfidence>=1)throw new Error(`${this._name} - expected minConfidence to be a number between 0 and 1`);if(typeof this._maxResults!="number")throw new Error(`${this._name} - expected maxResults to be a number`)}get minConfidence(){return this._minConfidence}get maxResults(){return this._maxResults}},SsdMobilenetv1=class extends 
NeuralNetwork{constructor(){super("SsdMobilenetv1")}forwardInput(input){let{params}=this;if(!params)throw new Error("SsdMobilenetv1 - load model before inference");return tf34.tidy(()=>{let batchTensor=tf34.cast(input.toBatchTensor(512,!1),"float32"),x=tf34.sub(tf34.mul(batchTensor,tf34.scalar(.007843137718737125)),tf34.scalar(1)),features=mobileNetV1(x,params.mobilenetv1),{boxPredictions,classPredictions}=predictionLayer(features.out,features.conv11,params.prediction_layer);return outputLayer(boxPredictions,classPredictions,params.output_layer)})}async forward(input){return this.forwardInput(await toNetInput(input))}async locateFaces(input,options={}){let{maxResults,minConfidence}=new SsdMobilenetv1Options(options),netInput=await toNetInput(input),{boxes:_boxes,scores:_scores}=this.forwardInput(netInput),boxes=_boxes[0],scores=_scores[0];for(let i=1;i<_boxes.length;i++)_boxes[i].dispose(),_scores[i].dispose();let scoresData=Array.from(await scores.data()),iouThreshold=.5,indices=nonMaxSuppression2(boxes,scoresData,maxResults,iouThreshold,minConfidence),reshapedDims=netInput.getReshapedInputDimensions(0),inputSize=netInput.inputSize,padX=inputSize/reshapedDims.width,padY=inputSize/reshapedDims.height,boxesData=boxes.arraySync(),results=indices.map(idx=>{let[top,bottom]=[Math.max(0,boxesData[idx][0]),Math.min(1,boxesData[idx][2])].map(val=>val*padY),[left,right]=[Math.max(0,boxesData[idx][1]),Math.min(1,boxesData[idx][3])].map(val=>val*padX);return new FaceDetection(scoresData[idx],new Rect(left,top,right-left,bottom-top),{height:netInput.getInputHeight(0),width:netInput.getInputWidth(0)})});return boxes.dispose(),scores.dispose(),results}getDefaultModelName(){return"ssd_mobilenetv1_model"}extractParamsFromWeigthMap(weightMap){return extractParamsFromWeigthMap11(weightMap)}extractParams(weights){return extractParams11(weights)}};/* factory: builds an SsdMobilenetv1 from flat weights */function createSsdMobilenetv1(weights){let net=new SsdMobilenetv1;return net.extractWeights(weights),net}function 
createFaceDetectionNet(weights){return createSsdMobilenetv1(weights)}/* FaceDetectionNet alias + TinyYolov2 constants (anchors, mean RGB, model names) */var FaceDetectionNet=class extends SsdMobilenetv1{},IOU_THRESHOLD=.4,BOX_ANCHORS=[new Point(.738768,.874946),new Point(2.42204,2.65704),new Point(4.30971,7.04493),new Point(10.246,4.59428),new Point(12.6868,11.8741)],BOX_ANCHORS_SEPARABLE=[new Point(1.603231,2.094468),new Point(6.041143,7.080126),new Point(2.882459,3.518061),new Point(4.266906,5.178857),new Point(9.041765,10.66308)],MEAN_RGB_SEPARABLE=[117.001,114.697,97.404],DEFAULT_MODEL_NAME="tiny_yolov2_model",DEFAULT_MODEL_NAME_SEPARABLE_CONV="tiny_yolov2_separable_conv_model",tf39=__toModule(require_tfjs_esm()),isNumber=arg=>typeof arg=="number";/* validateConfig: runtime validation of the TinyYolov2 config shape (withSeparableConvs, iouThreshold, classes, anchors, meanRgb) */function validateConfig(config2){if(!config2)throw new Error(`invalid config: ${config2}`);if(typeof config2.withSeparableConvs!="boolean")throw new Error(`config.withSeparableConvs has to be a boolean, have: ${config2.withSeparableConvs}`);if(!isNumber(config2.iouThreshold)||config2.iouThreshold<0||config2.iouThreshold>1)throw new Error(`config.iouThreshold has to be a number between [0, 1], have: ${config2.iouThreshold}`);if(!Array.isArray(config2.classes)||!config2.classes.length||!config2.classes.every(c=>typeof c=="string"))throw new Error(`config.classes has to be an array class names: string[], have: ${JSON.stringify(config2.classes)}`);if(!Array.isArray(config2.anchors)||!config2.anchors.length||!config2.anchors.map(a=>a||{}).every(a=>isNumber(a.x)&&isNumber(a.y)))throw new Error(`config.anchors has to be an array of { x: number, y: number }, have: ${JSON.stringify(config2.anchors)}`);if(config2.meanRgb&&(!Array.isArray(config2.meanRgb)||config2.meanRgb.length!==3||!config2.meanRgb.every(isNumber)))throw new Error(`config.meanRgb has to be an array of shape [number, number, number], have: ${JSON.stringify(config2.meanRgb)}`)}var tf36=__toModule(require_tfjs_esm()),tf35=__toModule(require_tfjs_esm());function leaky(x){return tf35.tidy(()=>{let 
min=tf35.mul(x,tf35.scalar(.10000000149011612));return tf35.add(tf35.relu(tf35.sub(x,min)),min)})}/* convWithBatchNorm / depthwiseSeparableConv3: padded convs with folded batch-norm and leaky-ReLU activation */function convWithBatchNorm(x,params){return tf36.tidy(()=>{let out=tf36.pad(x,[[0,0],[1,1],[1,1],[0,0]]);return out=tf36.conv2d(out,params.conv.filters,[1,1],"valid"),out=tf36.sub(out,params.bn.sub),out=tf36.mul(out,params.bn.truediv),out=tf36.add(out,params.conv.bias),leaky(out)})}var tf37=__toModule(require_tfjs_esm());function depthwiseSeparableConv3(x,params){return tf37.tidy(()=>{let out=tf37.pad(x,[[0,0],[1,1],[1,1],[0,0]]);return out=tf37.separableConv2d(out,params.depthwise_filter,params.pointwise_filter,[1,1],"valid"),out=tf37.add(out,params.bias),leaky(out)})}var tf38=__toModule(require_tfjs_esm());/* extractorsFactory9: conv / batch-norm / separable-conv extractors over flat TinyYolov2 weights */function extractorsFactory9(extractWeights,paramMappings){let extractConvParams=extractConvParamsFactory(extractWeights,paramMappings);function extractBatchNormParams(size,mappedPrefix){let sub6=tf38.tensor1d(extractWeights(size)),truediv=tf38.tensor1d(extractWeights(size));return paramMappings.push({paramPath:`${mappedPrefix}/sub`},{paramPath:`${mappedPrefix}/truediv`}),{sub:sub6,truediv}}function extractConvWithBatchNormParams(channelsIn,channelsOut,mappedPrefix){let conv3=extractConvParams(channelsIn,channelsOut,3,`${mappedPrefix}/conv`),bn=extractBatchNormParams(channelsOut,`${mappedPrefix}/bn`);return{conv:conv3,bn}}let extractSeparableConvParams=extractSeparableConvParamsFactory(extractWeights,paramMappings);return{extractConvParams,extractConvWithBatchNormParams,extractSeparableConvParams}}function 
extractParams13(weights,config2,boxEncodingSize,filterSizes){let{extractWeights,getRemainingWeights}=extractWeightsFactory(weights),paramMappings=[],{extractConvParams,extractConvWithBatchNormParams,extractSeparableConvParams}=extractorsFactory9(extractWeights,paramMappings),params;if(config2.withSeparableConvs){let[s0,s1,s2,s3,s4,s5,s6,s7,s8]=filterSizes,conv0=config2.isFirstLayerConv2d?extractConvParams(s0,s1,3,"conv0"):extractSeparableConvParams(s0,s1,"conv0"),conv1=extractSeparableConvParams(s1,s2,"conv1"),conv22=extractSeparableConvParams(s2,s3,"conv2"),conv3=extractSeparableConvParams(s3,s4,"conv3"),conv4=extractSeparableConvParams(s4,s5,"conv4"),conv5=extractSeparableConvParams(s5,s6,"conv5"),conv6=s7?extractSeparableConvParams(s6,s7,"conv6"):void 0,conv7=s8?extractSeparableConvParams(s7,s8,"conv7"):void 0,conv8=extractConvParams(s8||s7||s6,5*boxEncodingSize,1,"conv8");params={conv0,conv1,conv2:conv22,conv3,conv4,conv5,conv6,conv7,conv8}}else{let[s0,s1,s2,s3,s4,s5,s6,s7,s8]=filterSizes,conv0=extractConvWithBatchNormParams(s0,s1,"conv0"),conv1=extractConvWithBatchNormParams(s1,s2,"conv1"),conv22=extractConvWithBatchNormParams(s2,s3,"conv2"),conv3=extractConvWithBatchNormParams(s3,s4,"conv3"),conv4=extractConvWithBatchNormParams(s4,s5,"conv4"),conv5=extractConvWithBatchNormParams(s5,s6,"conv5"),conv6=extractConvWithBatchNormParams(s6,s7,"conv6"),conv7=extractConvWithBatchNormParams(s7,s8,"conv7"),conv8=extractConvParams(s8,5*boxEncodingSize,1,"conv8");params={conv0,conv1,conv2:conv22,conv3,conv4,conv5,conv6,conv7,conv8}}if(getRemainingWeights().length!==0)throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);return{params,paramMappings}}/* extractorsFactory10: weight-map variants of the TinyYolov2 extractors; conv6/conv7 optional when filterSizes has fewer than 9 entries */function extractorsFactory10(weightMap,paramMappings){let extractWeightEntry=extractWeightEntryFactory(weightMap,paramMappings);function extractBatchNormParams(prefix){let sub6=extractWeightEntry(`${prefix}/sub`,1),truediv=extractWeightEntry(`${prefix}/truediv`,1);return{sub:sub6,truediv}}function 
extractConvParams(prefix){let filters=extractWeightEntry(`${prefix}/filters`,4),bias=extractWeightEntry(`${prefix}/bias`,1);return{filters,bias}}function extractConvWithBatchNormParams(prefix){let conv3=extractConvParams(`${prefix}/conv`),bn=extractBatchNormParams(`${prefix}/bn`);return{conv:conv3,bn}}let extractSeparableConvParams=loadSeparableConvParamsFactory(extractWeightEntry);return{extractConvParams,extractConvWithBatchNormParams,extractSeparableConvParams}}/* extractParamsFromWeigthMap13: assembles TinyYolov2 params from a named weight map, disposing unused entries */function extractParamsFromWeigthMap13(weightMap,config2){let paramMappings=[],{extractConvParams,extractConvWithBatchNormParams,extractSeparableConvParams}=extractorsFactory10(weightMap,paramMappings),params;if(config2.withSeparableConvs){let numFilters=config2.filterSizes&&config2.filterSizes.length||9;params={conv0:config2.isFirstLayerConv2d?extractConvParams("conv0"):extractSeparableConvParams("conv0"),conv1:extractSeparableConvParams("conv1"),conv2:extractSeparableConvParams("conv2"),conv3:extractSeparableConvParams("conv3"),conv4:extractSeparableConvParams("conv4"),conv5:extractSeparableConvParams("conv5"),conv6:numFilters>7?extractSeparableConvParams("conv6"):void 0,conv7:numFilters>8?extractSeparableConvParams("conv7"):void 0,conv8:extractConvParams("conv8")}}else params={conv0:extractConvWithBatchNormParams("conv0"),conv1:extractConvWithBatchNormParams("conv1"),conv2:extractConvWithBatchNormParams("conv2"),conv3:extractConvWithBatchNormParams("conv3"),conv4:extractConvWithBatchNormParams("conv4"),conv5:extractConvWithBatchNormParams("conv5"),conv6:extractConvWithBatchNormParams("conv6"),conv7:extractConvWithBatchNormParams("conv7"),conv8:extractConvParams("conv8")};return disposeUnusedWeightTensors(weightMap,paramMappings),{params,paramMappings}}var 
TinyYolov2SizeType;(function(TinyYolov2SizeType2){TinyYolov2SizeType2[TinyYolov2SizeType2.XS=224]="XS",TinyYolov2SizeType2[TinyYolov2SizeType2.SM=320]="SM",TinyYolov2SizeType2[TinyYolov2SizeType2.MD=416]="MD",TinyYolov2SizeType2[TinyYolov2SizeType2.LG=608]="LG"})(TinyYolov2SizeType||(TinyYolov2SizeType={}));var TinyYolov2Options=class{constructor({inputSize,scoreThreshold}={}){this._name="TinyYolov2Options";if(this._inputSize=inputSize||416,this._scoreThreshold=scoreThreshold||.5,typeof this._inputSize!="number"||this._inputSize%32!==0)throw new Error(`${this._name} - expected inputSize to be a number divisible by 32`);if(typeof this._scoreThreshold!="number"||this._scoreThreshold<=0||this._scoreThreshold>=1)throw new Error(`${this._name} - expected scoreThreshold to be a number between 0 and 1`)}get inputSize(){return this._inputSize}get scoreThreshold(){return this._scoreThreshold}},TinyYolov2Base2=class extends NeuralNetwork{constructor(config2){super("TinyYolov2");validateConfig(config2),this._config=config2}get config(){return this._config}get withClassScores(){return this.config.withClassScores||this.config.classes.length>1}get boxEncodingSize(){return 5+(this.withClassScores?this.config.classes.length:0)}runTinyYolov2(x,params){let out=convWithBatchNorm(x,params.conv0);return out=tf39.maxPool(out,[2,2],[2,2],"same"),out=convWithBatchNorm(out,params.conv1),out=tf39.maxPool(out,[2,2],[2,2],"same"),out=convWithBatchNorm(out,params.conv2),out=tf39.maxPool(out,[2,2],[2,2],"same"),out=convWithBatchNorm(out,params.conv3),out=tf39.maxPool(out,[2,2],[2,2],"same"),out=convWithBatchNorm(out,params.conv4),out=tf39.maxPool(out,[2,2],[2,2],"same"),out=convWithBatchNorm(out,params.conv5),out=tf39.maxPool(out,[2,2],[1,1],"same"),out=convWithBatchNorm(out,params.conv6),out=convWithBatchNorm(out,params.conv7),convLayer(out,params.conv8,"valid",!1)}runMobilenet(x,params){let 
out=this.config.isFirstLayerConv2d?leaky(convLayer(x,params.conv0,"valid",!1)):depthwiseSeparableConv3(x,params.conv0);return out=tf39.maxPool(out,[2,2],[2,2],"same"),out=depthwiseSeparableConv3(out,params.conv1),out=tf39.maxPool(out,[2,2],[2,2],"same"),out=depthwiseSeparableConv3(out,params.conv2),out=tf39.maxPool(out,[2,2],[2,2],"same"),out=depthwiseSeparableConv3(out,params.conv3),out=tf39.maxPool(out,[2,2],[2,2],"same"),out=depthwiseSeparableConv3(out,params.conv4),out=tf39.maxPool(out,[2,2],[2,2],"same"),out=depthwiseSeparableConv3(out,params.conv5),out=tf39.maxPool(out,[2,2],[1,1],"same"),out=params.conv6?depthwiseSeparableConv3(out,params.conv6):out,out=params.conv7?depthwiseSeparableConv3(out,params.conv7):out,convLayer(out,params.conv8,"valid",!1)}forwardInput(input,inputSize){let{params}=this;if(!params)throw new Error("TinyYolov2 - load model before inference");return tf39.tidy(()=>{let batchTensor=tf39.cast(input.toBatchTensor(inputSize,!1),"float32");return batchTensor=this.config.meanRgb?normalize(batchTensor,this.config.meanRgb):batchTensor,batchTensor=batchTensor.div(tf39.scalar(256)),this.config.withSeparableConvs?this.runMobilenet(batchTensor,params):this.runTinyYolov2(batchTensor,params)})}async forward(input,inputSize){return await this.forwardInput(await toNetInput(input),inputSize)}async detect(input,forwardParams={}){let{inputSize,scoreThreshold}=new TinyYolov2Options(forwardParams),netInput=await toNetInput(input),out=await this.forwardInput(netInput,inputSize),out0=tf39.tidy(()=>tf39.unstack(out)[0].expandDims()),inputDimensions={width:netInput.getInputWidth(0),height:netInput.getInputHeight(0)},results=await this.extractBoxes(out0,netInput.getReshapedInputDimensions(0),scoreThreshold);out.dispose(),out0.dispose();let 
boxes=results.map(res=>res.box),scores=results.map(res=>res.score),classScores=results.map(res=>res.classScore),classNames=results.map(res=>this.config.classes[res.label]),indices=nonMaxSuppression(boxes.map(box=>box.rescale(inputSize)),scores,this.config.iouThreshold,!0),detections=indices.map(idx=>new ObjectDetection(scores[idx],classScores[idx],classNames[idx],boxes[idx],inputDimensions));return detections}getDefaultModelName(){return""}extractParamsFromWeigthMap(weightMap){return extractParamsFromWeigthMap13(weightMap,this.config)}extractParams(weights){let filterSizes=this.config.filterSizes||TinyYolov2Base2.DEFAULT_FILTER_SIZES,numFilters=filterSizes?filterSizes.length:void 0;if(numFilters!==7&&numFilters!==8&&numFilters!==9)throw new Error(`TinyYolov2 - expected 7 | 8 | 9 convolutional filters, but found ${numFilters} filterSizes in config`);return extractParams13(weights,this.config,this.boxEncodingSize,filterSizes)}async extractBoxes(outputTensor,inputBlobDimensions,scoreThreshold){let{width,height}=inputBlobDimensions,inputSize=Math.max(width,height),correctionFactorX=inputSize/width,correctionFactorY=inputSize/height,numCells=outputTensor.shape[1],numBoxes=this.config.anchors.length,[boxesTensor,scoresTensor,classScoresTensor]=tf39.tidy(()=>{let reshaped=outputTensor.reshape([numCells,numCells,numBoxes,this.boxEncodingSize]),boxes=reshaped.slice([0,0,0,0],[numCells,numCells,numBoxes,4]),scores=reshaped.slice([0,0,0,4],[numCells,numCells,numBoxes,1]),classScores=this.withClassScores?tf39.softmax(reshaped.slice([0,0,0,5],[numCells,numCells,numBoxes,this.config.classes.length]),3):tf39.scalar(0);return[boxes,scores,classScores]}),results=[],scoresData=await scoresTensor.array(),boxesData=await boxesTensor.array();for(let row=0;rowscoreThreshold){let 
ctX=(col+sigmoid(boxesData[row][col][anchor][0]))/numCells*correctionFactorX,ctY=(row+sigmoid(boxesData[row][col][anchor][1]))/numCells*correctionFactorY,width2=Math.exp(boxesData[row][col][anchor][2])*this.config.anchors[anchor].x/numCells*correctionFactorX,height2=Math.exp(boxesData[row][col][anchor][3])*this.config.anchors[anchor].y/numCells*correctionFactorY,x=ctX-width2/2,y=ctY-height2/2,pos={row,col,anchor},{classScore,label}=this.withClassScores?await this.extractPredictedClass(classScoresTensor,pos):{classScore:1,label:0};results.push({box:new BoundingBox(x,y,x+width2,y+height2),score,classScore:score*classScore,label,...pos})}}return boxesTensor.dispose(),scoresTensor.dispose(),classScoresTensor.dispose(),results}async extractPredictedClass(classesTensor,pos){let{row,col,anchor}=pos,classesData=await classesTensor.array();return Array(this.config.classes.length).fill(0).map((_,i)=>classesData[row][col][anchor][i]).map((classScore,label)=>({classScore,label})).reduce((max,curr)=>max.classScore>curr.classScore?max:curr)}},TinyYolov2Base=TinyYolov2Base2;TinyYolov2Base.DEFAULT_FILTER_SIZES=[3,16,32,64,128,256,512,1024,1024];var TinyYolov2=class extends TinyYolov2Base{constructor(withSeparableConvs=!0){let config2=Object.assign({},{withSeparableConvs,iouThreshold:IOU_THRESHOLD,classes:["face"]},withSeparableConvs?{anchors:BOX_ANCHORS_SEPARABLE,meanRgb:MEAN_RGB_SEPARABLE}:{anchors:BOX_ANCHORS,withClassScores:!0});super(config2)}get withSeparableConvs(){return this.config.withSeparableConvs}get anchors(){return this.config.anchors}async locateFaces(input,forwardParams){let objectDetections=await this.detect(input,forwardParams);return objectDetections.map(det=>new FaceDetection(det.score,det.relativeBox,{width:det.imageWidth,height:det.imageHeight}))}getDefaultModelName(){return this.withSeparableConvs?DEFAULT_MODEL_NAME_SEPARABLE_CONV:DEFAULT_MODEL_NAME}extractParamsFromWeigthMap(weightMap){return super.extractParamsFromWeigthMap(weightMap)}};function 
createTinyYolov2(weights,withSeparableConvs=!0){let net=new TinyYolov2(withSeparableConvs);return net.extractWeights(weights),net}var TinyFaceDetectorOptions=class extends TinyYolov2Options{constructor(){super(...arguments);this._name="TinyFaceDetectorOptions"}},ComposableTask=class{async then(onfulfilled){return onfulfilled(await this.run())}async run(){throw new Error("ComposableTask - run is not implemented")}},tf41=__toModule(require_tfjs_esm()),tf40=__toModule(require_tfjs_esm());async function extractAllFacesAndComputeResults(parentResults,input,computeResults,extractedFaces,getRectForAlignment=({alignedRect})=>alignedRect){let faceBoxes=parentResults.map(parentResult=>isWithFaceLandmarks(parentResult)?getRectForAlignment(parentResult):parentResult.detection),faces=extractedFaces||(input instanceof tf40.Tensor?await extractFaceTensors(input,faceBoxes):await extractFaces(input,faceBoxes)),results=await computeResults(faces);return faces.forEach(f=>f instanceof tf40.Tensor&&f.dispose()),results}async function extractSingleFaceAndComputeResult(parentResult,input,computeResult,extractedFaces,getRectForAlignment){return extractAllFacesAndComputeResults([parentResult],input,async faces=>computeResult(faces[0]),extractedFaces,getRectForAlignment)}var IOU_THRESHOLD2=.4,BOX_ANCHORS2=[new Point(1.603231,2.094468),new Point(6.041143,7.080126),new Point(2.882459,3.518061),new Point(4.266906,5.178857),new Point(9.041765,10.66308)],MEAN_RGB=[117.001,114.697,97.404],TinyFaceDetector=class extends TinyYolov2Base{constructor(){let config2={withSeparableConvs:!0,iouThreshold:IOU_THRESHOLD2,classes:["face"],anchors:BOX_ANCHORS2,meanRgb:MEAN_RGB,isFirstLayerConv2d:!0,filterSizes:[3,16,32,64,128,256,512]};super(config2)}get anchors(){return this.config.anchors}async locateFaces(input,forwardParams){let objectDetections=await this.detect(input,forwardParams);return objectDetections.map(det=>new 
FaceDetection(det.score,det.relativeBox,{width:det.imageWidth,height:det.imageHeight}))}getDefaultModelName(){return"tiny_face_detector_model"}extractParamsFromWeigthMap(weightMap){return super.extractParamsFromWeigthMap(weightMap)}},nets={ssdMobilenetv1:new SsdMobilenetv1,tinyFaceDetector:new TinyFaceDetector,tinyYolov2:new TinyYolov2,faceLandmark68Net:new FaceLandmark68Net,faceLandmark68TinyNet:new FaceLandmark68TinyNet,faceRecognitionNet:new FaceRecognitionNet,faceExpressionNet:new FaceExpressionNet,ageGenderNet:new AgeGenderNet},ssdMobilenetv1=(input,options)=>nets.ssdMobilenetv1.locateFaces(input,options),tinyFaceDetector=(input,options)=>nets.tinyFaceDetector.locateFaces(input,options),tinyYolov23=(input,options)=>nets.tinyYolov2.locateFaces(input,options),detectFaceLandmarks=input=>nets.faceLandmark68Net.detectLandmarks(input),detectFaceLandmarksTiny=input=>nets.faceLandmark68TinyNet.detectLandmarks(input),computeFaceDescriptor=input=>nets.faceRecognitionNet.computeFaceDescriptor(input),recognizeFaceExpressions=input=>nets.faceExpressionNet.predictExpressions(input),predictAgeAndGender=input=>nets.ageGenderNet.predictAgeAndGender(input),loadSsdMobilenetv1Model=url=>nets.ssdMobilenetv1.load(url),loadTinyFaceDetectorModel=url=>nets.tinyFaceDetector.load(url),loadTinyYolov2Model=url=>nets.tinyYolov2.load(url),loadFaceLandmarkModel=url=>nets.faceLandmark68Net.load(url),loadFaceLandmarkTinyModel=url=>nets.faceLandmark68TinyNet.load(url),loadFaceRecognitionModel=url=>nets.faceRecognitionNet.load(url),loadFaceExpressionModel=url=>nets.faceExpressionNet.load(url),loadAgeGenderModel=url=>nets.ageGenderNet.load(url),loadFaceDetectionModel=loadSsdMobilenetv1Model,locateFaces=ssdMobilenetv1,detectLandmarks=detectFaceLandmarks,PredictFaceExpressionsTaskBase=class extends ComposableTask{constructor(parentTask,input,extractedFaces){super();this.parentTask=parentTask;this.input=input;this.extractedFaces=extractedFaces}},PredictAllFaceExpressionsTask=class extends 
PredictFaceExpressionsTaskBase{async run(){let parentResults=await this.parentTask,faceExpressionsByFace=await extractAllFacesAndComputeResults(parentResults,this.input,async faces=>await Promise.all(faces.map(face=>nets.faceExpressionNet.predictExpressions(face))),this.extractedFaces);return parentResults.map((parentResult,i)=>extendWithFaceExpressions(parentResult,faceExpressionsByFace[i]))}withAgeAndGender(){return new PredictAllAgeAndGenderTask(this,this.input)}},PredictSingleFaceExpressionsTask=class extends PredictFaceExpressionsTaskBase{async run(){let parentResult=await this.parentTask;if(!parentResult)return;let faceExpressions=await extractSingleFaceAndComputeResult(parentResult,this.input,face=>nets.faceExpressionNet.predictExpressions(face),this.extractedFaces);return extendWithFaceExpressions(parentResult,faceExpressions)}withAgeAndGender(){return new PredictSingleAgeAndGenderTask(this,this.input)}},PredictAllFaceExpressionsWithFaceAlignmentTask=class extends PredictAllFaceExpressionsTask{withAgeAndGender(){return new PredictAllAgeAndGenderWithFaceAlignmentTask(this,this.input)}withFaceDescriptors(){return new ComputeAllFaceDescriptorsTask(this,this.input)}},PredictSingleFaceExpressionsWithFaceAlignmentTask=class extends PredictSingleFaceExpressionsTask{withAgeAndGender(){return new PredictSingleAgeAndGenderWithFaceAlignmentTask(this,this.input)}withFaceDescriptor(){return new ComputeSingleFaceDescriptorTask(this,this.input)}},PredictAgeAndGenderTaskBase=class extends ComposableTask{constructor(parentTask,input,extractedFaces){super();this.parentTask=parentTask;this.input=input;this.extractedFaces=extractedFaces}},PredictAllAgeAndGenderTask=class extends PredictAgeAndGenderTaskBase{async run(){let parentResults=await this.parentTask,ageAndGenderByFace=await extractAllFacesAndComputeResults(parentResults,this.input,async faces=>await Promise.all(faces.map(face=>nets.ageGenderNet.predictAgeAndGender(face))),this.extractedFaces);return 
parentResults.map((parentResult,i)=>{let{age,gender,genderProbability}=ageAndGenderByFace[i];return extendWithAge(extendWithGender(parentResult,gender,genderProbability),age)})}withFaceExpressions(){return new PredictAllFaceExpressionsTask(this,this.input)}},PredictSingleAgeAndGenderTask=class extends PredictAgeAndGenderTaskBase{async run(){let parentResult=await this.parentTask;if(!parentResult)return;let{age,gender,genderProbability}=await extractSingleFaceAndComputeResult(parentResult,this.input,face=>nets.ageGenderNet.predictAgeAndGender(face),this.extractedFaces);return extendWithAge(extendWithGender(parentResult,gender,genderProbability),age)}withFaceExpressions(){return new PredictSingleFaceExpressionsTask(this,this.input)}},PredictAllAgeAndGenderWithFaceAlignmentTask=class extends PredictAllAgeAndGenderTask{withFaceExpressions(){return new PredictAllFaceExpressionsWithFaceAlignmentTask(this,this.input)}withFaceDescriptors(){return new ComputeAllFaceDescriptorsTask(this,this.input)}},PredictSingleAgeAndGenderWithFaceAlignmentTask=class extends PredictSingleAgeAndGenderTask{withFaceExpressions(){return new PredictSingleFaceExpressionsWithFaceAlignmentTask(this,this.input)}withFaceDescriptor(){return new ComputeSingleFaceDescriptorTask(this,this.input)}},ComputeFaceDescriptorsTaskBase=class extends ComposableTask{constructor(parentTask,input){super();this.parentTask=parentTask;this.input=input}},ComputeAllFaceDescriptorsTask=class extends ComputeFaceDescriptorsTaskBase{async run(){let parentResults=await this.parentTask,descriptors=await extractAllFacesAndComputeResults(parentResults,this.input,faces=>Promise.all(faces.map(face=>nets.faceRecognitionNet.computeFaceDescriptor(face))),null,parentResult=>parentResult.landmarks.align(null,{useDlibAlignment:!0}));return descriptors.map((descriptor,i)=>extendWithFaceDescriptor(parentResults[i],descriptor))}withFaceExpressions(){return new 
PredictAllFaceExpressionsWithFaceAlignmentTask(this,this.input)}withAgeAndGender(){return new PredictAllAgeAndGenderWithFaceAlignmentTask(this,this.input)}},ComputeSingleFaceDescriptorTask=class extends ComputeFaceDescriptorsTaskBase{async run(){let parentResult=await this.parentTask;if(!parentResult)return;let descriptor=await extractSingleFaceAndComputeResult(parentResult,this.input,face=>nets.faceRecognitionNet.computeFaceDescriptor(face),null,parentResult2=>parentResult2.landmarks.align(null,{useDlibAlignment:!0}));return extendWithFaceDescriptor(parentResult,descriptor)}withFaceExpressions(){return new PredictSingleFaceExpressionsWithFaceAlignmentTask(this,this.input)}withAgeAndGender(){return new PredictSingleAgeAndGenderWithFaceAlignmentTask(this,this.input)}},DetectFaceLandmarksTaskBase=class extends ComposableTask{constructor(parentTask,input,useTinyLandmarkNet){super();this.parentTask=parentTask;this.input=input;this.useTinyLandmarkNet=useTinyLandmarkNet}get landmarkNet(){return this.useTinyLandmarkNet?nets.faceLandmark68TinyNet:nets.faceLandmark68Net}},DetectAllFaceLandmarksTask=class extends DetectFaceLandmarksTaskBase{async run(){let parentResults=await this.parentTask,detections=parentResults.map(res=>res.detection),faces=this.input instanceof tf41.Tensor?await extractFaceTensors(this.input,detections):await extractFaces(this.input,detections),faceLandmarksByFace=await Promise.all(faces.map(face=>this.landmarkNet.detectLandmarks(face)));return faces.forEach(f=>f instanceof tf41.Tensor&&f.dispose()),parentResults.map((parentResult,i)=>extendWithFaceLandmarks(parentResult,faceLandmarksByFace[i]))}withFaceExpressions(){return new PredictAllFaceExpressionsWithFaceAlignmentTask(this,this.input)}withAgeAndGender(){return new PredictAllAgeAndGenderWithFaceAlignmentTask(this,this.input)}withFaceDescriptors(){return new ComputeAllFaceDescriptorsTask(this,this.input)}},DetectSingleFaceLandmarksTask=class extends DetectFaceLandmarksTaskBase{async run(){let 
parentResult=await this.parentTask;if(!parentResult)return;let{detection}=parentResult,faces=this.input instanceof tf41.Tensor?await extractFaceTensors(this.input,[detection]):await extractFaces(this.input,[detection]),landmarks=await this.landmarkNet.detectLandmarks(faces[0]);return faces.forEach(f=>f instanceof tf41.Tensor&&f.dispose()),extendWithFaceLandmarks(parentResult,landmarks)}withFaceExpressions(){return new PredictSingleFaceExpressionsWithFaceAlignmentTask(this,this.input)}withAgeAndGender(){return new PredictSingleAgeAndGenderWithFaceAlignmentTask(this,this.input)}withFaceDescriptor(){return new ComputeSingleFaceDescriptorTask(this,this.input)}},DetectFacesTaskBase=class extends ComposableTask{constructor(input,options=new SsdMobilenetv1Options){super();this.input=input;this.options=options}},DetectAllFacesTask=class extends DetectFacesTaskBase{async run(){let{input,options}=this,faceDetectionFunction=options instanceof TinyFaceDetectorOptions?input2=>nets.tinyFaceDetector.locateFaces(input2,options):options instanceof SsdMobilenetv1Options?input2=>nets.ssdMobilenetv1.locateFaces(input2,options):options instanceof TinyYolov2Options?input2=>nets.tinyYolov2.locateFaces(input2,options):null;if(!faceDetectionFunction)throw new Error("detectFaces - expected options to be instance of TinyFaceDetectorOptions | SsdMobilenetv1Options | MtcnnOptions | TinyYolov2Options");return faceDetectionFunction(input)}runAndExtendWithFaceDetections(){return new Promise(async res=>{let detections=await this.run();return res(detections.map(detection=>extendWithFaceDetection({},detection)))})}withFaceLandmarks(useTinyLandmarkNet=!1){return new DetectAllFaceLandmarksTask(this.runAndExtendWithFaceDetections(),this.input,useTinyLandmarkNet)}withFaceExpressions(){return new PredictAllFaceExpressionsTask(this.runAndExtendWithFaceDetections(),this.input)}withAgeAndGender(){return new 
PredictAllAgeAndGenderTask(this.runAndExtendWithFaceDetections(),this.input)}},DetectSingleFaceTask=class extends DetectFacesTaskBase{async run(){let faceDetections=await new DetectAllFacesTask(this.input,this.options),faceDetectionWithHighestScore=faceDetections[0];return faceDetections.forEach(faceDetection=>{faceDetection.score>faceDetectionWithHighestScore.score&&(faceDetectionWithHighestScore=faceDetection)}),faceDetectionWithHighestScore}runAndExtendWithFaceDetection(){return new Promise(async res=>{let detection=await this.run();return res(detection?extendWithFaceDetection({},detection):void 0)})}withFaceLandmarks(useTinyLandmarkNet=!1){return new DetectSingleFaceLandmarksTask(this.runAndExtendWithFaceDetection(),this.input,useTinyLandmarkNet)}withFaceExpressions(){return new PredictSingleFaceExpressionsTask(this.runAndExtendWithFaceDetection(),this.input)}withAgeAndGender(){return new PredictSingleAgeAndGenderTask(this.runAndExtendWithFaceDetection(),this.input)}};function detectSingleFace(input,options=new SsdMobilenetv1Options){return new DetectSingleFaceTask(input,options)}function detectAllFaces(input,options=new SsdMobilenetv1Options){return new DetectAllFacesTask(input,options)}async function allFacesSsdMobilenetv1(input,minConfidence){return console.warn("allFacesSsdMobilenetv1 is deprecated and will be removed soon, use the high level api instead"),await detectAllFaces(input,new SsdMobilenetv1Options(minConfidence?{minConfidence}:{})).withFaceLandmarks().withFaceDescriptors()}async function allFacesTinyYolov2(input,forwardParams={}){return console.warn("allFacesTinyYolov2 is deprecated and will be removed soon, use the high level api instead"),await detectAllFaces(input,new TinyYolov2Options(forwardParams)).withFaceLandmarks().withFaceDescriptors()}var allFaces=allFacesSsdMobilenetv1;function euclideanDistance(arr1,arr2){if(arr1.length!==arr2.length)throw new Error("euclideanDistance: arr1.length !== arr2.length");let 
desc1=Array.from(arr1),desc2=Array.from(arr2);return Math.sqrt(desc1.map((val,i)=>val-desc2[i]).reduce((res,diff)=>res+Math.pow(diff,2),0))}var FaceMatcher=class{constructor(inputs,distanceThreshold=.6){this._distanceThreshold=distanceThreshold;let inputArray=Array.isArray(inputs)?inputs:[inputs];if(!inputArray.length)throw new Error("FaceRecognizer.constructor - expected atleast one input");let count=1,createUniqueLabel=()=>`person ${count++}`;this._labeledDescriptors=inputArray.map(desc=>{if(desc instanceof LabeledFaceDescriptors)return desc;if(desc instanceof Float32Array)return new LabeledFaceDescriptors(createUniqueLabel(),[desc]);if(desc.descriptor&&desc.descriptor instanceof Float32Array)return new LabeledFaceDescriptors(createUniqueLabel(),[desc.descriptor]);throw new Error("FaceRecognizer.constructor - expected inputs to be of type LabeledFaceDescriptors | WithFaceDescriptor | Float32Array | Array | Float32Array>")})}get labeledDescriptors(){return this._labeledDescriptors}get distanceThreshold(){return this._distanceThreshold}computeMeanDistance(queryDescriptor,descriptors){return descriptors.map(d=>euclideanDistance(d,queryDescriptor)).reduce((d1,d2)=>d1+d2,0)/(descriptors.length||1)}matchDescriptor(queryDescriptor){return this.labeledDescriptors.map(({descriptors,label})=>new FaceMatch(label,this.computeMeanDistance(queryDescriptor,descriptors))).reduce((best,curr)=>best.distanceld.toJSON())}}static fromJSON(json){let labeledDescriptors=json.labeledDescriptors.map(ld=>LabeledFaceDescriptors.fromJSON(ld));return new FaceMatcher(labeledDescriptors,json.distanceThreshold)}};function createTinyFaceDetector(weights){let net=new TinyFaceDetector;return net.extractWeights(weights),net}function resizeResults(results,dimensions){let{width,height}=new Dimensions(dimensions.width,dimensions.height);if(width<=0||height<=0)throw new Error(`resizeResults - invalid dimensions: ${JSON.stringify({width,height})}`);if(Array.isArray(results))return 
results.map(obj=>resizeResults(obj,{width,height}));if(isWithFaceLandmarks(results)){let resizedDetection=results.detection.forSize(width,height),resizedLandmarks=results.unshiftedLandmarks.forSize(resizedDetection.box.width,resizedDetection.box.height);return extendWithFaceLandmarks(extendWithFaceDetection(results,resizedDetection),resizedLandmarks)}return isWithFaceDetection(results)?extendWithFaceDetection(results,results.detection.forSize(width,height)):results instanceof FaceLandmarks||results instanceof FaceDetection?results.forSize(width,height):results}var version="0.9.3",node=typeof process!="undefined",browser3=typeof navigator!="undefined"&&typeof navigator.userAgent!="undefined",version2={faceapi:version,node,browser:browser3};export{AgeGenderNet,BoundingBox,Box,ComposableTask,ComputeAllFaceDescriptorsTask,ComputeFaceDescriptorsTaskBase,ComputeSingleFaceDescriptorTask,DetectAllFaceLandmarksTask,DetectAllFacesTask,DetectFaceLandmarksTaskBase,DetectFacesTaskBase,DetectSingleFaceLandmarksTask,DetectSingleFaceTask,Dimensions,FACE_EXPRESSION_LABELS,FaceDetection,FaceDetectionNet,FaceExpressionNet,FaceExpressions,FaceLandmark68Net,FaceLandmark68TinyNet,FaceLandmarkNet,FaceLandmarks,FaceLandmarks5,FaceLandmarks68,FaceMatch,FaceMatcher,FaceRecognitionNet,Gender,LabeledBox,LabeledFaceDescriptors,NetInput,NeuralNetwork,ObjectDetection,Point,PredictedBox,Rect,SsdMobilenetv1,SsdMobilenetv1Options,TinyFaceDetector,TinyFaceDetectorOptions,TinyYolov2,TinyYolov2Options,TinyYolov2SizeType,allFaces,allFacesSsdMobilenetv1,allFacesTinyYolov2,awaitMediaLoaded,bufferToImage,computeFaceDescriptor,createCanvas,createCanvasFromMedia,createFaceDetectionNet,createFaceRecognitionNet,createSsdMobilenetv1,createTinyFaceDetector,createTinyYolov2,detectAllFaces,detectFaceLandmarks,detectFaceLandmarksTiny,detectLandmarks,detectSingleFace,draw_exports as 
draw,env,euclideanDistance,extendWithAge,extendWithFaceDescriptor,extendWithFaceDetection,extendWithFaceExpressions,extendWithFaceLandmarks,extendWithGender,extractFaceTensors,extractFaces,fetchImage,fetchJson,fetchNetWeights,fetchOrThrow,getContext2dOrThrow,getMediaDimensions,imageTensorToCanvas,imageToSquare,inverseSigmoid,iou,isMediaElement,isMediaLoaded,isWithAge,isWithFaceDetection,isWithFaceExpressions,isWithFaceLandmarks,isWithGender,loadAgeGenderModel,loadFaceDetectionModel,loadFaceExpressionModel,loadFaceLandmarkModel,loadFaceLandmarkTinyModel,loadFaceRecognitionModel,loadSsdMobilenetv1Model,loadTinyFaceDetectorModel,loadTinyYolov2Model,loadWeightMap,locateFaces,matchDimensions,minBbox,nets,nonMaxSuppression,normalize,padToSquare,predictAgeAndGender,recognizeFaceExpressions,resizeResults,resolveInput,shuffleArray,sigmoid,ssdMobilenetv1,tf42 as tf,tinyFaceDetector,tinyYolov23 as tinyYolov2,toNetInput,utils_exports as utils,validateConfig,version2 as version};
+var __create=Object.create,__defProp=Object.defineProperty,__getProtoOf=Object.getPrototypeOf,__hasOwnProp=Object.prototype.hasOwnProperty,__getOwnPropNames=Object.getOwnPropertyNames,__getOwnPropDesc=Object.getOwnPropertyDescriptor,__markAsModule=target=>__defProp(target,"__esModule",{value:!0}),__commonJS=(callback,module)=>()=>(module||(module={exports:{}},callback(module.exports,module)),module.exports),__export=(target,all)=>{__markAsModule(target);for(var name in all)__defProp(target,name,{get:all[name],enumerable:!0})},__exportStar=(target,module,desc)=>{if(__markAsModule(target),module&&typeof module=="object"||typeof module=="function")for(let key of __getOwnPropNames(module))!__hasOwnProp.call(target,key)&&key!=="default"&&__defProp(target,key,{get:()=>module[key],enumerable:!(desc=__getOwnPropDesc(module,key))||desc.enumerable});return target},__toModule=module=>module&&module.__esModule?module:__exportStar(__defProp(module!=null?__create(__getProtoOf(module)):{},"default",{value:module,enumerable:!0}),module);import*as dist_star from"@tensorflow/tfjs/dist/index.js";import*as tfjs_backend_wasm_star from"@tensorflow/tfjs-backend-wasm";var require_tfjs_esm=__commonJS(exports=>{__exportStar(exports,dist_star);__exportStar(exports,tfjs_backend_wasm_star)}),require_isNodejs=__commonJS((exports,module)=>{__export(exports,{isNodejs:()=>isNodejs3});function isNodejs3(){return typeof global=="object"&&!0&&typeof module!="undefined"&&typeof 
process!="undefined"&&!!process.version}}),tf42=__toModule(require_tfjs_esm()),draw_exports={};__export(draw_exports,{AnchorPosition:()=>AnchorPosition,DrawBox:()=>DrawBox,DrawBoxOptions:()=>DrawBoxOptions,DrawFaceLandmarks:()=>DrawFaceLandmarks,DrawFaceLandmarksOptions:()=>DrawFaceLandmarksOptions,DrawTextField:()=>DrawTextField,DrawTextFieldOptions:()=>DrawTextFieldOptions,drawContour:()=>drawContour,drawDetections:()=>drawDetections,drawFaceExpressions:()=>drawFaceExpressions,drawFaceLandmarks:()=>drawFaceLandmarks});function drawContour(ctx,points,isClosed=!1){if(ctx.beginPath(),points.slice(1).forEach(({x,y},prevIdx)=>{let from=points[prevIdx];ctx.moveTo(from.x,from.y),ctx.lineTo(x,y)}),isClosed){let from=points[points.length-1],to=points[0];if(!from||!to)return;ctx.moveTo(from.x,from.y),ctx.lineTo(to.x,to.y)}ctx.stroke()}var utils_exports={};__export(utils_exports,{computeReshapedDimensions:()=>computeReshapedDimensions,getCenterPoint:()=>getCenterPoint,isDimensions:()=>isDimensions,isEven:()=>isEven,isFloat:()=>isFloat,isTensor:()=>isTensor,isTensor1D:()=>isTensor1D,isTensor2D:()=>isTensor2D,isTensor3D:()=>isTensor3D,isTensor4D:()=>isTensor4D,isValidNumber:()=>isValidNumber,isValidProbablitiy:()=>isValidProbablitiy,range:()=>range,round:()=>round});var tf=__toModule(require_tfjs_esm()),Dimensions=class{constructor(width,height){if(!isValidNumber(width)||!isValidNumber(height))throw new Error(`Dimensions.constructor - expected width and height to be valid numbers, instead have ${JSON.stringify({width,height})}`);this._width=width,this._height=height}get width(){return this._width}get height(){return this._height}reverse(){return new Dimensions(1/this.width,1/this.height)}};function isTensor(tensor2,dim){return tensor2 instanceof tf.Tensor&&tensor2.shape.length===dim}function isTensor1D(tensor2){return isTensor(tensor2,1)}function isTensor2D(tensor2){return isTensor(tensor2,2)}function isTensor3D(tensor2){return isTensor(tensor2,3)}function 
isTensor4D(tensor2){return isTensor(tensor2,4)}function isFloat(num){return num%1!==0}function isEven(num){return num%2===0}function round(num,prec=2){let f=Math.pow(10,prec);return Math.floor(num*f)/f}function isDimensions(obj){return obj&&obj.width&&obj.height}function computeReshapedDimensions({width,height},inputSize){let scale2=inputSize/Math.max(height,width);return new Dimensions(Math.round(width*scale2),Math.round(height*scale2))}function getCenterPoint(pts){return pts.reduce((sum,pt)=>sum.add(pt),new Point(0,0)).div(new Point(pts.length,pts.length))}function range(num,start,step){return Array(num).fill(0).map((_,i)=>start+i*step)}function isValidNumber(num){return!!num&&num!==Infinity&&num!==-Infinity&&!isNaN(num)||num===0}function isValidProbablitiy(num){return isValidNumber(num)&&0<=num&&num<=1}var Point=class{constructor(x,y){this._x=x,this._y=y}get x(){return this._x}get y(){return this._y}add(pt){return new Point(this.x+pt.x,this.y+pt.y)}sub(pt){return new Point(this.x-pt.x,this.y-pt.y)}mul(pt){return new Point(this.x*pt.x,this.y*pt.y)}div(pt){return new Point(this.x/pt.x,this.y/pt.y)}abs(){return new Point(Math.abs(this.x),Math.abs(this.y))}magnitude(){return Math.sqrt(Math.pow(this.x,2)+Math.pow(this.y,2))}floor(){return new Point(Math.floor(this.x),Math.floor(this.y))}},Box=class{static isRect(rect){return!!rect&&[rect.x,rect.y,rect.width,rect.height].every(isValidNumber)}static assertIsValidBox(box,callee,allowNegativeDimensions=!1){if(!Box.isRect(box))throw new Error(`${callee} - invalid box: ${JSON.stringify(box)}, expected object with properties x, y, width, height`);if(!allowNegativeDimensions&&(box.width<0||box.height<0))throw new Error(`${callee} - width (${box.width}) and height (${box.height}) must be positive numbers`)}constructor(_box,allowNegativeDimensions=!0){let box=_box||{},isBbox=[box.left,box.top,box.right,box.bottom].every(isValidNumber),isRect=[box.x,box.y,box.width,box.height].every(isValidNumber);if(!isRect&&!isBbox)throw new 
Error(`Box.constructor - expected box to be IBoundingBox | IRect, instead have ${JSON.stringify(box)}`);let[x,y,width,height]=isRect?[box.x,box.y,box.width,box.height]:[box.left,box.top,box.right-box.left,box.bottom-box.top];Box.assertIsValidBox({x,y,width,height},"Box.constructor",allowNegativeDimensions),this._x=x,this._y=y,this._width=width,this._height=height}get x(){return this._x}get y(){return this._y}get width(){return this._width}get height(){return this._height}get left(){return this.x}get top(){return this.y}get right(){return this.x+this.width}get bottom(){return this.y+this.height}get area(){return this.width*this.height}get topLeft(){return new Point(this.left,this.top)}get topRight(){return new Point(this.right,this.top)}get bottomLeft(){return new Point(this.left,this.bottom)}get bottomRight(){return new Point(this.right,this.bottom)}round(){let[x,y,width,height]=[this.x,this.y,this.width,this.height].map(val=>Math.round(val));return new Box({x,y,width,height})}floor(){let[x,y,width,height]=[this.x,this.y,this.width,this.height].map(val=>Math.floor(val));return new Box({x,y,width,height})}toSquare(){let{x,y,width,height}=this,diff=Math.abs(width-height);return widthimageWidth&&(edx=-ex+imageWidth+w,ex=imageWidth),ey>imageHeight&&(edy=-ey+imageHeight+h,ey=imageHeight),x<1&&(edy=2-x,x=1),y<1&&(edy=2-y,y=1),{dy,edy,dx,edx,y,ey,x,ex,w,h}}calibrate(region){return new Box({left:this.left+region.left*this.width,top:this.top+region.top*this.height,right:this.right+region.right*this.width,bottom:this.bottom+region.bottom*this.height}).toSquare().round()}},BoundingBox=class extends Box{constructor(left,top,right,bottom,allowNegativeDimensions=!1){super({left,top,right,bottom},allowNegativeDimensions)}};var ObjectDetection=class{constructor(score,classScore,className,relativeBox,imageDims){this._imageDims=new Dimensions(imageDims.width,imageDims.height),this._score=score,this._classScore=classScore,this._className=className,this._box=new 
Box(relativeBox).rescale(this._imageDims)}get score(){return this._score}get classScore(){return this._classScore}get className(){return this._className}get box(){return this._box}get imageDims(){return this._imageDims}get imageWidth(){return this.imageDims.width}get imageHeight(){return this.imageDims.height}get relativeBox(){return new Box(this._box).rescale(this.imageDims.reverse())}forSize(width,height){return new ObjectDetection(this.score,this.classScore,this.className,this.relativeBox,{width,height})}},FaceDetection=class extends ObjectDetection{constructor(score,relativeBox,imageDims){super(score,score,"",relativeBox,imageDims)}forSize(width,height){let{score,relativeBox,imageDims}=super.forSize(width,height);return new FaceDetection(score,relativeBox,imageDims)}};function iou(box1,box2,isIOU=!0){let width=Math.max(0,Math.min(box1.right,box2.right)-Math.max(box1.left,box2.left)),height=Math.max(0,Math.min(box1.bottom,box2.bottom)-Math.max(box1.top,box2.top)),interSection=width*height;return isIOU?interSection/(box1.area+box2.area-interSection):interSection/Math.min(box1.area,box2.area)}function minBbox(pts){let xs=pts.map(pt=>pt.x),ys=pts.map(pt=>pt.y),minX=xs.reduce((min,x)=>xymaxmax({score,boxIndex})).sort((c1,c2)=>c1.score-c2.score).map(c=>c.boxIndex),pick=[];for(;indicesSortedByScore.length>0;){let curr=indicesSortedByScore.pop();pick.push(curr);let indices=indicesSortedByScore,outputs=[];for(let i=0;ioutputs[j]<=iouThreshold)}return pick}var tf2=__toModule(require_tfjs_esm());function normalize(x,meanRgb){return tf2.tidy(()=>{let[r,g,b]=meanRgb,avg_r=tf2.fill([...x.shape.slice(0,3),1],r,"float32"),avg_g=tf2.fill([...x.shape.slice(0,3),1],g,"float32"),avg_b=tf2.fill([...x.shape.slice(0,3),1],b,"float32"),avg_rgb=tf2.concat([avg_r,avg_g,avg_b],3);return tf2.sub(x,avg_rgb)})}var tf3=__toModule(require_tfjs_esm());function padToSquare(imgTensor,isCenterImage=!1){return tf3.tidy(()=>{let[height,width]=imgTensor.shape.slice(1);if(height===width)return 
imgTensor;let dimDiff=Math.abs(height-width),paddingAmount=Math.round(dimDiff*(isCenterImage?.5:1)),paddingAxis=height>width?2:1,createPaddingTensor=paddingAmount2=>{let paddingTensorShape=imgTensor.shape.slice();return paddingTensorShape[paddingAxis]=paddingAmount2,tf3.fill(paddingTensorShape,0,"float32")},paddingTensorAppend=createPaddingTensor(paddingAmount),remainingPaddingAmount=dimDiff-paddingTensorAppend.shape[paddingAxis],paddingTensorPrepend=isCenterImage&&remainingPaddingAmount?createPaddingTensor(remainingPaddingAmount):null,tensorsToStack=[paddingTensorPrepend,imgTensor,paddingTensorAppend].filter(t=>!!t).map(t=>tf3.cast(t,"float32"));return tf3.concat(tensorsToStack,paddingAxis)})}function shuffleArray(inputArray){let array=inputArray.slice();for(let i=array.length-1;i>0;i--){let j=Math.floor(Math.random()*(i+1)),x=array[i];array[i]=array[j],array[j]=x}return array}function sigmoid(x){return 1/(1+Math.exp(-x))}function inverseSigmoid(x){return Math.log(x/(1-x))}var Rect=class extends Box{constructor(x,y,width,height,allowNegativeDimensions=!1){super({x,y,width,height},allowNegativeDimensions)}},relX=.5,relY=.43,relScale=.45,FaceLandmarks=class{constructor(relativeFaceLandmarkPositions,imgDims,shift=new Point(0,0)){let{width,height}=imgDims;this._imgDims=new Dimensions(width,height),this._shift=shift,this._positions=relativeFaceLandmarkPositions.map(pt=>pt.mul(new Point(width,height)).add(shift))}get shift(){return new Point(this._shift.x,this._shift.y)}get imageWidth(){return this._imgDims.width}get imageHeight(){return this._imgDims.height}get positions(){return this._positions}get relativePositions(){return this._positions.map(pt=>pt.sub(this._shift).div(new Point(this.imageWidth,this.imageHeight)))}forSize(width,height){return new this.constructor(this.relativePositions,{width,height})}shiftBy(x,y){return new this.constructor(this.relativePositions,this._imgDims,new Point(x,y))}shiftByPoint(pt){return 
this.shiftBy(pt.x,pt.y)}align(detection,options={}){if(detection){let box=detection instanceof FaceDetection?detection.box.floor():new Box(detection);return this.shiftBy(box.x,box.y).align(null,options)}let{useDlibAlignment,minBoxPadding}=Object.assign({},{useDlibAlignment:!1,minBoxPadding:.2},options);return useDlibAlignment?this.alignDlib():this.alignMinBbox(minBoxPadding)}alignDlib(){let centers=this.getRefPointsForAlignment(),[leftEyeCenter,rightEyeCenter,mouthCenter]=centers,distToMouth=pt=>mouthCenter.sub(pt).magnitude(),eyeToMouthDist=(distToMouth(leftEyeCenter)+distToMouth(rightEyeCenter))/2,size=Math.floor(eyeToMouthDist/relScale),refPoint=getCenterPoint(centers),x=Math.floor(Math.max(0,refPoint.x-relX*size)),y=Math.floor(Math.max(0,refPoint.y-relY*size));return new Rect(x,y,Math.min(size,this.imageWidth+x),Math.min(size,this.imageHeight+y))}alignMinBbox(padding){let box=minBbox(this.positions);return box.pad(box.width*padding,box.height*padding)}getRefPointsForAlignment(){throw new Error("getRefPointsForAlignment not implemented by base class")}};var FaceLandmarks5=class extends FaceLandmarks{getRefPointsForAlignment(){let pts=this.positions;return[pts[0],pts[1],getCenterPoint([pts[3],pts[4]])]}};var FaceLandmarks68=class extends FaceLandmarks{getJawOutline(){return this.positions.slice(0,17)}getLeftEyeBrow(){return this.positions.slice(17,22)}getRightEyeBrow(){return this.positions.slice(22,27)}getNose(){return this.positions.slice(27,36)}getLeftEye(){return this.positions.slice(36,42)}getRightEye(){return this.positions.slice(42,48)}getMouth(){return this.positions.slice(48,68)}getRefPointsForAlignment(){return[this.getLeftEye(),this.getRightEye(),this.getMouth()].map(getCenterPoint)}};var FaceMatch=class{constructor(label,distance){this._label=label,this._distance=distance}get label(){return this._label}get distance(){return this._distance}toString(withDistance=!0){return`${this.label}${withDistance?` (${round(this.distance)})`:""}`}};var 
LabeledBox=class extends Box{static assertIsValidLabeledBox(box,callee){if(Box.assertIsValidBox(box,callee),!isValidNumber(box.label))throw new Error(`${callee} - expected property label (${box.label}) to be a number`)}constructor(box,label){super(box);this._label=label}get label(){return this._label}};var LabeledFaceDescriptors=class{constructor(label,descriptors){if(!(typeof label=="string"))throw new Error("LabeledFaceDescriptors - constructor expected label to be a string");if(!Array.isArray(descriptors)||descriptors.some(desc=>!(desc instanceof Float32Array)))throw new Error("LabeledFaceDescriptors - constructor expected descriptors to be an array of Float32Array");this._label=label,this._descriptors=descriptors}get label(){return this._label}get descriptors(){return this._descriptors}toJSON(){return{label:this.label,descriptors:this.descriptors.map(d=>Array.from(d))}}static fromJSON(json){let descriptors=json.descriptors.map(d=>new Float32Array(d));return new LabeledFaceDescriptors(json.label,descriptors)}};var PredictedBox=class extends LabeledBox{static assertIsValidPredictedBox(box,callee){if(LabeledBox.assertIsValidLabeledBox(box,callee),!isValidProbablitiy(box.score)||!isValidProbablitiy(box.classScore))throw new Error(`${callee} - expected properties score (${box.score}) and (${box.classScore}) to be a number between [0, 1]`)}constructor(box,label,score,classScore){super(box,label);this._score=score,this._classScore=classScore}get score(){return this._score}get classScore(){return this._classScore}};function isWithFaceDetection(obj){return obj.detection instanceof FaceDetection}function extendWithFaceDetection(sourceObj,detection){let extension={detection};return Object.assign({},sourceObj,extension)}function createBrowserEnv(){let fetch=window.fetch||function(){throw new Error("fetch - missing fetch implementation for browser environment")},readFile=function(){throw new Error("readFile - filesystem not available for browser 
environment")};return{Canvas:HTMLCanvasElement,CanvasRenderingContext2D,Image:HTMLImageElement,ImageData,Video:HTMLVideoElement,createCanvasElement:()=>document.createElement("canvas"),createImageElement:()=>document.createElement("img"),fetch,readFile}}function createFileSystem(fs){let requireFsError="";if(!fs)try{fs=require("fs")}catch(err){requireFsError=err.toString()}let readFile=fs?function(filePath){return new Promise((res,rej)=>{fs.readFile(filePath,function(err,buffer){return err?rej(err):res(buffer)})})}:function(){throw new Error(`readFile - failed to require fs in nodejs environment with error: ${requireFsError}`)};return{readFile}}function createNodejsEnv(){let Canvas=global.Canvas||global.HTMLCanvasElement,Image=global.Image||global.HTMLImageElement,createCanvasElement=function(){if(Canvas)return new Canvas;throw new Error("createCanvasElement - missing Canvas implementation for nodejs environment")},createImageElement=function(){if(Image)return new Image;throw new Error("createImageElement - missing Image implementation for nodejs environment")},fetch=global.fetch||function(){throw new Error("fetch - missing fetch implementation for nodejs environment")},fileSystem=createFileSystem();return{Canvas:Canvas||class{},CanvasRenderingContext2D:global.CanvasRenderingContext2D||class{},Image:Image||class{},ImageData:global.ImageData||class{},Video:global.HTMLVideoElement||class{},createCanvasElement,createImageElement,fetch,...fileSystem}}function isBrowser(){return typeof window=="object"&&typeof document!="undefined"&&typeof HTMLImageElement!="undefined"&&typeof HTMLCanvasElement!="undefined"&&typeof HTMLVideoElement!="undefined"&&typeof ImageData!="undefined"&&typeof CanvasRenderingContext2D!="undefined"}var isNodejs=__toModule(require_isNodejs()),environment;function getEnv(){if(!environment)throw new Error("getEnv - environment is not defined, check isNodejs() and isBrowser()");return environment}function setEnv(env16){environment=env16}function 
initialize(){if(isBrowser())return setEnv(createBrowserEnv());if(isNodejs.isNodejs())return setEnv(createNodejsEnv())}function monkeyPatch(env16){if(environment||initialize(),!environment)throw new Error("monkeyPatch - environment is not defined, check isNodejs() and isBrowser()");let{Canvas=environment.Canvas,Image=environment.Image}=env16;environment.Canvas=Canvas,environment.Image=Image,environment.createCanvasElement=env16.createCanvasElement||(()=>new Canvas),environment.createImageElement=env16.createImageElement||(()=>new Image),environment.ImageData=env16.ImageData||environment.ImageData,environment.Video=env16.Video||environment.Video,environment.fetch=env16.fetch||environment.fetch,environment.readFile=env16.readFile||environment.readFile}var env={getEnv,setEnv,initialize,createBrowserEnv,createFileSystem,createNodejsEnv,monkeyPatch,isBrowser,isNodejs:isNodejs.isNodejs};initialize();function resolveInput(arg){return!env.isNodejs()&&typeof arg=="string"?document.getElementById(arg):arg}function getContext2dOrThrow(canvasArg){let{Canvas,CanvasRenderingContext2D:CanvasRenderingContext2D2}=env.getEnv();if(canvasArg instanceof CanvasRenderingContext2D2)return canvasArg;let canvas=resolveInput(canvasArg);if(!(canvas instanceof Canvas))throw new Error("resolveContext2d - expected canvas to be of instance of Canvas");let ctx=canvas.getContext("2d");if(!ctx)throw new Error("resolveContext2d - canvas 2d context is null");return ctx}var AnchorPosition;(function(AnchorPosition2){AnchorPosition2.TOP_LEFT="TOP_LEFT",AnchorPosition2.TOP_RIGHT="TOP_RIGHT",AnchorPosition2.BOTTOM_LEFT="BOTTOM_LEFT",AnchorPosition2.BOTTOM_RIGHT="BOTTOM_RIGHT"})(AnchorPosition||(AnchorPosition={}));var DrawTextFieldOptions=class{constructor(options={}){let{anchorPosition,backgroundColor,fontColor,fontSize,fontStyle,padding}=options;this.anchorPosition=anchorPosition||AnchorPosition.TOP_LEFT,this.backgroundColor=backgroundColor||"rgba(0, 0, 0, 0.5)",this.fontColor=fontColor||"rgba(255, 255, 
255, 1)",this.fontSize=fontSize||14,this.fontStyle=fontStyle||"Georgia",this.padding=padding||4}},DrawTextField=class{constructor(text,anchor,options={}){this.text=typeof text=="string"?[text]:text instanceof DrawTextField?text.text:text,this.anchor=anchor,this.options=new DrawTextFieldOptions(options)}measureWidth(ctx){let{padding}=this.options;return this.text.map(l=>ctx.measureText(l).width).reduce((w0,w1)=>w0{let x=padding+upperLeft.x,y=padding+upperLeft.y+(i+1)*fontSize;ctx.fillText(textLine,x,y)})}},DrawBoxOptions=class{constructor(options={}){let{boxColor,lineWidth,label,drawLabelOptions}=options;this.boxColor=boxColor||"rgba(0, 0, 255, 1)",this.lineWidth=lineWidth||2,this.label=label;let defaultDrawLabelOptions={anchorPosition:AnchorPosition.BOTTOM_LEFT,backgroundColor:this.boxColor};this.drawLabelOptions=new DrawTextFieldOptions(Object.assign({},defaultDrawLabelOptions,drawLabelOptions))}},DrawBox=class{constructor(box,options={}){this.box=new Box(box),this.options=new DrawBoxOptions(options)}draw(canvasArg){let ctx=getContext2dOrThrow(canvasArg),{boxColor,lineWidth}=this.options,{x,y,width,height}=this.box;ctx.strokeStyle=boxColor,ctx.lineWidth=lineWidth,ctx.strokeRect(x,y,width,height);let{label}=this.options;label&&new DrawTextField([label],{x:x-lineWidth/2,y},this.options.drawLabelOptions).draw(canvasArg)}};function drawDetections(canvasArg,detections){let detectionsArray=Array.isArray(detections)?detections:[detections];detectionsArray.forEach(det=>{let score=det instanceof FaceDetection?det.score:isWithFaceDetection(det)?det.detection.score:void 0,box=det instanceof FaceDetection?det.box:isWithFaceDetection(det)?det.detection.box:new Box(det),label=score?`${round(score)}`:void 0;new DrawBox(box,{label}).draw(canvasArg)})}var tf18=__toModule(require_tfjs_esm());function isMediaLoaded(media){let{Image,Video}=env.getEnv();return media instanceof Image&&media.complete||media instanceof Video&&media.readyState>=3}function awaitMediaLoaded(media){return 
new Promise((resolve,reject)=>{if(media instanceof env.getEnv().Canvas||isMediaLoaded(media))return resolve(null);function onLoad(e){if(!e.currentTarget)return;e.currentTarget.removeEventListener("load",onLoad),e.currentTarget.removeEventListener("error",onError),resolve(e)}function onError(e){if(!e.currentTarget)return;e.currentTarget.removeEventListener("load",onLoad),e.currentTarget.removeEventListener("error",onError),reject(e)}media.addEventListener("load",onLoad),media.addEventListener("error",onError)})}function bufferToImage(buf){return new Promise((resolve,reject)=>{if(!(buf instanceof Blob))return reject("bufferToImage - expected buf to be of type: Blob");let reader=new FileReader;reader.onload=()=>{if(typeof reader.result!="string")return reject("bufferToImage - expected reader.result to be a string, in onload");let img=env.getEnv().createImageElement();img.onload=()=>resolve(img),img.onerror=reject,img.src=reader.result},reader.onerror=reject,reader.readAsDataURL(buf)})}function getMediaDimensions(input){let{Image,Video}=env.getEnv();return input instanceof Image?new Dimensions(input.naturalWidth,input.naturalHeight):input instanceof Video?new Dimensions(input.videoWidth,input.videoHeight):new Dimensions(input.width,input.height)}function createCanvas({width,height}){let{createCanvasElement}=env.getEnv(),canvas=createCanvasElement();return canvas.width=width,canvas.height=height,canvas}function createCanvasFromMedia(media,dims){let{ImageData:ImageData2}=env.getEnv();if(!(media instanceof ImageData2)&&!isMediaLoaded(media))throw new Error("createCanvasFromMedia - media has not finished loading yet");let{width,height}=dims||getMediaDimensions(media),canvas=createCanvas({width,height});return media instanceof ImageData2?getContext2dOrThrow(canvas).putImageData(media,0,0):getContext2dOrThrow(canvas).drawImage(media,0,0,width,height),canvas}var tf4=__toModule(require_tfjs_esm());async function imageTensorToCanvas(imgTensor,canvas){let 
targetCanvas=canvas||env.getEnv().createCanvasElement(),[height,width,numChannels]=imgTensor.shape.slice(isTensor4D(imgTensor)?1:0),imgTensor3D=tf4.tidy(()=>imgTensor.as3D(height,width,numChannels).toInt());return await tf4.browser.toPixels(imgTensor3D,targetCanvas),imgTensor3D.dispose(),targetCanvas}function isMediaElement(input){let{Image,Canvas,Video}=env.getEnv();return input instanceof Image||input instanceof Canvas||input instanceof Video}var tf5=__toModule(require_tfjs_esm());function imageToSquare(input,inputSize,centerImage=!1){let{Image,Canvas}=env.getEnv();if(!(input instanceof Image||input instanceof Canvas))throw new Error("imageToSquare - expected arg0 to be HTMLImageElement | HTMLCanvasElement");let dims=getMediaDimensions(input),scale2=inputSize/Math.max(dims.height,dims.width),width=scale2*dims.width,height=scale2*dims.height,targetCanvas=createCanvas({width:inputSize,height:inputSize}),inputCanvas=input instanceof Canvas?input:createCanvasFromMedia(input),offset=Math.abs(width-height)/2,dx=centerImage&&width{if(isTensor3D(input)){this._imageTensors[idx]=input,this._inputDimensions[idx]=input.shape;return}if(isTensor4D(input)){let batchSize=input.shape[0];if(batchSize!==1)throw new Error(`NetInput - tf.Tensor4D with batchSize ${batchSize} passed, but not supported in input array`);this._imageTensors[idx]=input,this._inputDimensions[idx]=input.shape.slice(1);return}let canvas=input instanceof env.getEnv().Canvas?input:createCanvasFromMedia(input);this._canvases[idx]=canvas,this._inputDimensions[idx]=[canvas.height,canvas.width,3]})}get imageTensors(){return this._imageTensors}get canvases(){return this._canvases}get isBatchInput(){return this.batchSize>1||this._treatAsBatchInput}get batchSize(){return this._batchSize}get inputDimensions(){return this._inputDimensions}get inputSize(){return this._inputSize}get reshapedInputDimensions(){return 
range(this.batchSize,0,1).map((_,batchIdx)=>this.getReshapedInputDimensions(batchIdx))}getInput(batchIdx){return this.canvases[batchIdx]||this.imageTensors[batchIdx]}getInputDimensions(batchIdx){return this._inputDimensions[batchIdx]}getInputHeight(batchIdx){return this._inputDimensions[batchIdx][0]}getInputWidth(batchIdx){return this._inputDimensions[batchIdx][1]}getReshapedInputDimensions(batchIdx){if(typeof this.inputSize!="number")throw new Error("getReshapedInputDimensions - inputSize not set, toBatchTensor has not been called yet");let width=this.getInputWidth(batchIdx),height=this.getInputHeight(batchIdx);return computeReshapedDimensions({width,height},this.inputSize)}toBatchTensor(inputSize,isCenterInputs=!0){return this._inputSize=inputSize,tf5.tidy(()=>{let inputTensors=range(this.batchSize,0,1).map(batchIdx=>{let input=this.getInput(batchIdx);if(input instanceof tf5.Tensor){let imgTensor=isTensor4D(input)?input:input.expandDims();return imgTensor=padToSquare(imgTensor,isCenterInputs),(imgTensor.shape[1]!==inputSize||imgTensor.shape[2]!==inputSize)&&(imgTensor=tf5.image.resizeBilinear(imgTensor,[inputSize,inputSize])),imgTensor.as3D(inputSize,inputSize,3)}if(input instanceof env.getEnv().Canvas)return tf5.browser.fromPixels(imageToSquare(input,inputSize,isCenterInputs));throw new Error(`toBatchTensor - at batchIdx ${batchIdx}, expected input to be instanceof tf.Tensor or instanceof HTMLCanvasElement, instead have ${input}`)}),batchTensor=tf5.stack(inputTensors.map(t=>tf5.cast(t,"float32"))).as4D(this.batchSize,inputSize,inputSize,3);return batchTensor})}};async function toNetInput(inputs){if(inputs instanceof NetInput)return inputs;let inputArgArray=Array.isArray(inputs)?inputs:[inputs];if(!inputArgArray.length)throw new Error("toNetInput - empty array passed as input");let getIdxHint=idx=>Array.isArray(inputs)?` at input index ${idx}:`:"",inputArray=inputArgArray.map(resolveInput);return 
inputArray.forEach((input,i)=>{if(!isMediaElement(input)&&!isTensor3D(input)&&!isTensor4D(input))throw typeof inputArgArray[i]=="string"?new Error(`toNetInput -${getIdxHint(i)} string passed, but could not resolve HTMLElement for element id ${inputArgArray[i]}`):new Error(`toNetInput -${getIdxHint(i)} expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id`);if(isTensor4D(input)){let batchSize=input.shape[0];if(batchSize!==1)throw new Error(`toNetInput -${getIdxHint(i)} tf.Tensor4D with batchSize ${batchSize} passed, but not supported in input array`)}}),await Promise.all(inputArray.map(input=>isMediaElement(input)&&awaitMediaLoaded(input))),new NetInput(inputArray,Array.isArray(inputs))}async function extractFaces(input,detections){let{Canvas}=env.getEnv(),canvas=input;if(!(input instanceof Canvas)){let netInput=await toNetInput(input);if(netInput.batchSize>1)throw new Error("extractFaces - batchSize > 1 not supported");let tensorOrCanvas=netInput.getInput(0);canvas=tensorOrCanvas instanceof Canvas?tensorOrCanvas:await imageTensorToCanvas(tensorOrCanvas)}let ctx=getContext2dOrThrow(canvas),boxes=detections.map(det=>det instanceof FaceDetection?det.forSize(canvas.width,canvas.height).box.floor():det).map(box=>box.clipAtImageBorders(canvas.width,canvas.height));return boxes.map(({x,y,width,height})=>{let faceImg=createCanvas({width,height});return getContext2dOrThrow(faceImg).putImageData(ctx.getImageData(x,y,width,height),0,0),faceImg})}var tf6=__toModule(require_tfjs_esm());async function extractFaceTensors(imageTensor,detections){if(!isTensor3D(imageTensor)&&!isTensor4D(imageTensor))throw new Error("extractFaceTensors - expected image tensor to be 3D or 4D");if(isTensor4D(imageTensor)&&imageTensor.shape[0]>1)throw new Error("extractFaceTensors - batchSize > 1 not supported");return 
tf6.tidy(()=>{let[imgHeight,imgWidth,numChannels]=imageTensor.shape.slice(isTensor4D(imageTensor)?1:0),boxes=detections.map(det=>det instanceof FaceDetection?det.forSize(imgWidth,imgHeight).box:det).map(box=>box.clipAtImageBorders(imgWidth,imgHeight)),faceTensors=boxes.map(({x,y,width,height})=>tf6.slice3d(imageTensor.as3D(imgHeight,imgWidth,numChannels),[y,x,0],[height,width,numChannels]));return faceTensors})}async function fetchOrThrow(url,init){let fetch=env.getEnv().fetch,res=await fetch(url,init);if(!(res.status<400))throw new Error(`failed to fetch: (${res.status}) ${res.statusText}, from url: ${res.url}`);return res}async function fetchImage(uri){let res=await fetchOrThrow(uri),blob=await res.blob();if(!blob.type.startsWith("image/"))throw new Error(`fetchImage - expected blob type to be of type image/*, instead have: ${blob.type}, for url: ${res.url}`);return bufferToImage(blob)}async function fetchJson(uri){return(await fetchOrThrow(uri)).json()}async function fetchNetWeights(uri){return new Float32Array(await(await fetchOrThrow(uri)).arrayBuffer())}var tf7=__toModule(require_tfjs_esm());function getModelUris(uri,defaultModelName){let defaultManifestFilename=`${defaultModelName}-weights_manifest.json`;if(!uri)return{modelBaseUri:"",manifestUri:defaultManifestFilename};if(uri==="/")return{modelBaseUri:"/",manifestUri:`/${defaultManifestFilename}`};let protocol=uri.startsWith("http://")?"http://":uri.startsWith("https://")?"https://":"";uri=uri.replace(protocol,"");let parts=uri.split("/").filter(s=>s),manifestFile=uri.endsWith(".json")?parts[parts.length-1]:defaultManifestFilename,modelBaseUri=protocol+(uri.endsWith(".json")?parts.slice(0,parts.length-1):parts).join("/");return modelBaseUri=uri.startsWith("/")?`/${modelBaseUri}`:modelBaseUri,{modelBaseUri,manifestUri:modelBaseUri==="/"?`/${manifestFile}`:`${modelBaseUri}/${manifestFile}`}}async function 
loadWeightMap(uri,defaultModelName){let{manifestUri,modelBaseUri}=getModelUris(uri,defaultModelName),manifest=await fetchJson(manifestUri);return tf7.io.loadWeights(manifest,modelBaseUri)}function matchDimensions(input,reference,useMediaDimensions=!1){let{width,height}=useMediaDimensions?getMediaDimensions(reference):reference;return input.width=width,input.height=height,{width,height}}var tf15=__toModule(require_tfjs_esm()),tf8=__toModule(require_tfjs_esm()),NeuralNetwork=class{constructor(_name){this._name=_name;this._params=void 0;this._paramMappings=[]}get params(){return this._params}get paramMappings(){return this._paramMappings}get isLoaded(){return!!this.params}getParamFromPath(paramPath){let{obj,objProp}=this.traversePropertyPath(paramPath);return obj[objProp]}reassignParamFromPath(paramPath,tensor2){let{obj,objProp}=this.traversePropertyPath(paramPath);obj[objProp].dispose(),obj[objProp]=tensor2}getParamList(){return this._paramMappings.map(({paramPath})=>({path:paramPath,tensor:this.getParamFromPath(paramPath)}))}getTrainableParams(){return this.getParamList().filter(param=>param.tensor instanceof tf8.Variable)}getFrozenParams(){return this.getParamList().filter(param=>!(param.tensor instanceof tf8.Variable))}variable(){this.getFrozenParams().forEach(({path,tensor:tensor2})=>{this.reassignParamFromPath(path,tensor2.variable())})}freeze(){this.getTrainableParams().forEach(({path,tensor:variable})=>{let tensor2=tf8.tensor(variable.dataSync());variable.dispose(),this.reassignParamFromPath(path,tensor2)})}dispose(throwOnRedispose=!0){this.getParamList().forEach(param=>{if(throwOnRedispose&¶m.tensor.isDisposed)throw new Error(`param tensor has already been disposed for path ${param.path}`);param.tensor.dispose()}),this._params=void 0}serializeParams(){return new Float32Array(this.getParamList().map(({tensor:tensor2})=>Array.from(tensor2.dataSync())).reduce((flat,arr)=>flat.concat(arr)))}async load(weightsOrUrl){if(weightsOrUrl instanceof 
Float32Array){this.extractWeights(weightsOrUrl);return}await this.loadFromUri(weightsOrUrl)}async loadFromUri(uri){if(uri&&typeof uri!="string")throw new Error(`${this._name}.loadFromUri - expected model uri`);let weightMap=await loadWeightMap(uri,this.getDefaultModelName());this.loadFromWeightMap(weightMap)}async loadFromDisk(filePath){if(filePath&&typeof filePath!="string")throw new Error(`${this._name}.loadFromDisk - expected model file path`);let{readFile}=env.getEnv(),{manifestUri,modelBaseUri}=getModelUris(filePath,this.getDefaultModelName()),fetchWeightsFromDisk=filePaths=>Promise.all(filePaths.map(filePath2=>readFile(filePath2).then(buf=>buf.buffer))),loadWeights=tf8.io.weightsLoaderFactory(fetchWeightsFromDisk),manifest=JSON.parse((await readFile(manifestUri)).toString()),weightMap=await loadWeights(manifest,modelBaseUri);this.loadFromWeightMap(weightMap)}loadFromWeightMap(weightMap){let{paramMappings,params}=this.extractParamsFromWeigthMap(weightMap);this._paramMappings=paramMappings,this._params=params}extractWeights(weights){let{paramMappings,params}=this.extractParams(weights);this._paramMappings=paramMappings,this._params=params}traversePropertyPath(paramPath){if(!this.params)throw new Error("traversePropertyPath - model has no loaded params");let result=paramPath.split("/").reduce((res,objProp2)=>{if(!res.nextObj.hasOwnProperty(objProp2))throw new Error(`traversePropertyPath - object does not have property ${objProp2}, for path ${paramPath}`);return{obj:res.nextObj,objProp:objProp2,nextObj:res.nextObj[objProp2]}},{nextObj:this.params}),{obj,objProp}=result;if(!obj||!objProp||!(obj[objProp]instanceof tf8.Tensor))throw new Error(`traversePropertyPath - parameter is not a tensor, for path ${paramPath}`);return{obj,objProp}}},tf10=__toModule(require_tfjs_esm()),tf9=__toModule(require_tfjs_esm());function depthwiseSeparableConv(x,params,stride){return tf9.tidy(()=>{let 
out=tf9.separableConv2d(x,params.depthwise_filter,params.pointwise_filter,stride,"same");return out=tf9.add(out,params.bias),out})}function denseBlock3(x,denseBlockParams,isFirstLayer=!1){return tf10.tidy(()=>{let out1=tf10.relu(isFirstLayer?tf10.add(tf10.conv2d(x,denseBlockParams.conv0.filters,[2,2],"same"),denseBlockParams.conv0.bias):depthwiseSeparableConv(x,denseBlockParams.conv0,[2,2])),out2=depthwiseSeparableConv(out1,denseBlockParams.conv1,[1,1]),in3=tf10.relu(tf10.add(out1,out2)),out3=depthwiseSeparableConv(in3,denseBlockParams.conv2,[1,1]);return tf10.relu(tf10.add(out1,tf10.add(out2,out3)))})}function denseBlock4(x,denseBlockParams,isFirstLayer=!1,isScaleDown=!0){return tf10.tidy(()=>{let out1=tf10.relu(isFirstLayer?tf10.add(tf10.conv2d(x,denseBlockParams.conv0.filters,isScaleDown?[2,2]:[1,1],"same"),denseBlockParams.conv0.bias):depthwiseSeparableConv(x,denseBlockParams.conv0,isScaleDown?[2,2]:[1,1])),out2=depthwiseSeparableConv(out1,denseBlockParams.conv1,[1,1]),in3=tf10.relu(tf10.add(out1,out2)),out3=depthwiseSeparableConv(in3,denseBlockParams.conv2,[1,1]),in4=tf10.relu(tf10.add(out1,tf10.add(out2,out3))),out4=depthwiseSeparableConv(in4,denseBlockParams.conv3,[1,1]);return tf10.relu(tf10.add(out1,tf10.add(out2,tf10.add(out3,out4))))})}var tf11=__toModule(require_tfjs_esm());function convLayer(x,params,padding="same",withRelu=!1){return tf11.tidy(()=>{let out=tf11.add(tf11.conv2d(x,params.filters,[1,1],padding),params.bias);return withRelu?tf11.relu(out):out})}function disposeUnusedWeightTensors(weightMap,paramMappings){Object.keys(weightMap).forEach(path=>{paramMappings.some(pm=>pm.originalPath===path)||weightMap[path].dispose()})}var tf12=__toModule(require_tfjs_esm());function extractConvParamsFactory(extractWeights,paramMappings){return function(channelsIn,channelsOut,filterSize,mappedPrefix){let 
filters=tf12.tensor4d(extractWeights(channelsIn*channelsOut*filterSize*filterSize),[filterSize,filterSize,channelsIn,channelsOut]),bias=tf12.tensor1d(extractWeights(channelsOut));return paramMappings.push({paramPath:`${mappedPrefix}/filters`},{paramPath:`${mappedPrefix}/bias`}),{filters,bias}}}var tf13=__toModule(require_tfjs_esm());function extractFCParamsFactory(extractWeights,paramMappings){return function(channelsIn,channelsOut,mappedPrefix){let fc_weights=tf13.tensor2d(extractWeights(channelsIn*channelsOut),[channelsIn,channelsOut]),fc_bias=tf13.tensor1d(extractWeights(channelsOut));return paramMappings.push({paramPath:`${mappedPrefix}/weights`},{paramPath:`${mappedPrefix}/bias`}),{weights:fc_weights,bias:fc_bias}}}var tf14=__toModule(require_tfjs_esm()),SeparableConvParams=class{constructor(depthwise_filter,pointwise_filter,bias){this.depthwise_filter=depthwise_filter;this.pointwise_filter=pointwise_filter;this.bias=bias}};function extractSeparableConvParamsFactory(extractWeights,paramMappings){return function(channelsIn,channelsOut,mappedPrefix){let depthwise_filter=tf14.tensor4d(extractWeights(3*3*channelsIn),[3,3,channelsIn,1]),pointwise_filter=tf14.tensor4d(extractWeights(channelsIn*channelsOut),[1,1,channelsIn,channelsOut]),bias=tf14.tensor1d(extractWeights(channelsOut));return paramMappings.push({paramPath:`${mappedPrefix}/depthwise_filter`},{paramPath:`${mappedPrefix}/pointwise_filter`},{paramPath:`${mappedPrefix}/bias`}),new SeparableConvParams(depthwise_filter,pointwise_filter,bias)}}function loadSeparableConvParamsFactory(extractWeightEntry){return function(prefix){let depthwise_filter=extractWeightEntry(`${prefix}/depthwise_filter`,4),pointwise_filter=extractWeightEntry(`${prefix}/pointwise_filter`,4),bias=extractWeightEntry(`${prefix}/bias`,1);return new SeparableConvParams(depthwise_filter,pointwise_filter,bias)}}function extractWeightEntryFactory(weightMap,paramMappings){return function(originalPath,paramRank,mappedPath){let 
tensor2=weightMap[originalPath];if(!isTensor(tensor2,paramRank))throw new Error(`expected weightMap[${originalPath}] to be a Tensor${paramRank}D, instead have ${tensor2}`);return paramMappings.push({originalPath,paramPath:mappedPath||originalPath}),tensor2}}function extractWeightsFactory(weights){let remainingWeights=weights;function extractWeights(numWeights){let ret=remainingWeights.slice(0,numWeights);return remainingWeights=remainingWeights.slice(numWeights),ret}function getRemainingWeights(){return remainingWeights}return{extractWeights,getRemainingWeights}}function extractorsFactory(extractWeights,paramMappings){let extractConvParams=extractConvParamsFactory(extractWeights,paramMappings),extractSeparableConvParams=extractSeparableConvParamsFactory(extractWeights,paramMappings);function extractDenseBlock3Params(channelsIn,channelsOut,mappedPrefix,isFirstLayer=!1){let conv0=isFirstLayer?extractConvParams(channelsIn,channelsOut,3,`${mappedPrefix}/conv0`):extractSeparableConvParams(channelsIn,channelsOut,`${mappedPrefix}/conv0`),conv1=extractSeparableConvParams(channelsOut,channelsOut,`${mappedPrefix}/conv1`),conv22=extractSeparableConvParams(channelsOut,channelsOut,`${mappedPrefix}/conv2`);return{conv0,conv1,conv2:conv22}}function extractDenseBlock4Params(channelsIn,channelsOut,mappedPrefix,isFirstLayer=!1){let{conv0,conv1,conv2:conv22}=extractDenseBlock3Params(channelsIn,channelsOut,mappedPrefix,isFirstLayer),conv3=extractSeparableConvParams(channelsOut,channelsOut,`${mappedPrefix}/conv3`);return{conv0,conv1,conv2:conv22,conv3}}return{extractDenseBlock3Params,extractDenseBlock4Params}}function extractParams(weights){let 
paramMappings=[],{extractWeights,getRemainingWeights}=extractWeightsFactory(weights),{extractDenseBlock4Params}=extractorsFactory(extractWeights,paramMappings),dense0=extractDenseBlock4Params(3,32,"dense0",!0),dense1=extractDenseBlock4Params(32,64,"dense1"),dense2=extractDenseBlock4Params(64,128,"dense2"),dense3=extractDenseBlock4Params(128,256,"dense3");if(getRemainingWeights().length!==0)throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);return{paramMappings,params:{dense0,dense1,dense2,dense3}}}function loadConvParamsFactory(extractWeightEntry){return function(prefix){let filters=extractWeightEntry(`${prefix}/filters`,4),bias=extractWeightEntry(`${prefix}/bias`,1);return{filters,bias}}}function loadParamsFactory(weightMap,paramMappings){let extractWeightEntry=extractWeightEntryFactory(weightMap,paramMappings),extractConvParams=loadConvParamsFactory(extractWeightEntry),extractSeparableConvParams=loadSeparableConvParamsFactory(extractWeightEntry);function extractDenseBlock3Params(prefix,isFirstLayer=!1){let conv0=isFirstLayer?extractConvParams(`${prefix}/conv0`):extractSeparableConvParams(`${prefix}/conv0`),conv1=extractSeparableConvParams(`${prefix}/conv1`),conv22=extractSeparableConvParams(`${prefix}/conv2`);return{conv0,conv1,conv2:conv22}}function extractDenseBlock4Params(prefix,isFirstLayer=!1){let conv0=isFirstLayer?extractConvParams(`${prefix}/conv0`):extractSeparableConvParams(`${prefix}/conv0`),conv1=extractSeparableConvParams(`${prefix}/conv1`),conv22=extractSeparableConvParams(`${prefix}/conv2`),conv3=extractSeparableConvParams(`${prefix}/conv3`);return{conv0,conv1,conv2:conv22,conv3}}return{extractDenseBlock3Params,extractDenseBlock4Params}}function extractParamsFromWeigthMap(weightMap){let 
paramMappings=[],{extractDenseBlock4Params}=loadParamsFactory(weightMap,paramMappings),params={dense0:extractDenseBlock4Params("dense0",!0),dense1:extractDenseBlock4Params("dense1"),dense2:extractDenseBlock4Params("dense2"),dense3:extractDenseBlock4Params("dense3")};return disposeUnusedWeightTensors(weightMap,paramMappings),{params,paramMappings}}var FaceFeatureExtractor=class extends NeuralNetwork{constructor(){super("FaceFeatureExtractor")}forwardInput(input){let{params}=this;if(!params)throw new Error("FaceFeatureExtractor - load model before inference");return tf15.tidy(()=>{let batchTensor=tf15.cast(input.toBatchTensor(112,!0),"float32"),meanRgb=[122.782,117.001,104.298],normalized=normalize(batchTensor,meanRgb).div(tf15.scalar(255)),out=denseBlock4(normalized,params.dense0,!0);return out=denseBlock4(out,params.dense1),out=denseBlock4(out,params.dense2),out=denseBlock4(out,params.dense3),out=tf15.avgPool(out,[7,7],[2,2],"valid"),out})}async forward(input){return this.forwardInput(await toNetInput(input))}getDefaultModelName(){return"face_feature_extractor_model"}extractParamsFromWeigthMap(weightMap){return extractParamsFromWeigthMap(weightMap)}extractParams(weights){return extractParams(weights)}},tf17=__toModule(require_tfjs_esm()),tf16=__toModule(require_tfjs_esm());function fullyConnectedLayer(x,params){return tf16.tidy(()=>tf16.add(tf16.matMul(x,params.weights),params.bias))}function extractParams3(weights,channelsIn,channelsOut){let paramMappings=[],{extractWeights,getRemainingWeights}=extractWeightsFactory(weights),extractFCParams=extractFCParamsFactory(extractWeights,paramMappings),fc=extractFCParams(channelsIn,channelsOut,"fc");if(getRemainingWeights().length!==0)throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);return{paramMappings,params:{fc}}}function extractParamsFromWeigthMap3(weightMap){let paramMappings=[],extractWeightEntry=extractWeightEntryFactory(weightMap,paramMappings);function extractFcParams(prefix){let 
weights=extractWeightEntry(`${prefix}/weights`,2),bias=extractWeightEntry(`${prefix}/bias`,1);return{weights,bias}}let params={fc:extractFcParams("fc")};return disposeUnusedWeightTensors(weightMap,paramMappings),{params,paramMappings}}function seperateWeightMaps(weightMap){let featureExtractorMap={},classifierMap={};return Object.keys(weightMap).forEach(key=>{let map=key.startsWith("fc")?classifierMap:featureExtractorMap;map[key]=weightMap[key]}),{featureExtractorMap,classifierMap}}var FaceProcessor=class extends NeuralNetwork{constructor(_name,faceFeatureExtractor){super(_name);this._faceFeatureExtractor=faceFeatureExtractor}get faceFeatureExtractor(){return this._faceFeatureExtractor}runNet(input){let{params}=this;if(!params)throw new Error(`${this._name} - load model before inference`);return tf17.tidy(()=>{let bottleneckFeatures=input instanceof NetInput?this.faceFeatureExtractor.forwardInput(input):input;return fullyConnectedLayer(bottleneckFeatures.as2D(bottleneckFeatures.shape[0],-1),params.fc)})}dispose(throwOnRedispose=!0){this.faceFeatureExtractor.dispose(throwOnRedispose),super.dispose(throwOnRedispose)}loadClassifierParams(weights){let{params,paramMappings}=this.extractClassifierParams(weights);this._params=params,this._paramMappings=paramMappings}extractClassifierParams(weights){return extractParams3(weights,this.getClassifierChannelsIn(),this.getClassifierChannelsOut())}extractParamsFromWeigthMap(weightMap){let{featureExtractorMap,classifierMap}=seperateWeightMaps(weightMap);return this.faceFeatureExtractor.loadFromWeightMap(featureExtractorMap),extractParamsFromWeigthMap3(classifierMap)}extractParams(weights){let cIn=this.getClassifierChannelsIn(),cOut=this.getClassifierChannelsOut(),classifierWeightSize=cOut*cIn+cOut,featureExtractorWeights=weights.slice(0,weights.length-classifierWeightSize),classifierWeights=weights.slice(weights.length-classifierWeightSize);return 
this.faceFeatureExtractor.extractWeights(featureExtractorWeights),this.extractClassifierParams(classifierWeights)}},FACE_EXPRESSION_LABELS=["neutral","happy","sad","angry","fearful","disgusted","surprised"],FaceExpressions=class{constructor(probabilities){if(probabilities.length!==7)throw new Error(`FaceExpressions.constructor - expected probabilities.length to be 7, have: ${probabilities.length}`);FACE_EXPRESSION_LABELS.forEach((expression,idx)=>{this[expression]=probabilities[idx]})}asSortedArray(){return FACE_EXPRESSION_LABELS.map(expression=>({expression,probability:this[expression]})).sort((e0,e1)=>e1.probability-e0.probability)}},FaceExpressionNet=class extends FaceProcessor{constructor(faceFeatureExtractor=new FaceFeatureExtractor){super("FaceExpressionNet",faceFeatureExtractor)}forwardInput(input){return tf18.tidy(()=>tf18.softmax(this.runNet(input)))}async forward(input){return this.forwardInput(await toNetInput(input))}async predictExpressions(input){let netInput=await toNetInput(input),out=await this.forwardInput(netInput),probabilitesByBatch=await Promise.all(tf18.unstack(out).map(async t=>{let data=await t.data();return t.dispose(),data}));out.dispose();let predictionsByBatch=probabilitesByBatch.map(probabilites=>new FaceExpressions(probabilites));return netInput.isBatchInput?predictionsByBatch:predictionsByBatch[0]}getDefaultModelName(){return"face_expression_model"}getClassifierChannelsIn(){return 256}getClassifierChannelsOut(){return 7}};function isWithFaceExpressions(obj){return obj.expressions instanceof FaceExpressions}function extendWithFaceExpressions(sourceObj,expressions){let extension={expressions};return Object.assign({},sourceObj,extension)}function drawFaceExpressions(canvasArg,faceExpressions,minConfidence=.1,textFieldAnchor){let faceExpressionsArray=Array.isArray(faceExpressions)?faceExpressions:[faceExpressions];faceExpressionsArray.forEach(e=>{let expr=e instanceof FaceExpressions?e:isWithFaceExpressions(e)?e.expressions:void 
0;if(!expr)throw new Error("drawFaceExpressions - expected faceExpressions to be FaceExpressions | WithFaceExpressions<{}> or array thereof");let sorted=expr.asSortedArray(),resultsToDisplay=sorted.filter(expr2=>expr2.probability>minConfidence),anchor=isWithFaceDetection(e)?e.detection.box.bottomLeft:textFieldAnchor||new Point(0,0),drawTextField=new DrawTextField(resultsToDisplay.map(expr2=>`${expr2.expression} (${round(expr2.probability)})`),anchor);drawTextField.draw(canvasArg)})}function isWithFaceLandmarks(obj){return isWithFaceDetection(obj)&&obj.landmarks instanceof FaceLandmarks&&obj.unshiftedLandmarks instanceof FaceLandmarks&&obj.alignedRect instanceof FaceDetection}function extendWithFaceLandmarks(sourceObj,unshiftedLandmarks){let{box:shift}=sourceObj.detection,landmarks=unshiftedLandmarks.shiftBy(shift.x,shift.y),rect=landmarks.align(),{imageDims}=sourceObj.detection,alignedRect=new FaceDetection(sourceObj.detection.score,rect.rescale(imageDims.reverse()),imageDims),extension={landmarks,unshiftedLandmarks,alignedRect};return Object.assign({},sourceObj,extension)}var DrawFaceLandmarksOptions=class{constructor(options={}){let{drawLines=!0,drawPoints=!0,lineWidth,lineColor,pointSize,pointColor}=options;this.drawLines=drawLines,this.drawPoints=drawPoints,this.lineWidth=lineWidth||1,this.pointSize=pointSize||2,this.lineColor=lineColor||"rgba(0, 255, 255, 1)",this.pointColor=pointColor||"rgba(255, 0, 255, 1)"}},DrawFaceLandmarks=class{constructor(faceLandmarks,options={}){this.faceLandmarks=faceLandmarks,this.options=new DrawFaceLandmarksOptions(options)}draw(canvasArg){let ctx=getContext2dOrThrow(canvasArg),{drawLines,drawPoints,lineWidth,lineColor,pointSize,pointColor}=this.options;if(drawLines&&this.faceLandmarks instanceof 
FaceLandmarks68&&(ctx.strokeStyle=lineColor,ctx.lineWidth=lineWidth,drawContour(ctx,this.faceLandmarks.getJawOutline()),drawContour(ctx,this.faceLandmarks.getLeftEyeBrow()),drawContour(ctx,this.faceLandmarks.getRightEyeBrow()),drawContour(ctx,this.faceLandmarks.getNose()),drawContour(ctx,this.faceLandmarks.getLeftEye(),!0),drawContour(ctx,this.faceLandmarks.getRightEye(),!0),drawContour(ctx,this.faceLandmarks.getMouth(),!0)),drawPoints){ctx.strokeStyle=pointColor,ctx.fillStyle=pointColor;let drawPoint=pt=>{ctx.beginPath(),ctx.arc(pt.x,pt.y,pointSize,0,2*Math.PI),ctx.fill()};this.faceLandmarks.positions.forEach(drawPoint)}}};function drawFaceLandmarks(canvasArg,faceLandmarks){let faceLandmarksArray=Array.isArray(faceLandmarks)?faceLandmarks:[faceLandmarks];faceLandmarksArray.forEach(f=>{let landmarks=f instanceof FaceLandmarks?f:isWithFaceLandmarks(f)?f.landmarks:void 0;if(!landmarks)throw new Error("drawFaceLandmarks - expected faceExpressions to be FaceLandmarks | WithFaceLandmarks> or array thereof");new DrawFaceLandmarks(landmarks).draw(canvasArg)})}var tf20=__toModule(require_tfjs_esm()),tf19=__toModule(require_tfjs_esm());function extractorsFactory3(extractWeights,paramMappings){let extractConvParams=extractConvParamsFactory(extractWeights,paramMappings),extractSeparableConvParams=extractSeparableConvParamsFactory(extractWeights,paramMappings);function extractReductionBlockParams(channelsIn,channelsOut,mappedPrefix){let separable_conv0=extractSeparableConvParams(channelsIn,channelsOut,`${mappedPrefix}/separable_conv0`),separable_conv1=extractSeparableConvParams(channelsOut,channelsOut,`${mappedPrefix}/separable_conv1`),expansion_conv=extractConvParams(channelsIn,channelsOut,1,`${mappedPrefix}/expansion_conv`);return{separable_conv0,separable_conv1,expansion_conv}}function extractMainBlockParams(channels,mappedPrefix){let 
separable_conv0=extractSeparableConvParams(channels,channels,`${mappedPrefix}/separable_conv0`),separable_conv1=extractSeparableConvParams(channels,channels,`${mappedPrefix}/separable_conv1`),separable_conv2=extractSeparableConvParams(channels,channels,`${mappedPrefix}/separable_conv2`);return{separable_conv0,separable_conv1,separable_conv2}}return{extractConvParams,extractSeparableConvParams,extractReductionBlockParams,extractMainBlockParams}}function extractParams5(weights,numMainBlocks){let paramMappings=[],{extractWeights,getRemainingWeights}=extractWeightsFactory(weights),{extractConvParams,extractSeparableConvParams,extractReductionBlockParams,extractMainBlockParams}=extractorsFactory3(extractWeights,paramMappings),entry_flow_conv_in=extractConvParams(3,32,3,"entry_flow/conv_in"),entry_flow_reduction_block_0=extractReductionBlockParams(32,64,"entry_flow/reduction_block_0"),entry_flow_reduction_block_1=extractReductionBlockParams(64,128,"entry_flow/reduction_block_1"),entry_flow={conv_in:entry_flow_conv_in,reduction_block_0:entry_flow_reduction_block_0,reduction_block_1:entry_flow_reduction_block_1},middle_flow={};range(numMainBlocks,0,1).forEach(idx=>{middle_flow[`main_block_${idx}`]=extractMainBlockParams(128,`middle_flow/main_block_${idx}`)});let exit_flow_reduction_block=extractReductionBlockParams(128,256,"exit_flow/reduction_block"),exit_flow_separable_conv=extractSeparableConvParams(256,512,"exit_flow/separable_conv"),exit_flow={reduction_block:exit_flow_reduction_block,separable_conv:exit_flow_separable_conv};if(getRemainingWeights().length!==0)throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);return{paramMappings,params:{entry_flow,middle_flow,exit_flow}}}function loadParamsFactory3(weightMap,paramMappings){let 
extractWeightEntry=extractWeightEntryFactory(weightMap,paramMappings),extractConvParams=loadConvParamsFactory(extractWeightEntry),extractSeparableConvParams=loadSeparableConvParamsFactory(extractWeightEntry);function extractReductionBlockParams(mappedPrefix){let separable_conv0=extractSeparableConvParams(`${mappedPrefix}/separable_conv0`),separable_conv1=extractSeparableConvParams(`${mappedPrefix}/separable_conv1`),expansion_conv=extractConvParams(`${mappedPrefix}/expansion_conv`);return{separable_conv0,separable_conv1,expansion_conv}}function extractMainBlockParams(mappedPrefix){let separable_conv0=extractSeparableConvParams(`${mappedPrefix}/separable_conv0`),separable_conv1=extractSeparableConvParams(`${mappedPrefix}/separable_conv1`),separable_conv2=extractSeparableConvParams(`${mappedPrefix}/separable_conv2`);return{separable_conv0,separable_conv1,separable_conv2}}return{extractConvParams,extractSeparableConvParams,extractReductionBlockParams,extractMainBlockParams}}function extractParamsFromWeigthMap5(weightMap,numMainBlocks){let paramMappings=[],{extractConvParams,extractSeparableConvParams,extractReductionBlockParams,extractMainBlockParams}=loadParamsFactory3(weightMap,paramMappings),entry_flow_conv_in=extractConvParams("entry_flow/conv_in"),entry_flow_reduction_block_0=extractReductionBlockParams("entry_flow/reduction_block_0"),entry_flow_reduction_block_1=extractReductionBlockParams("entry_flow/reduction_block_1"),entry_flow={conv_in:entry_flow_conv_in,reduction_block_0:entry_flow_reduction_block_0,reduction_block_1:entry_flow_reduction_block_1},middle_flow={};range(numMainBlocks,0,1).forEach(idx=>{middle_flow[`main_block_${idx}`]=extractMainBlockParams(`middle_flow/main_block_${idx}`)});let exit_flow_reduction_block=extractReductionBlockParams("exit_flow/reduction_block"),exit_flow_separable_conv=extractSeparableConvParams("exit_flow/separable_conv"),exit_flow={reduction_block:exit_flow_reduction_block,separable_conv:exit_flow_separable_conv};return 
disposeUnusedWeightTensors(weightMap,paramMappings),{params:{entry_flow,middle_flow,exit_flow},paramMappings}}function conv(x,params,stride){return tf19.add(tf19.conv2d(x,params.filters,stride,"same"),params.bias)}function reductionBlock(x,params,isActivateInput=!0){let out=isActivateInput?tf19.relu(x):x;return out=depthwiseSeparableConv(out,params.separable_conv0,[1,1]),out=depthwiseSeparableConv(tf19.relu(out),params.separable_conv1,[1,1]),out=tf19.maxPool(out,[3,3],[2,2],"same"),out=tf19.add(out,conv(x,params.expansion_conv,[2,2])),out}function mainBlock(x,params){let out=depthwiseSeparableConv(tf19.relu(x),params.separable_conv0,[1,1]);return out=depthwiseSeparableConv(tf19.relu(out),params.separable_conv1,[1,1]),out=depthwiseSeparableConv(tf19.relu(out),params.separable_conv2,[1,1]),out=tf19.add(out,x),out}var TinyXception=class extends NeuralNetwork{constructor(numMainBlocks){super("TinyXception");this._numMainBlocks=numMainBlocks}forwardInput(input){let{params}=this;if(!params)throw new Error("TinyXception - load model before inference");return tf19.tidy(()=>{let batchTensor=tf19.cast(input.toBatchTensor(112,!0),"float32"),meanRgb=[122.782,117.001,104.298],normalized=normalize(batchTensor,meanRgb).div(tf19.scalar(256)),out=tf19.relu(conv(normalized,params.entry_flow.conv_in,[2,2]));return out=reductionBlock(out,params.entry_flow.reduction_block_0,!1),out=reductionBlock(out,params.entry_flow.reduction_block_1),range(this._numMainBlocks,0,1).forEach(idx=>{out=mainBlock(out,params.middle_flow[`main_block_${idx}`])}),out=reductionBlock(out,params.exit_flow.reduction_block),out=tf19.relu(depthwiseSeparableConv(out,params.exit_flow.separable_conv,[1,1])),out})}async forward(input){return this.forwardInput(await toNetInput(input))}getDefaultModelName(){return"tiny_xception_model"}extractParamsFromWeigthMap(weightMap){return extractParamsFromWeigthMap5(weightMap,this._numMainBlocks)}extractParams(weights){return extractParams5(weights,this._numMainBlocks)}};function 
extractParams7(weights){let paramMappings=[],{extractWeights,getRemainingWeights}=extractWeightsFactory(weights),extractFCParams=extractFCParamsFactory(extractWeights,paramMappings),age=extractFCParams(512,1,"fc/age"),gender=extractFCParams(512,2,"fc/gender");if(getRemainingWeights().length!==0)throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);return{paramMappings,params:{fc:{age,gender}}}}function extractParamsFromWeigthMap7(weightMap){let paramMappings=[],extractWeightEntry=extractWeightEntryFactory(weightMap,paramMappings);function extractFcParams(prefix){let weights=extractWeightEntry(`${prefix}/weights`,2),bias=extractWeightEntry(`${prefix}/bias`,1);return{weights,bias}}let params={fc:{age:extractFcParams("fc/age"),gender:extractFcParams("fc/gender")}};return disposeUnusedWeightTensors(weightMap,paramMappings),{params,paramMappings}}var Gender;(function(Gender2){Gender2.FEMALE="female",Gender2.MALE="male"})(Gender||(Gender={}));var AgeGenderNet=class extends NeuralNetwork{constructor(faceFeatureExtractor=new TinyXception(2)){super("AgeGenderNet");this._faceFeatureExtractor=faceFeatureExtractor}get faceFeatureExtractor(){return this._faceFeatureExtractor}runNet(input){let{params}=this;if(!params)throw new Error(`${this._name} - load model before inference`);return tf20.tidy(()=>{let bottleneckFeatures=input instanceof NetInput?this.faceFeatureExtractor.forwardInput(input):input,pooled=tf20.avgPool(bottleneckFeatures,[7,7],[2,2],"valid").as2D(bottleneckFeatures.shape[0],-1),age=fullyConnectedLayer(pooled,params.fc.age).as1D(),gender=fullyConnectedLayer(pooled,params.fc.gender);return{age,gender}})}forwardInput(input){return tf20.tidy(()=>{let{age,gender}=this.runNet(input);return{age,gender:tf20.softmax(gender)}})}async forward(input){return this.forwardInput(await toNetInput(input))}async predictAgeAndGender(input){let netInput=await toNetInput(input),out=await 
this.forwardInput(netInput),ages=tf20.unstack(out.age),genders=tf20.unstack(out.gender),ageAndGenderTensors=ages.map((ageTensor,i)=>({ageTensor,genderTensor:genders[i]})),predictionsByBatch=await Promise.all(ageAndGenderTensors.map(async({ageTensor,genderTensor})=>{let age=(await ageTensor.data())[0],probMale=(await genderTensor.data())[0],isMale=probMale>.5,gender=isMale?Gender.MALE:Gender.FEMALE,genderProbability=isMale?probMale:1-probMale;return ageTensor.dispose(),genderTensor.dispose(),{age,gender,genderProbability}}));return out.age.dispose(),out.gender.dispose(),netInput.isBatchInput?predictionsByBatch:predictionsByBatch[0]}getDefaultModelName(){return"age_gender_model"}dispose(throwOnRedispose=!0){this.faceFeatureExtractor.dispose(throwOnRedispose),super.dispose(throwOnRedispose)}loadClassifierParams(weights){let{params,paramMappings}=this.extractClassifierParams(weights);this._params=params,this._paramMappings=paramMappings}extractClassifierParams(weights){return extractParams7(weights)}extractParamsFromWeigthMap(weightMap){let{featureExtractorMap,classifierMap}=seperateWeightMaps(weightMap);return this.faceFeatureExtractor.loadFromWeightMap(featureExtractorMap),extractParamsFromWeigthMap7(classifierMap)}extractParams(weights){let classifierWeightSize=512*1+1+(512*2+2),featureExtractorWeights=weights.slice(0,weights.length-classifierWeightSize),classifierWeights=weights.slice(weights.length-classifierWeightSize);return this.faceFeatureExtractor.extractWeights(featureExtractorWeights),this.extractClassifierParams(classifierWeights)}};var tf21=__toModule(require_tfjs_esm()),FaceLandmark68NetBase=class extends FaceProcessor{postProcess(output,inputSize,originalDimensions){let inputDimensions=originalDimensions.map(({width,height})=>{let scale2=inputSize/Math.max(height,width);return{width:width*scale2,height:height*scale2}}),batchSize=inputDimensions.length;return tf21.tidy(()=>{let 
createInterleavedTensor=(fillX,fillY)=>tf21.stack([tf21.fill([68],fillX,"float32"),tf21.fill([68],fillY,"float32")],1).as2D(1,136).as1D(),getPadding=(batchIdx,cond)=>{let{width,height}=inputDimensions[batchIdx];return cond(width,height)?Math.abs(width-height)/2:0},getPaddingX=batchIdx=>getPadding(batchIdx,(w,h)=>w