///
export declare type AgeAndGenderPrediction = {
age: number;
gender: Gender;
genderProbability: number;
};
export declare class AgeGenderNet extends NeuralNetwork<NetParams> {
private _faceFeatureExtractor;
constructor(faceFeatureExtractor?: TinyXception);
get faceFeatureExtractor(): TinyXception;
runNet(input: NetInput | tf.Tensor4D): NetOutput;
forwardInput(input: NetInput | tf.Tensor4D): NetOutput;
forward(input: TNetInput): Promise<NetOutput>;
predictAgeAndGender(input: TNetInput): Promise<AgeAndGenderPrediction | AgeAndGenderPrediction[]>;
protected getDefaultModelName(): string;
dispose(throwOnRedispose?: boolean): void;
loadClassifierParams(weights: Float32Array): void;
extractClassifierParams(weights: Float32Array): {
params: NetParams;
paramMappings: ParamMapping[];
};
protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: NetParams;
paramMappings: ParamMapping[];
};
protected extractParams(weights: Float32Array): {
params: NetParams;
paramMappings: ParamMapping[];
};
}
export declare const allFaces: typeof allFacesSsdMobilenetv1;
export declare function allFacesSsdMobilenetv1(input: TNetInput, minConfidence?: number): Promise<WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]>;
export declare function allFacesTinyYolov2(input: TNetInput, forwardParams?: ITinyYolov2Options): Promise<WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]>;
declare enum AnchorPosition {
TOP_LEFT = "TOP_LEFT",
TOP_RIGHT = "TOP_RIGHT",
BOTTOM_LEFT = "BOTTOM_LEFT",
BOTTOM_RIGHT = "BOTTOM_RIGHT"
}
/** @docalias number[] */
declare interface ArrayMap {
R0: number;
R1: number[];
R2: number[][];
R3: number[][][];
R4: number[][][][];
R5: number[][][][][];
R6: number[][][][][][];
}
export declare function awaitMediaLoaded(media: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement): Promise<void>;
export declare type BatchNorm = {
sub: tf.Tensor1D;
truediv: tf.Tensor1D;
};
export declare class BoundingBox extends Box implements IBoundingBox {
constructor(left: number, top: number, right: number, bottom: number, allowNegativeDimensions?: boolean);
}
export declare class Box implements IBoundingBox, IRect {
static isRect(rect: any): boolean;
static assertIsValidBox(box: any, callee: string, allowNegativeDimensions?: boolean): void;
private _x;
private _y;
private _width;
private _height;
constructor(_box: IBoundingBox | IRect, allowNegativeDimensions?: boolean);
get x(): number;
get y(): number;
get width(): number;
get height(): number;
get left(): number;
get top(): number;
get right(): number;
get bottom(): number;
get area(): number;
get topLeft(): Point;
get topRight(): Point;
get bottomLeft(): Point;
get bottomRight(): Point;
round(): Box;
floor(): Box;
toSquare(): Box;
rescale(s: IDimensions | number): Box;
pad(padX: number, padY: number): Box;
clipAtImageBorders(imgWidth: number, imgHeight: number): Box;
shift(sx: number, sy: number): Box;
padAtBorders(imageHeight: number, imageWidth: number): {
dy: number;
edy: number;
dx: number;
edx: number;
y: number;
ey: number;
x: number;
ex: number;
w: number;
h: number;
};
calibrate(region: Box): Box;
}
declare type BoxPredictionParams = {
box_encoding_predictor: ConvParams;
class_predictor: ConvParams;
};
export declare function bufferToImage(buf: Blob): Promise<HTMLImageElement>;
export declare class ComposableTask<T> {
then(onfulfilled: (value: T) => T | PromiseLike<T>): Promise<T>;
run(): Promise<T>;
}
export declare class ComputeAllFaceDescriptorsTask<TSource extends WithFaceLandmarks<WithFaceDetection<{}>>> extends ComputeFaceDescriptorsTaskBase<WithFaceDescriptor<TSource>[], TSource[]> {
run(): Promise<WithFaceDescriptor<TSource>[]>;
withFaceExpressions(): PredictAllFaceExpressionsWithFaceAlignmentTask<WithFaceDescriptor<TSource>>;
withAgeAndGender(): PredictAllAgeAndGenderWithFaceAlignmentTask<WithFaceDescriptor<TSource>>;
}
/**
* Computes a 128 entry vector (face descriptor / face embeddings) from the face shown in an image,
* which uniquely represents the features of that person's face. The computed face descriptor can
* be used to measure the similarity between faces, by computing the euclidean distance between
* two face descriptors.
*
* @param inputs The face image extracted from the aligned bounding box of a face. Can
* also be an array of input images, which will be batch processed.
* @returns Face descriptor with 128 entries or array thereof in case of batch input.
*/
export declare const computeFaceDescriptor: (input: TNetInput) => Promise<Float32Array | Float32Array[]>;
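/*
* Usage sketch (illustrative, not part of the declarations). It assumes the
* package is imported as `faceapi`, that loadFaceRecognitionModel() has
* resolved, and that `alignedFace` is a canvas holding an aligned face crop:
*
*   const descriptor = await faceapi.computeFaceDescriptor(alignedFace) as Float32Array;
*   // descriptor.length === 128; compare two faces via euclideanDistance(d1, d2)
*/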
export declare class ComputeFaceDescriptorsTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
protected parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>;
protected input: TNetInput;
constructor(parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>, input: TNetInput);
}
declare function computeReshapedDimensions({ width, height }: IDimensions, inputSize: number): Dimensions;
export declare class ComputeSingleFaceDescriptorTask<TSource extends WithFaceLandmarks<WithFaceDetection<{}>>> extends ComputeFaceDescriptorsTaskBase<WithFaceDescriptor<TSource> | undefined, TSource | undefined> {
run(): Promise<WithFaceDescriptor<TSource> | undefined>;
withFaceExpressions(): PredictSingleFaceExpressionsWithFaceAlignmentTask<WithFaceDescriptor<TSource>>;
withAgeAndGender(): PredictSingleAgeAndGenderWithFaceAlignmentTask<WithFaceDescriptor<TSource>>;
}
declare type ConvLayerParams = {
conv: ConvParams;
scale: ScaleLayerParams;
};
declare type ConvParams = {
filters: tf.Tensor4D;
bias: tf.Tensor1D;
};
export declare type ConvWithBatchNorm = {
conv: ConvParams;
bn: BatchNorm;
};
declare function createBrowserEnv(): Environment;
export declare function createCanvas({ width, height }: IDimensions): HTMLCanvasElement;
export declare function createCanvasFromMedia(media: HTMLImageElement | HTMLVideoElement | ImageData, dims?: IDimensions): HTMLCanvasElement;
export declare function createFaceDetectionNet(weights: Float32Array): SsdMobilenetv1;
export declare function createFaceRecognitionNet(weights: Float32Array): FaceRecognitionNet;
declare function createFileSystem(fs?: any): FileSystem_2;
declare function createNodejsEnv(): Environment;
export declare function createSsdMobilenetv1(weights: Float32Array): SsdMobilenetv1;
export declare function createTinyFaceDetector(weights: Float32Array): TinyFaceDetector;
export declare function createTinyYolov2(weights: Float32Array, withSeparableConvs?: boolean): TinyYolov2;
/**
* We wrap data id since we use weak map to avoid memory leaks.
* Since we have our own memory management, we have a reference counter
* mapping a tensor to its data, so there is always a pointer (even if that
* data is otherwise garbage collectable).
* See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/
* Global_Objects/WeakMap
*/
declare type DataId = object;
/** @docalias 'float32'|'int32'|'bool'|'complex64'|'string' */
declare type DataType = keyof DataTypeMap;
declare interface DataTypeMap {
float32: Float32Array;
int32: Int32Array;
bool: Uint8Array;
complex64: Float32Array;
string: string[];
}
export declare type DefaultTinyYolov2NetParams = {
conv0: ConvWithBatchNorm;
conv1: ConvWithBatchNorm;
conv2: ConvWithBatchNorm;
conv3: ConvWithBatchNorm;
conv4: ConvWithBatchNorm;
conv5: ConvWithBatchNorm;
conv6: ConvWithBatchNorm;
conv7: ConvWithBatchNorm;
conv8: ConvParams;
};
declare type DenseBlock3Params = {
conv0: SeparableConvParams | ConvParams;
conv1: SeparableConvParams;
conv2: SeparableConvParams;
};
declare type DenseBlock4Params = DenseBlock3Params & {
conv3: SeparableConvParams;
};
export declare class DetectAllFaceLandmarksTask<TSource extends WithFaceDetection<{}>> extends DetectFaceLandmarksTaskBase<WithFaceLandmarks<TSource>[], TSource[]> {
run(): Promise<WithFaceLandmarks<TSource>[]>;
withFaceExpressions(): PredictAllFaceExpressionsWithFaceAlignmentTask<WithFaceLandmarks<TSource>>;
withAgeAndGender(): PredictAllAgeAndGenderWithFaceAlignmentTask<WithFaceLandmarks<TSource>>;
withFaceDescriptors(): ComputeAllFaceDescriptorsTask<WithFaceLandmarks<TSource>>;
}
export declare function detectAllFaces(input: TNetInput, options?: FaceDetectionOptions): DetectAllFacesTask;
export declare class DetectAllFacesTask extends DetectFacesTaskBase<FaceDetection[]> {
run(): Promise<FaceDetection[]>;
private runAndExtendWithFaceDetections;
withFaceLandmarks(useTinyLandmarkNet?: boolean): DetectAllFaceLandmarksTask<{
detection: FaceDetection;
}>;
withFaceExpressions(): PredictAllFaceExpressionsTask<{
detection: FaceDetection;
}>;
withAgeAndGender(): PredictAllAgeAndGenderTask<{
detection: FaceDetection;
}>;
}
/**
* Detects the 68 point face landmark positions of the face shown in an image.
*
* @param inputs The face image extracted from the bounding box of a face. Can
* also be an array of input images, which will be batch processed.
* @returns 68 point face landmarks or array thereof in case of batch input.
*/
export declare const detectFaceLandmarks: (input: TNetInput) => Promise<FaceLandmarks68 | FaceLandmarks68[]>;
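/*
* Usage sketch (illustrative, not part of the declarations). Assumes `faceapi`
* is the imported package, loadFaceLandmarkModel() has resolved and
* `faceCanvas` holds a face crop:
*
*   const landmarks = await faceapi.detectFaceLandmarks(faceCanvas) as FaceLandmarks68;
*   const mouth = landmarks.getMouth(); // Point[] in image coordinates
*/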
export declare class DetectFaceLandmarksTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
protected parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>;
protected input: TNetInput;
protected useTinyLandmarkNet: boolean;
constructor(parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>, input: TNetInput, useTinyLandmarkNet: boolean);
protected get landmarkNet(): FaceLandmark68Net | FaceLandmark68TinyNet;
}
/**
* Detects the 68 point face landmark positions of the face shown in an image
* using a tinier version of the 68 point face landmark model, which is slightly
* faster at inference, but also slightly less accurate.
*
* @param inputs The face image extracted from the bounding box of a face. Can
* also be an array of input images, which will be batch processed.
* @returns 68 point face landmarks or array thereof in case of batch input.
*/
export declare const detectFaceLandmarksTiny: (input: TNetInput) => Promise<FaceLandmarks68 | FaceLandmarks68[]>;
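/*
* Usage sketch for the tiny variant (illustrative; the model path is
* hypothetical):
*
*   await faceapi.loadFaceLandmarkTinyModel('/models');
*   const landmarks = await faceapi.detectFaceLandmarksTiny(faceCanvas) as FaceLandmarks68;
*/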
export declare class DetectFacesTaskBase<TReturn> extends ComposableTask<TReturn> {
protected input: TNetInput;
protected options: FaceDetectionOptions;
constructor(input: TNetInput, options?: FaceDetectionOptions);
}
export declare const detectLandmarks: (input: TNetInput) => Promise<FaceLandmarks68 | FaceLandmarks68[]>;
export declare function detectSingleFace(input: TNetInput, options?: FaceDetectionOptions): DetectSingleFaceTask;
export declare class DetectSingleFaceLandmarksTask<TSource extends WithFaceDetection<{}>> extends DetectFaceLandmarksTaskBase<WithFaceLandmarks<TSource> | undefined, TSource | undefined> {
run(): Promise<WithFaceLandmarks<TSource> | undefined>;
withFaceExpressions(): PredictSingleFaceExpressionsWithFaceAlignmentTask<WithFaceLandmarks<TSource>>;
withAgeAndGender(): PredictSingleAgeAndGenderWithFaceAlignmentTask<WithFaceLandmarks<TSource>>;
withFaceDescriptor(): ComputeSingleFaceDescriptorTask<WithFaceLandmarks<TSource>>;
}
export declare class DetectSingleFaceTask extends DetectFacesTaskBase<FaceDetection | undefined> {
run(): Promise<FaceDetection | undefined>;
private runAndExtendWithFaceDetection;
withFaceLandmarks(useTinyLandmarkNet?: boolean): DetectSingleFaceLandmarksTask<{
detection: FaceDetection;
}>;
withFaceExpressions(): PredictSingleFaceExpressionsTask<{
detection: FaceDetection;
}>;
withAgeAndGender(): PredictSingleAgeAndGenderTask<{
detection: FaceDetection;
}>;
}
export declare class Dimensions implements IDimensions {
private _width;
private _height;
constructor(width: number, height: number);
get width(): number;
get height(): number;
reverse(): Dimensions;
}
declare namespace draw {
export {
drawContour,
drawDetections,
TDrawDetectionsInput,
drawFaceExpressions,
DrawFaceExpressionsInput,
IDrawBoxOptions,
DrawBoxOptions,
DrawBox,
drawFaceLandmarks,
IDrawFaceLandmarksOptions,
DrawFaceLandmarksOptions,
DrawFaceLandmarks,
DrawFaceLandmarksInput,
AnchorPosition,
IDrawTextFieldOptions,
DrawTextFieldOptions,
DrawTextField
}
}
export { draw }
declare class DrawBox {
box: Box;
options: DrawBoxOptions;
constructor(box: IBoundingBox | IRect, options?: IDrawBoxOptions);
draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): void;
}
declare class DrawBoxOptions {
boxColor: string;
lineWidth: number;
drawLabelOptions: DrawTextFieldOptions;
label?: string;
constructor(options?: IDrawBoxOptions);
}
declare function drawContour(ctx: CanvasRenderingContext2D, points: Point[], isClosed?: boolean): void;
declare function drawDetections(canvasArg: string | HTMLCanvasElement, detections: TDrawDetectionsInput | Array<TDrawDetectionsInput>): void;
declare function drawFaceExpressions(canvasArg: string | HTMLCanvasElement, faceExpressions: DrawFaceExpressionsInput | Array<DrawFaceExpressionsInput>, minConfidence?: number, textFieldAnchor?: IPoint): void;
declare type DrawFaceExpressionsInput = FaceExpressions | WithFaceExpressions<{}>;
declare class DrawFaceLandmarks {
faceLandmarks: FaceLandmarks;
options: DrawFaceLandmarksOptions;
constructor(faceLandmarks: FaceLandmarks, options?: IDrawFaceLandmarksOptions);
draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): void;
}
declare function drawFaceLandmarks(canvasArg: string | HTMLCanvasElement, faceLandmarks: DrawFaceLandmarksInput | Array<DrawFaceLandmarksInput>): void;
declare type DrawFaceLandmarksInput = FaceLandmarks | WithFaceLandmarks<WithFaceDetection<{}>>;
declare class DrawFaceLandmarksOptions {
drawLines: boolean;
drawPoints: boolean;
lineWidth: number;
pointSize: number;
lineColor: string;
pointColor: string;
constructor(options?: IDrawFaceLandmarksOptions);
}
declare class DrawTextField {
text: string[];
anchor: IPoint;
options: DrawTextFieldOptions;
constructor(text: string | string[] | DrawTextField, anchor: IPoint, options?: IDrawTextFieldOptions);
measureWidth(ctx: CanvasRenderingContext2D): number;
measureHeight(): number;
getUpperLeft(ctx: CanvasRenderingContext2D, canvasDims?: IDimensions): IPoint;
draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): void;
}
declare class DrawTextFieldOptions implements IDrawTextFieldOptions {
anchorPosition: AnchorPosition;
backgroundColor: string;
fontColor: string;
fontSize: number;
fontStyle: string;
padding: number;
constructor(options?: IDrawTextFieldOptions);
}
export declare const env: {
getEnv: typeof getEnv;
setEnv: typeof setEnv;
initialize: typeof initialize;
createBrowserEnv: typeof createBrowserEnv;
createFileSystem: typeof createFileSystem;
createNodejsEnv: typeof createNodejsEnv;
monkeyPatch: typeof monkeyPatch;
isBrowser: typeof isBrowser;
isNodejs: typeof isNodejs;
};
export declare type Environment = FileSystem_2 & {
Canvas: typeof HTMLCanvasElement;
CanvasRenderingContext2D: typeof CanvasRenderingContext2D;
Image: typeof HTMLImageElement;
ImageData: typeof ImageData;
Video: typeof HTMLVideoElement;
createCanvasElement: () => HTMLCanvasElement;
createImageElement: () => HTMLImageElement;
createVideoElement: () => HTMLVideoElement;
fetch: (url: string, init?: RequestInit) => Promise<Response>;
};
export declare function euclideanDistance(arr1: number[] | Float32Array, arr2: number[] | Float32Array): number;
export declare function extendWithAge<TSource>(sourceObj: TSource, age: number): WithAge<TSource>;
export declare function extendWithFaceDescriptor<TSource>(sourceObj: TSource, descriptor: Float32Array): WithFaceDescriptor<TSource>;
export declare function extendWithFaceDetection<TSource>(sourceObj: TSource, detection: FaceDetection): WithFaceDetection<TSource>;
export declare function extendWithFaceExpressions<TSource>(sourceObj: TSource, expressions: FaceExpressions): WithFaceExpressions<TSource>;
export declare function extendWithFaceLandmarks<TSource extends WithFaceDetection<{}>, TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>(sourceObj: TSource, unshiftedLandmarks: TFaceLandmarks): WithFaceLandmarks<TSource, TFaceLandmarks>;
export declare function extendWithGender<TSource>(sourceObj: TSource, gender: Gender, genderProbability: number): WithGender<TSource>;
/**
* Extracts the image regions containing the detected faces.
*
* @param input The image that face detection has been performed on.
* @param detections The face detection results or face bounding boxes for that image.
* @returns The Canvases of the corresponding image region for each detected face.
*/
export declare function extractFaces(input: TNetInput, detections: Array<FaceDetection | Rect>): Promise<HTMLCanvasElement[]>;
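/*
* Usage sketch (illustrative, not part of the declarations). Assumes `faceapi`
* and a loaded detection model:
*
*   const detections = await faceapi.detectAllFaces(img);
*   const faceCanvases = await faceapi.extractFaces(img, detections);
*/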
/**
* Extracts the tensors of the image regions containing the detected faces.
* Useful if you want to compute the face descriptors for the face images.
* Using this method is faster than extracting a canvas for each face and
* converting them to tensors individually.
*
* @param imageTensor The image tensor that face detection has been performed on.
* @param detections The face detection results or face bounding boxes for that image.
* @returns Tensors of the corresponding image region for each detected face.
*/
export declare function extractFaceTensors(imageTensor: tf.Tensor3D | tf.Tensor4D, detections: Array<FaceDetection | Rect>): Promise<tf.Tensor3D[]>;
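/*
* Usage sketch (illustrative). `imgTensor` is assumed to be a tf.Tensor3D of
* the full image and `detections` a FaceDetection[] obtained for that image:
*
*   const faceTensors = await faceapi.extractFaceTensors(imgTensor, detections);
*   // ... use the tensors, then free their memory manually:
*   faceTensors.forEach((t) => t.dispose());
*/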
export declare const FACE_EXPRESSION_LABELS: string[];
export declare class FaceDetection extends ObjectDetection implements IFaceDetecion {
constructor(score: number, relativeBox: Rect, imageDims: IDimensions);
forSize(width: number, height: number): FaceDetection;
}
export declare type FaceDetectionFunction = (input: TNetInput) => Promise<FaceDetection[]>;
export declare class FaceDetectionNet extends SsdMobilenetv1 {
}
export declare type FaceDetectionOptions = TinyFaceDetectorOptions | SsdMobilenetv1Options | TinyYolov2Options;
export declare class FaceExpressionNet extends FaceProcessor<FaceFeatureExtractorParams> {
constructor(faceFeatureExtractor?: FaceFeatureExtractor);
forwardInput(input: NetInput | tf.Tensor4D): tf.Tensor2D;
forward(input: TNetInput): Promise<tf.Tensor2D>;
predictExpressions(input: TNetInput): Promise<FaceExpressions | FaceExpressions[]>;
protected getDefaultModelName(): string;
protected getClassifierChannelsIn(): number;
protected getClassifierChannelsOut(): number;
}
export declare class FaceExpressions {
neutral: number;
happy: number;
sad: number;
angry: number;
fearful: number;
disgusted: number;
surprised: number;
constructor(probabilities: number[] | Float32Array);
asSortedArray(): {
expression: string;
probability: number;
}[];
}
declare class FaceFeatureExtractor extends NeuralNetwork<FaceFeatureExtractorParams> implements IFaceFeatureExtractor<FaceFeatureExtractorParams> {
constructor();
forwardInput(input: NetInput): tf.Tensor4D;
forward(input: TNetInput): Promise<tf.Tensor4D>;
protected getDefaultModelName(): string;
protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: FaceFeatureExtractorParams;
paramMappings: ParamMapping[];
};
protected extractParams(weights: Float32Array): {
params: FaceFeatureExtractorParams;
paramMappings: ParamMapping[];
};
}
declare type FaceFeatureExtractorParams = {
dense0: DenseBlock4Params;
dense1: DenseBlock4Params;
dense2: DenseBlock4Params;
dense3: DenseBlock4Params;
};
export declare class FaceLandmark68Net extends FaceLandmark68NetBase<FaceFeatureExtractorParams> {
constructor(faceFeatureExtractor?: FaceFeatureExtractor);
protected getDefaultModelName(): string;
protected getClassifierChannelsIn(): number;
}
declare abstract class FaceLandmark68NetBase<TExtractorParams extends FaceFeatureExtractorParams | TinyFaceFeatureExtractorParams> extends FaceProcessor<TExtractorParams> {
postProcess(output: tf.Tensor2D, inputSize: number, originalDimensions: IDimensions[]): tf.Tensor2D;
forwardInput(input: NetInput): tf.Tensor2D;
forward(input: TNetInput): Promise<tf.Tensor2D>;
detectLandmarks(input: TNetInput): Promise<FaceLandmarks68 | FaceLandmarks68[]>;
protected getClassifierChannelsOut(): number;
}
export declare class FaceLandmark68TinyNet extends FaceLandmark68NetBase<TinyFaceFeatureExtractorParams> {
constructor(faceFeatureExtractor?: TinyFaceFeatureExtractor);
protected getDefaultModelName(): string;
protected getClassifierChannelsIn(): number;
}
export declare class FaceLandmarkNet extends FaceLandmark68Net {
}
export declare class FaceLandmarks implements IFaceLandmarks {
protected _shift: Point;
protected _positions: Point[];
protected _imgDims: Dimensions;
constructor(relativeFaceLandmarkPositions: Point[], imgDims: IDimensions, shift?: Point);
get shift(): Point;
get imageWidth(): number;
get imageHeight(): number;
get positions(): Point[];
get relativePositions(): Point[];
forSize<T extends FaceLandmarks>(width: number, height: number): T;
shiftBy<T extends FaceLandmarks>(x: number, y: number): T;
shiftByPoint<T extends FaceLandmarks>(pt: Point): T;
/**
* Aligns the face landmarks after face detection from the relative positions of the face's
* bounding box, or from its current shift. This function should be used to align the face images
* after face detection has been performed, before they are passed to the face recognition net.
* This will make the computed face descriptor more accurate. See the usage sketch following
* this class.
*
* @param detection (optional) The bounding box of the face or the face detection result. If
* no argument is passed, the positions of the face landmarks are assumed to be relative to
* their current shift.
* @returns The bounding box of the aligned face.
*/
align(detection?: FaceDetection | IRect | IBoundingBox | null, options?: {
useDlibAlignment?: boolean;
minBoxPadding?: number;
}): Box;
private alignDlib;
private alignMinBbox;
protected getRefPointsForAlignment(): Point[];
}
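/*
* Usage sketch for FaceLandmarks.align (illustrative, not part of the
* declarations). Assumes `faceapi` plus loaded detection and landmark models:
*
*   const result = await faceapi.detectSingleFace(img).withFaceLandmarks();
*   if (result) {
*     const alignedBox = result.landmarks.align(result.detection);
*     // extract this region before passing it to the face recognition net
*   }
*/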
export declare class FaceLandmarks5 extends FaceLandmarks {
protected getRefPointsForAlignment(): Point[];
}
export declare class FaceLandmarks68 extends FaceLandmarks {
getJawOutline(): Point[];
getLeftEyeBrow(): Point[];
getRightEyeBrow(): Point[];
getNose(): Point[];
getLeftEye(): Point[];
getRightEye(): Point[];
getMouth(): Point[];
protected getRefPointsForAlignment(): Point[];
}
export declare class FaceMatch implements IFaceMatch {
private _label;
private _distance;
constructor(label: string, distance: number);
get label(): string;
get distance(): number;
toString(withDistance?: boolean): string;
}
export declare class FaceMatcher {
private _labeledDescriptors;
private _distanceThreshold;
constructor(inputs: LabeledFaceDescriptors | WithFaceDescriptor<any> | Float32Array | Array<LabeledFaceDescriptors | WithFaceDescriptor<any> | Float32Array>, distanceThreshold?: number);
get labeledDescriptors(): LabeledFaceDescriptors[];
get distanceThreshold(): number;
computeMeanDistance(queryDescriptor: Float32Array, descriptors: Float32Array[]): number;
matchDescriptor(queryDescriptor: Float32Array): FaceMatch;
findBestMatch(queryDescriptor: Float32Array): FaceMatch;
toJSON(): any;
static fromJSON(json: any): FaceMatcher;
}
declare abstract class FaceProcessor<TExtractorParams extends FaceFeatureExtractorParams | TinyFaceFeatureExtractorParams> extends NeuralNetwork<NetParams_2> {
protected _faceFeatureExtractor: IFaceFeatureExtractor<TExtractorParams>;
constructor(_name: string, faceFeatureExtractor: IFaceFeatureExtractor<TExtractorParams>);
get faceFeatureExtractor(): IFaceFeatureExtractor<TExtractorParams>;
protected abstract getDefaultModelName(): string;
protected abstract getClassifierChannelsIn(): number;
protected abstract getClassifierChannelsOut(): number;
runNet(input: NetInput | tf.Tensor4D): tf.Tensor2D;
dispose(throwOnRedispose?: boolean): void;
loadClassifierParams(weights: Float32Array): void;
extractClassifierParams(weights: Float32Array): {
params: NetParams_2;
paramMappings: ParamMapping[];
};
protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: NetParams_2;
paramMappings: ParamMapping[];
};
protected extractParams(weights: Float32Array): {
params: NetParams_2;
paramMappings: ParamMapping[];
};
}
export declare class FaceRecognitionNet extends NeuralNetwork<NetParams_3> {
constructor();
forwardInput(input: NetInput): tf.Tensor2D;
forward(input: TNetInput): Promise<tf.Tensor2D>;
computeFaceDescriptor(input: TNetInput): Promise<Float32Array | Float32Array[]>;
protected getDefaultModelName(): string;
protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: NetParams_3;
paramMappings: ParamMapping[];
};
protected extractParams(weights: Float32Array): {
params: NetParams_3;
paramMappings: ParamMapping[];
};
}
declare type FCParams = {
weights: tf.Tensor2D;
bias: tf.Tensor1D;
};
export declare function fetchImage(uri: string): Promise<HTMLImageElement>;
export declare function fetchJson<T>(uri: string): Promise<T>;
export declare function fetchNetWeights(uri: string): Promise<Float32Array>;
export declare function fetchOrThrow(url: string, init?: RequestInit): Promise<Response>;
export declare function fetchVideo(uri: string): Promise<HTMLVideoElement>;
declare type FileSystem_2 = {
readFile: (filePath: string) => Promise<any>;
};
export { FileSystem_2 as FileSystem }
export declare enum Gender {
FEMALE = "female",
MALE = "male"
}
declare function getCenterPoint(pts: Point[]): Point;
export declare function getContext2dOrThrow(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): CanvasRenderingContext2D;
declare function getEnv(): Environment;
export declare function getMediaDimensions(input: HTMLImageElement | HTMLCanvasElement | HTMLVideoElement | IDimensions): Dimensions;
export declare interface IBoundingBox {
left: number;
top: number;
right: number;
bottom: number;
}
export declare interface IDimensions {
width: number;
height: number;
}
declare interface IDrawBoxOptions {
boxColor?: string;
lineWidth?: number;
drawLabelOptions?: IDrawTextFieldOptions;
label?: string;
}
declare interface IDrawFaceLandmarksOptions {
drawLines?: boolean;
drawPoints?: boolean;
lineWidth?: number;
pointSize?: number;
lineColor?: string;
pointColor?: string;
}
declare interface IDrawTextFieldOptions {
anchorPosition?: AnchorPosition;
backgroundColor?: string;
fontColor?: string;
fontSize?: number;
fontStyle?: string;
padding?: number;
}
export declare interface IFaceDetecion {
score: number;
box: Box;
}
declare interface IFaceFeatureExtractor<TNetParams extends FaceFeatureExtractorParams | TinyFaceFeatureExtractorParams> extends NeuralNetwork<TNetParams> {
forwardInput(input: NetInput): tf.Tensor4D;
forward(input: TNetInput): Promise<tf.Tensor4D>;
}
export declare interface IFaceLandmarks {
positions: Point[];
shift: Point;
}
export declare interface IFaceMatch {
label: string;
distance: number;
}
export declare function imageTensorToCanvas(imgTensor: tf.Tensor, canvas?: HTMLCanvasElement): Promise<HTMLCanvasElement>;
export declare function imageToSquare(input: HTMLImageElement | HTMLCanvasElement, inputSize: number, centerImage?: boolean): HTMLCanvasElement;
declare function initialize(): void | null;
export declare function inverseSigmoid(x: number): number;
export declare function iou(box1: Box, box2: Box, isIOU?: boolean): number;
export declare interface IPoint {
x: number;
y: number;
}
export declare interface IRect {
x: number;
y: number;
width: number;
height: number;
}
declare function isBrowser(): boolean;
declare function isDimensions(obj: any): boolean;
declare function isEven(num: number): boolean;
declare function isFloat(num: number): boolean;
export declare function isMediaElement(input: any): boolean;
export declare function isMediaLoaded(media: HTMLImageElement | HTMLVideoElement): boolean;
declare function isNodejs(): boolean;
export declare interface ISsdMobilenetv1Options {
minConfidence?: number;
maxResults?: number;
}
declare function isTensor(tensor: any, dim: number): boolean;
declare function isTensor1D(tensor: any): tensor is tf.Tensor1D;
declare function isTensor2D(tensor: any): tensor is tf.Tensor2D;
declare function isTensor3D(tensor: any): tensor is tf.Tensor3D;
declare function isTensor4D(tensor: any): tensor is tf.Tensor4D;
declare function isValidNumber(num: any): boolean;
declare function isValidProbablitiy(num: any): boolean;
export declare function isWithAge(obj: any): obj is WithAge<{}>;
export declare function isWithFaceDetection(obj: any): obj is WithFaceDetection<{}>;
export declare function isWithFaceExpressions(obj: any): obj is WithFaceExpressions<{}>;
export declare function isWithFaceLandmarks(obj: any): obj is WithFaceLandmarks<WithFaceDetection<{}>, FaceLandmarks>;
export declare function isWithGender(obj: any): obj is WithGender<{}>;
export declare type ITinyFaceDetectorOptions = ITinyYolov2Options;
export declare interface ITinyYolov2Options {
inputSize?: number;
scoreThreshold?: number;
}
export declare class LabeledBox extends Box {
static assertIsValidLabeledBox(box: any, callee: string): void;
private _label;
constructor(box: IBoundingBox | IRect | any, label: number);
get label(): number;
}
export declare class LabeledFaceDescriptors {
private _label;
private _descriptors;
constructor(label: string, descriptors: Float32Array[]);
get label(): string;
get descriptors(): Float32Array[];
toJSON(): any;
static fromJSON(json: any): LabeledFaceDescriptors;
}
export declare const loadAgeGenderModel: (url: string) => Promise<void>;
export declare const loadFaceDetectionModel: (url: string) => Promise<void>;
export declare const loadFaceExpressionModel: (url: string) => Promise<void>;
export declare const loadFaceLandmarkModel: (url: string) => Promise<void>;
export declare const loadFaceLandmarkTinyModel: (url: string) => Promise<void>;
export declare const loadFaceRecognitionModel: (url: string) => Promise<void>;
export declare const loadSsdMobilenetv1Model: (url: string) => Promise<void>;
export declare const loadTinyFaceDetectorModel: (url: string) => Promise<void>;
export declare const loadTinyYolov2Model: (url: string) => Promise<void>;
export declare function loadWeightMap(uri: string | undefined, defaultModelName: string): Promise<tf.NamedTensorMap>;
export declare const locateFaces: (input: TNetInput, options: SsdMobilenetv1Options) => Promise<FaceDetection[]>;
export declare function matchDimensions(input: IDimensions, reference: IDimensions, useMediaDimensions?: boolean): {
width: number;
height: number;
};
export declare function minBbox(pts: IPoint[]): BoundingBox;
export declare type MobilenetParams = {
conv0: SeparableConvParams | ConvParams;
conv1: SeparableConvParams;
conv2: SeparableConvParams;
conv3: SeparableConvParams;
conv4: SeparableConvParams;
conv5: SeparableConvParams;
conv6?: SeparableConvParams;
conv7?: SeparableConvParams;
conv8: ConvParams;
};
declare namespace MobileNetV1 {
type DepthwiseConvParams = {
filters: tf.Tensor4D;
batch_norm_scale: tf.Tensor1D;
batch_norm_offset: tf.Tensor1D;
batch_norm_mean: tf.Tensor1D;
batch_norm_variance: tf.Tensor1D;
};
type ConvPairParams = {
depthwise_conv: DepthwiseConvParams;
pointwise_conv: PointwiseConvParams;
};
type Params = {
conv_0: PointwiseConvParams;
conv_1: ConvPairParams;
conv_2: ConvPairParams;
conv_3: ConvPairParams;
conv_4: ConvPairParams;
conv_5: ConvPairParams;
conv_6: ConvPairParams;
conv_7: ConvPairParams;
conv_8: ConvPairParams;
conv_9: ConvPairParams;
conv_10: ConvPairParams;
conv_11: ConvPairParams;
conv_12: ConvPairParams;
conv_13: ConvPairParams;
};
}
declare function monkeyPatch(env: Partial<Environment>): void;
/** @docalias {[name: string]: Tensor} */
declare type NamedTensorMap = {
[name: string]: Tensor;
};
export declare class NetInput {
private _imageTensors;
private _canvases;
private _batchSize;
private _treatAsBatchInput;
private _inputDimensions;
private _inputSize;
constructor(inputs: Array<TResolvedNetInput>, treatAsBatchInput?: boolean);
get imageTensors(): Array<tf.Tensor3D | tf.Tensor4D>;
get canvases(): HTMLCanvasElement[];
get isBatchInput(): boolean;
get batchSize(): number;
get inputDimensions(): number[][];
get inputSize(): number | undefined;
get reshapedInputDimensions(): Dimensions[];
getInput(batchIdx: number): tf.Tensor3D | tf.Tensor4D | HTMLCanvasElement;
getInputDimensions(batchIdx: number): number[];
getInputHeight(batchIdx: number): number;
getInputWidth(batchIdx: number): number;
getReshapedInputDimensions(batchIdx: number): Dimensions;
/**
* Create a batch tensor from all input canvases and tensors
* with size [batchSize, inputSize, inputSize, 3].
*
* @param inputSize Height and width of the tensor.
* @param isCenterImage (optional, default: false) If true, add an equal amount of padding on
* both sides of the minor dimension of the image. See the usage sketch after this class.
* @returns The batch tensor.
*/
toBatchTensor(inputSize: number, isCenterInputs?: boolean): tf.Tensor4D;
}
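/*
* Sketch of direct NetInput usage (illustrative; most callers go through the
* higher-level detection functions instead):
*
*   const netInput = await faceapi.toNetInput(img);
*   const batch = netInput.toBatchTensor(512, true); // shape [1, 512, 512, 3]
*   batch.dispose(); // tensors must be freed manually
*/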
export declare type NetOutput = {
age: tf.Tensor1D;
gender: tf.Tensor2D;
};
export declare type NetParams = {
fc: {
age: FCParams;
gender: FCParams;
};
};
declare type NetParams_2 = {
fc: FCParams;
};
declare type NetParams_3 = {
conv32_down: ConvLayerParams;
conv32_1: ResidualLayerParams;
conv32_2: ResidualLayerParams;
conv32_3: ResidualLayerParams;
conv64_down: ResidualLayerParams;
conv64_1: ResidualLayerParams;
conv64_2: ResidualLayerParams;
conv64_3: ResidualLayerParams;
conv128_down: ResidualLayerParams;
conv128_1: ResidualLayerParams;
conv128_2: ResidualLayerParams;
conv256_down: ResidualLayerParams;
conv256_1: ResidualLayerParams;
conv256_2: ResidualLayerParams;
conv256_down_out: ResidualLayerParams;
fc: tf.Tensor2D;
};
declare type NetParams_4 = {
mobilenetv1: MobileNetV1.Params;
prediction_layer: PredictionLayerParams;
output_layer: OutputLayerParams;
};
export declare const nets: {
ssdMobilenetv1: SsdMobilenetv1;
tinyFaceDetector: TinyFaceDetector;
tinyYolov2: TinyYolov2;
faceLandmark68Net: FaceLandmark68Net;
faceLandmark68TinyNet: FaceLandmark68TinyNet;
faceRecognitionNet: FaceRecognitionNet;
faceExpressionNet: FaceExpressionNet;
ageGenderNet: AgeGenderNet;
};
export declare abstract class NeuralNetwork<TNetParams> {
constructor(name: string);
protected _params: TNetParams | undefined;
protected _paramMappings: ParamMapping[];
_name: any;
get params(): TNetParams | undefined;
get paramMappings(): ParamMapping[];
get isLoaded(): boolean;
getParamFromPath(paramPath: string): tf.Tensor;
reassignParamFromPath(paramPath: string, tensor: tf.Tensor): void;
getParamList(): {
path: string;
tensor: tf.Tensor;
}[];
getTrainableParams(): {
path: string;
tensor: tf.Tensor;
}[];
getFrozenParams(): {
path: string;
tensor: tf.Tensor;
}[];
variable(): void;
freeze(): void;
dispose(throwOnRedispose?: boolean): void;
serializeParams(): Float32Array;
load(weightsOrUrl: Float32Array | string | undefined): Promise<void>;
loadFromUri(uri: string | undefined): Promise<void>;
loadFromDisk(filePath: string | undefined): Promise<void>;
loadFromWeightMap(weightMap: tf.NamedTensorMap): void;
extractWeights(weights: Float32Array): void;
private traversePropertyPath;
protected abstract getDefaultModelName(): string;
protected abstract extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: TNetParams;
paramMappings: ParamMapping[];
};
protected abstract extractParams(weights: Float32Array): {
params: TNetParams;
paramMappings: ParamMapping[];
};
}
export declare function nonMaxSuppression(boxes: Box[], scores: number[], iouThreshold: number, isIOU?: boolean): number[];
export declare function normalize(x: tf.Tensor4D, meanRgb: number[]): tf.Tensor4D;
declare type NumericDataType = 'float32' | 'int32' | 'bool' | 'complex64';
export declare class ObjectDetection {
private _score;
private _classScore;
private _className;
private _box;
private _imageDims;
constructor(score: number, classScore: number, className: string, relativeBox: IRect, imageDims: IDimensions);
get score(): number;
get classScore(): number;
get className(): string;
get box(): Box;
get imageDims(): Dimensions;
get imageWidth(): number;
get imageHeight(): number;
get relativeBox(): Box;
forSize(width: number, height: number): ObjectDetection;
}
declare type OutputLayerParams = {
extra_dim: tf.Tensor3D;
};
/**
* Pads the smaller dimension of an image tensor with zeros, such that width === height.
*
* @param imgTensor The image tensor.
* @param isCenterImage (optional, default: false) If true, add an equal amount of padding on
* both sides of the minor dimension of the image.
* @returns The padded tensor with width === height.
*/
export declare function padToSquare(imgTensor: tf.Tensor4D, isCenterImage?: boolean): tf.Tensor4D;
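/*
* Sketch (illustrative): a tensor of shape [1, 300, 500, 3] becomes
* [1, 500, 500, 3], with the image centered when isCenterImage is true:
*
*   const squared = faceapi.padToSquare(imgTensor4d, true);
*/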
declare type ParamMapping = {
originalPath?: string;
paramPath: string;
};
export declare class Point implements IPoint {
private _x;
private _y;
constructor(x: number, y: number);
get x(): number;
get y(): number;
add(pt: IPoint): Point;
sub(pt: IPoint): Point;
mul(pt: IPoint): Point;
div(pt: IPoint): Point;
abs(): Point;
magnitude(): number;
floor(): Point;
}
declare type PointwiseConvParams = {
filters: tf.Tensor4D;
batch_norm_offset: tf.Tensor1D;
};
/**
* Predicts age and gender from a face image.
*
* @param inputs The face image extracted from the bounding box of a face. Can
* also be an array of input images, which will be batch processed.
* @returns Predictions with age, gender and gender probability or array thereof in case of batch input.
*/
export declare const predictAgeAndGender: (input: TNetInput) => Promise<AgeAndGenderPrediction | AgeAndGenderPrediction[]>;
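/*
* Usage sketch (illustrative). Assumes `faceapi`, a loaded age/gender model
* and a single face crop (single input, hence the single-result cast):
*
*   const { age, gender, genderProbability } =
*     await faceapi.predictAgeAndGender(faceCanvas) as AgeAndGenderPrediction;
*/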
declare class PredictAgeAndGenderTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
protected parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>;
protected input: TNetInput;
protected extractedFaces?: any[] | undefined;
constructor(parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>, input: TNetInput, extractedFaces?: any[] | undefined);
}
declare class PredictAllAgeAndGenderTask<TSource extends WithFaceDetection<{}>> extends PredictAgeAndGenderTaskBase<WithAge<WithGender<TSource>>[], TSource[]> {
run(): Promise<WithAge<WithGender<TSource>>[]>;
withFaceExpressions(): PredictAllFaceExpressionsTask<WithAge<WithGender<TSource>>>;
}
declare class PredictAllAgeAndGenderWithFaceAlignmentTask<TSource extends WithFaceLandmarks<WithFaceDetection<{}>>> extends PredictAllAgeAndGenderTask<TSource> {
withFaceExpressions(): PredictAllFaceExpressionsWithFaceAlignmentTask<WithAge<WithGender<TSource>>>;
withFaceDescriptors(): ComputeAllFaceDescriptorsTask<WithAge<WithGender<TSource>>>;
}
declare class PredictAllFaceExpressionsTask<TSource extends WithFaceDetection<{}>> extends PredictFaceExpressionsTaskBase<WithFaceExpressions<TSource>[], TSource[]> {
run(): Promise<WithFaceExpressions<TSource>[]>;
withAgeAndGender(): PredictAllAgeAndGenderTask<WithFaceExpressions<TSource>>;
}
declare class PredictAllFaceExpressionsWithFaceAlignmentTask<TSource extends WithFaceLandmarks<WithFaceDetection<{}>>> extends PredictAllFaceExpressionsTask<TSource> {
withAgeAndGender(): PredictAllAgeAndGenderWithFaceAlignmentTask<WithFaceExpressions<TSource>>;
withFaceDescriptors(): ComputeAllFaceDescriptorsTask<WithFaceExpressions<TSource>>;
}
export declare class PredictedBox extends LabeledBox {
static assertIsValidPredictedBox(box: any, callee: string): void;
private _score;
private _classScore;
constructor(box: IBoundingBox | IRect | any, label: number, score: number, classScore: number);
get score(): number;
get classScore(): number;
}
declare class PredictFaceExpressionsTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
protected parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>;
protected input: TNetInput;
protected extractedFaces?: any[] | undefined;
constructor(parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>, input: TNetInput, extractedFaces?: any[] | undefined);
}
declare type PredictionLayerParams = {
conv_0: PointwiseConvParams;
conv_1: PointwiseConvParams;
conv_2: PointwiseConvParams;
conv_3: PointwiseConvParams;
conv_4: PointwiseConvParams;
conv_5: PointwiseConvParams;
conv_6: PointwiseConvParams;
conv_7: PointwiseConvParams;
box_predictor_0: BoxPredictionParams;
box_predictor_1: BoxPredictionParams;
box_predictor_2: BoxPredictionParams;
box_predictor_3: BoxPredictionParams;
box_predictor_4: BoxPredictionParams;
box_predictor_5: BoxPredictionParams;
};
declare class PredictSingleAgeAndGenderTask<TSource extends WithFaceDetection<{}>> extends PredictAgeAndGenderTaskBase<WithAge<WithGender<TSource>> | undefined, TSource | undefined> {
run(): Promise<WithAge<WithGender<TSource>> | undefined>;
withFaceExpressions(): PredictSingleFaceExpressionsTask<WithAge<WithGender<TSource>>>;
}
declare class PredictSingleAgeAndGenderWithFaceAlignmentTask<TSource extends WithFaceLandmarks<WithFaceDetection<{}>>> extends PredictSingleAgeAndGenderTask<TSource> {
withFaceExpressions(): PredictSingleFaceExpressionsWithFaceAlignmentTask<WithAge<WithGender<TSource>>>;
withFaceDescriptor(): ComputeSingleFaceDescriptorTask<WithAge<WithGender<TSource>>>;
}
declare class PredictSingleFaceExpressionsTask<TSource extends WithFaceDetection<{}>> extends PredictFaceExpressionsTaskBase<WithFaceExpressions<TSource> | undefined, TSource | undefined> {
run(): Promise<WithFaceExpressions<TSource> | undefined>;
withAgeAndGender(): PredictSingleAgeAndGenderTask<WithFaceExpressions<TSource>>;
}
declare class PredictSingleFaceExpressionsWithFaceAlignmentTask<TSource extends WithFaceLandmarks<WithFaceDetection<{}>>> extends PredictSingleFaceExpressionsTask<TSource> {
withAgeAndGender(): PredictSingleAgeAndGenderWithFaceAlignmentTask<WithFaceExpressions<TSource>>;
withFaceDescriptor(): ComputeSingleFaceDescriptorTask<WithFaceExpressions<TSource>>;
}
declare function range(num: number, start: number, step: number): number[];
declare enum Rank {
R0 = "R0",
R1 = "R1",
R2 = "R2",
R3 = "R3",
R4 = "R4",
R5 = "R5",
R6 = "R6"
}
/**
* Recognizes the facial expressions from a face image.
*
* @param inputs The face image extracted from the bounding box of a face. Can
* also be an array of input images, which will be batch processed.
* @returns Facial expressions with corresponding probabilities or array thereof in case of batch input.
*/
export declare const recognizeFaceExpressions: (input: TNetInput) => Promise<FaceExpressions | FaceExpressions[]>;
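/*
* Usage sketch (illustrative). Assumes `faceapi`, a loaded face expression
* model and a single face crop:
*
*   const expressions = await faceapi.recognizeFaceExpressions(faceCanvas) as FaceExpressions;
*   const [best] = expressions.asSortedArray(); // e.g. { expression: 'happy', probability: 0.98 }
*/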
export declare class Rect extends Box implements IRect {
constructor(x: number, y: number, width: number, height: number, allowNegativeDimensions?: boolean);
}
declare interface RecursiveArray<T extends any> {
[index: number]: T | RecursiveArray<T>;
}
declare type ReductionBlockParams = {
separable_conv0: SeparableConvParams;
separable_conv1: SeparableConvParams;
expansion_conv: ConvParams;
};
declare type ResidualLayerParams = {
conv1: ConvLayerParams;
conv2: ConvLayerParams;
};
export declare function resizeResults<T>(results: T, dimensions: IDimensions): T;
export declare function resolveInput(arg: string | any): any;
declare function round(num: number, prec?: number): number;
declare type ScaleLayerParams = {
weights: tf.Tensor1D;
biases: tf.Tensor1D;
};
declare class SeparableConvParams {
depthwise_filter: tf.Tensor4D;
pointwise_filter: tf.Tensor4D;
bias: tf.Tensor1D;
constructor(depthwise_filter: tf.Tensor4D, pointwise_filter: tf.Tensor4D, bias: tf.Tensor1D);
}
declare function setEnv(env: Environment): void;
/**
* @license
* Copyright 2017 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
///
/** @docalias number[] */
declare interface ShapeMap {
R0: number[];
R1: [number];
R2: [number, number];
R3: [number, number, number];
R4: [number, number, number, number];
R5: [number, number, number, number, number];
R6: [number, number, number, number, number, number];
}
export declare function shuffleArray(inputArray: any[]): any[];
export declare function sigmoid(x: number): number;
declare interface SingleValueMap {
bool: boolean;
int32: number;
float32: number;
complex64: number;
string: string;
}
export declare class SsdMobilenetv1 extends NeuralNetwork<NetParams_4> {
constructor();
forwardInput(input: NetInput): any;
forward(input: TNetInput): Promise<any>;
locateFaces(input: TNetInput, options?: ISsdMobilenetv1Options): Promise<FaceDetection[]>;
protected getDefaultModelName(): string;
protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: NetParams_4;
paramMappings: ParamMapping[];
};
protected extractParams(weights: Float32Array): {
params: NetParams_4;
paramMappings: ParamMapping[];
};
}
/**
* Attempts to detect all faces in an image using the SSD Mobilenetv1 Network.
*
* @param input The input image.
* @param options (optional, default: see SsdMobilenetv1Options constructor for default parameters).
* @returns Bounding box of each face with score.
*/
export declare const ssdMobilenetv1: (input: TNetInput, options: SsdMobilenetv1Options) => Promise<FaceDetection[]>;
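/*
* Usage sketch (illustrative). Assumes `faceapi` and that
* loadSsdMobilenetv1Model() has resolved:
*
*   const options = new faceapi.SsdMobilenetv1Options({ minConfidence: 0.5 });
*   const detections = await faceapi.ssdMobilenetv1(img, options);
*   detections.forEach((det) => console.log(det.score, det.box));
*/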
export declare class SsdMobilenetv1Options {
protected _name: string;
private _minConfidence;
private _maxResults;
constructor({ minConfidence, maxResults }?: ISsdMobilenetv1Options);
get minConfidence(): number;
get maxResults(): number;
}
declare type TDrawDetectionsInput = IRect | IBoundingBox | FaceDetection | WithFaceDetection<{}>;
declare namespace Tensor { }
/**
* A `tf.Tensor` object represents an immutable, multidimensional array of
* numbers that has a shape and a data type.
*
* For performance reasons, functions that create tensors do not necessarily
* perform a copy of the data passed to them (e.g. if the data is passed as a
* `Float32Array`), and changes to the data will change the tensor. This is not
* a feature and is not supported. To avoid this behavior, use the tensor before
* changing the input data or create a copy with `copy = tf.add(yourTensor, 0)`.
*
* See `tf.tensor` for details on how to create a `tf.Tensor`.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
declare class Tensor<R extends Rank = Rank> {
/** Unique id of this tensor. */
readonly id: number;
/**
* Id of the bucket holding the data for this tensor. Multiple arrays can
* point to the same bucket (e.g. when calling array.reshape()).
*/
dataId: DataId;
/** The shape of the tensor. */
readonly shape: ShapeMap[R];
/** Number of elements in the tensor. */
readonly size: number;
/** The data type for the array. */
readonly dtype: DataType;
/** The rank type for the array (see `Rank` enum). */
readonly rankType: R;
/** Whether this tensor has been globally kept. */
kept: boolean;
/** The id of the scope this tensor is being tracked in. */
scopeId: number;
/**
* Number of elements to skip in each dimension when indexing. See
* https://docs.scipy.org/doc/numpy/reference/generated/\
* numpy.ndarray.strides.html
*/
readonly strides: number[];
constructor(shape: ShapeMap[R], dtype: DataType, dataId: DataId, id: number);
readonly rank: number;
/**
* Returns a promise of `tf.TensorBuffer` that holds the underlying data.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
buffer<D extends DataType = 'float32'>(): Promise<TensorBuffer<R, D>>;
/**
* Returns a `tf.TensorBuffer` that holds the underlying data.
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
bufferSync<D extends DataType = 'float32'>(): TensorBuffer<R, D>;
/**
* Returns the tensor data as a nested array. The transfer of data is done
* asynchronously.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
array(): Promise<ArrayMap[R]>;
/**
* Returns the tensor data as a nested array. The transfer of data is done
* synchronously.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
arraySync(): ArrayMap[R];
/**
* Asynchronously downloads the values from the `tf.Tensor`. Returns a
* promise of `TypedArray` that resolves when the computation has finished.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
data<D extends DataType = NumericDataType>(): Promise<DataTypeMap[D]>;
/**
* Synchronously downloads the values from the `tf.Tensor`. This blocks the
* UI thread until the values are ready, which can cause performance issues.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
dataSync<D extends DataType = NumericDataType>(): DataTypeMap[D];
/** Returns the underlying bytes of the tensor's data. */
bytes(): Promise<Uint8Array[] | Uint8Array>;
/**
* Disposes `tf.Tensor` from memory.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
dispose(): void;
protected isDisposedInternal: boolean;
readonly isDisposed: boolean;
throwIfDisposed(): void;
/**
* Prints the `tf.Tensor`. See `tf.print` for details.
*
* @param verbose Whether to print verbose information about the tensor,
* including dtype and size.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
print(verbose?: boolean): void;
/**
* Returns a copy of the tensor. See `tf.clone` for details.
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
clone<T extends Tensor>(this: T): T;
/**
* Returns a human-readable description of the tensor. Useful for logging.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
toString(verbose?: boolean): string;
variable(trainable?: boolean, name?: string, dtype?: DataType): Variable<R>;
}
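/*
* Sketch of the aliasing caveat described above (illustrative; `tf.tensor`
* belongs to the full tfjs runtime, which these declarations only cover
* partially):
*
*   const values = new Float32Array([1, 2, 3]);
*   const t = tf.tensor(values); // may alias `values` instead of copying
*   values[0] = 9;               // can silently change `t`; read `t` first,
*                                // or keep a safe copy via tf.add(t, 0)
*/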
/** @doclink Tensor */
declare type Tensor1D = Tensor<Rank.R1>;
/** @doclink Tensor */
declare type Tensor2D = Tensor<Rank.R2>;
/** @doclink Tensor */
declare type Tensor3D = Tensor<Rank.R3>;
/** @doclink Tensor */
declare type Tensor4D = Tensor<Rank.R4>;
/** @doclink Tensor */
declare type Tensor5D = Tensor<Rank.R5>;
/**
* A mutable object, similar to `tf.Tensor`, that allows users to set values
* at locations before converting to an immutable `tf.Tensor`.
*
* See `tf.buffer` for creating a tensor buffer.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
declare class TensorBuffer<R extends Rank, D extends DataType = 'float32'> {
dtype: D;
size: number;
shape: ShapeMap[R];
strides: number[];
values: DataTypeMap[D];
constructor(shape: ShapeMap[R], dtype: D, values?: DataTypeMap[D]);
/**
* Sets a value in the buffer at a given location.
*
* @param value The value to set.
* @param locs The location indices.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
set(value: SingleValueMap[D], ...locs: number[]): void;
/**
* Returns the value in the buffer at the provided location.
*
* @param locs The location indices.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
get(...locs: number[]): SingleValueMap[D];
locToIndex(locs: number[]): number;
indexToLoc(index: number): number[];
readonly rank: number;
/**
* Creates an immutable `tf.Tensor` object from the buffer.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
toTensor(): Tensor<R>;
}
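/*
* Sketch (illustrative; `tf.buffer` belongs to the full tfjs runtime):
*
*   const buf = tf.buffer([2, 2], 'float32');
*   buf.set(1, 0, 0);         // write value 1 at location (0, 0)
*   const t = buf.toTensor(); // immutable tf.Tensor of shape [2, 2]
*/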
/** @docalias TypedArray|Array */
declare type TensorLike = TypedArray | number | boolean | string | RecursiveArray<number | number[] | TypedArray> | RecursiveArray<boolean> | RecursiveArray<string> | Uint8Array[];
declare namespace tf {
export {
version_2 as version,
Tensor,
TensorLike,
Rank,
Tensor1D,
Tensor2D,
Tensor3D,
Tensor4D,
Tensor5D,
NamedTensorMap
}
}
export { tf }
export declare class TinyFaceDetector extends TinyYolov2Base {
constructor();
get anchors(): Point[];
locateFaces(input: TNetInput, forwardParams: ITinyYolov2Options): Promise<FaceDetection[]>;
protected getDefaultModelName(): string;
protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: TinyYolov2NetParams;
paramMappings: ParamMapping[];
};
}
/**
* Attempts to detect all faces in an image using the Tiny Face Detector.
*
* @param input The input image.
* @param options (optional, default: see TinyFaceDetectorOptions constructor for default parameters).
* @returns Bounding box of each face with score.
*/
export declare const tinyFaceDetector: (input: TNetInput, options: TinyFaceDetectorOptions) => Promise<FaceDetection[]>;
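/*
* Usage sketch (illustrative). Assumes `faceapi` and that
* loadTinyFaceDetectorModel() has resolved:
*
*   const options = new faceapi.TinyFaceDetectorOptions({ inputSize: 416, scoreThreshold: 0.5 });
*   const detections = await faceapi.tinyFaceDetector(img, options);
*/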
export declare class TinyFaceDetectorOptions extends TinyYolov2Options {
protected _name: string;
}
declare class TinyFaceFeatureExtractor extends NeuralNetwork<TinyFaceFeatureExtractorParams> implements IFaceFeatureExtractor<TinyFaceFeatureExtractorParams> {
constructor();
forwardInput(input: NetInput): tf.Tensor4D;
forward(input: TNetInput): Promise<tf.Tensor4D>;
protected getDefaultModelName(): string;
protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: TinyFaceFeatureExtractorParams;
paramMappings: ParamMapping[];
};
protected extractParams(weights: Float32Array): {
params: TinyFaceFeatureExtractorParams;
paramMappings: ParamMapping[];
};
}
declare type TinyFaceFeatureExtractorParams = {
dense0: DenseBlock3Params;
dense1: DenseBlock3Params;
dense2: DenseBlock3Params;
};
declare class TinyXception extends NeuralNetwork<TinyXceptionParams> {
private _numMainBlocks;
constructor(numMainBlocks: number);
forwardInput(input: NetInput): tf.Tensor4D;
forward(input: TNetInput): Promise<tf.Tensor4D>;
protected getDefaultModelName(): string;
protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: TinyXceptionParams;
paramMappings: ParamMapping[];
};
protected extractParams(weights: Float32Array): {
params: TinyXceptionParams;
paramMappings: ParamMapping[];
};
}
declare type TinyXceptionParams = {
entry_flow: {
conv_in: ConvParams;
reduction_block_0: ReductionBlockParams;
reduction_block_1: ReductionBlockParams;
};
middle_flow: any;
exit_flow: {
reduction_block: ReductionBlockParams;
separable_conv: SeparableConvParams;
};
};
export declare class TinyYolov2 extends TinyYolov2Base {
constructor(withSeparableConvs?: boolean);
get withSeparableConvs(): boolean;
get anchors(): Point[];
locateFaces(input: TNetInput, forwardParams: ITinyYolov2Options): Promise<FaceDetection[]>;
protected getDefaultModelName(): string;
protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: TinyYolov2NetParams;
paramMappings: ParamMapping[];
};
}
/**
* Attempts to detect all faces in an image using the Tiny Yolov2 Network.
*
* @param input The input image.
* @param options (optional, default: see TinyYolov2Options constructor for default parameters).
* @returns Bounding box of each face with score.
*/
export declare const tinyYolov2: (input: TNetInput, options: ITinyYolov2Options) => Promise<FaceDetection[]>;
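/*
* Usage sketch (illustrative). Assumes `faceapi` and that
* loadTinyYolov2Model() has resolved:
*
*   const detections = await faceapi.tinyYolov2(img, { inputSize: 416, scoreThreshold: 0.5 });
*/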
declare class TinyYolov2Base extends NeuralNetwork<TinyYolov2NetParams> {
static DEFAULT_FILTER_SIZES: number[];
private _config;
constructor(config: TinyYolov2Config);
get config(): TinyYolov2Config;
get withClassScores(): boolean;
get boxEncodingSize(): number;
runTinyYolov2(x: tf.Tensor4D, params: DefaultTinyYolov2NetParams): tf.Tensor4D;
runMobilenet(x: tf.Tensor4D, params: MobilenetParams): tf.Tensor4D;
forwardInput(input: NetInput, inputSize: number): tf.Tensor4D;
forward(input: TNetInput, inputSize: number): Promise<tf.Tensor4D>;
detect(input: TNetInput, forwardParams?: ITinyYolov2Options): Promise<ObjectDetection[]>;
protected getDefaultModelName(): string;
protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: TinyYolov2NetParams;
paramMappings: ParamMapping[];
};
protected extractParams(weights: Float32Array): {
params: TinyYolov2NetParams;
paramMappings: ParamMapping[];
};
protected extractBoxes(outputTensor: tf.Tensor4D, inputBlobDimensions: Dimensions, scoreThreshold?: number): Promise<any>;
private extractPredictedClass;
}
export declare type TinyYolov2Config = {
withSeparableConvs: boolean;
iouThreshold: number;
anchors: Point[];
classes: string[];
meanRgb?: [number, number, number];
withClassScores?: boolean;
filterSizes?: number[];
isFirstLayerConv2d?: boolean;
};
export declare type TinyYolov2NetParams = DefaultTinyYolov2NetParams | MobilenetParams;
export declare class TinyYolov2Options {
protected _name: string;
private _inputSize;
private _scoreThreshold;
constructor({ inputSize, scoreThreshold }?: ITinyYolov2Options);
get inputSize(): number;
get scoreThreshold(): number;
}
export declare type TMediaElement = HTMLImageElement | HTMLVideoElement | HTMLCanvasElement;
export declare type TNetInput = TNetInputArg | Array<TNetInputArg> | NetInput | tf.Tensor4D;
export declare type TNetInputArg = string | TResolvedNetInput;
/**
* Validates the input to make sure it is a valid net input and waits for all media elements
* to finish loading.
*
* @param input The input, which can be a media element or an array of different media elements.
* @returns A NetInput instance, which can be passed into one of the neural networks.
*/
export declare function toNetInput(inputs: TNetInput): Promise<NetInput>;
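/*
* Usage sketch (illustrative): batching two loaded images into one NetInput:
*
*   const netInput = await faceapi.toNetInput([img1, img2]);
*   // netInput.isBatchInput === true, netInput.batchSize === 2
*/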
export declare type TResolvedNetInput = TMediaElement | tf.Tensor3D | tf.Tensor4D;
declare type TypedArray = Float32Array | Int32Array | Uint8Array;
declare namespace utils {
export {
isTensor,
isTensor1D,
isTensor2D,
isTensor3D,
isTensor4D,
isFloat,
isEven,
round,
isDimensions,
computeReshapedDimensions,
getCenterPoint,
range,
isValidNumber,
isValidProbablitiy
}
}
export { utils }
export declare function validateConfig(config: any): void;
/**
* A mutable `tf.Tensor`, useful for persisting state, e.g. for training.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
declare class Variable<R extends Rank = Rank> extends Tensor<R> {
trainable: boolean;
name: string;
constructor(initialValue: Tensor<R>, trainable: boolean, name: string, tensorId: number);
/**
* Assign a new `tf.Tensor` to this variable. The new `tf.Tensor` must have
* the same shape and dtype as the old `tf.Tensor`.
*
* @param newValue New tensor to be assigned to this variable.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
assign(newValue: Tensor<R>): void;
dispose(): void;
}
export declare const version: {
faceapi: string;
node: boolean;
browser: boolean;
};
declare const version_2: {
'tfjs-core': string;
'tfjs-backend-cpu': string;
'tfjs-backend-webgl': string;
'tfjs-data': string;
'tfjs-layers': string;
'tfjs-converter': string;
tfjs: string;
};
export declare type WithAge<TSource> = TSource & {
age: number;
};
export declare type WithFaceDescriptor<TSource> = TSource & {
descriptor: Float32Array;
};
export declare type WithFaceDetection<TSource> = TSource & {
detection: FaceDetection;
};
export declare type WithFaceExpressions<TSource> = TSource & {
expressions: FaceExpressions;
};
export declare type WithFaceLandmarks<TSource extends WithFaceDetection<{}>, TFaceLandmarks extends FaceLandmarks = FaceLandmarks68> = TSource & {
landmarks: TFaceLandmarks;
unshiftedLandmarks: TFaceLandmarks;
alignedRect: FaceDetection;
angle: {
roll: number | undefined;
pitch: number | undefined;
yaw: number | undefined;
};
};
export declare type WithGender<TSource> = TSource & {
gender: Gender;
genderProbability: number;
};
export { }