///
declare const add: typeof add_;
/**
* Adds two `tf.Tensor`s element-wise, A + B. Supports broadcasting.
 *
* ```js
* const a = tf.tensor1d([1, 2, 3, 4]);
* const b = tf.tensor1d([10, 20, 30, 40]);
*
* a.add(b).print(); // or tf.add(a, b)
* ```
*
* ```js
* // Broadcast add a with b.
* const a = tf.scalar(5);
* const b = tf.tensor1d([10, 20, 30, 40]);
*
* a.add(b).print(); // or tf.add(a, b)
* ```
* @param a The first `tf.Tensor` to add.
* @param b The second `tf.Tensor` to add. Must have the same type as `a`.
*
* @doc {heading: 'Operations', subheading: 'Arithmetic'}
*/
declare function add_<T extends Tensor>(a: Tensor | TensorLike, b: Tensor | TensorLike): T;
export declare type AgeAndGenderPrediction = {
age: number;
gender: Gender;
genderProbability: number;
};
export declare class AgeGenderNet extends NeuralNetwork<NetParams> {
private _faceFeatureExtractor;
constructor(faceFeatureExtractor?: TinyXception);
get faceFeatureExtractor(): TinyXception;
runNet(input: NetInput | tf.Tensor4D): NetOutput;
forwardInput(input: NetInput | tf.Tensor4D): NetOutput;
    forward(input: TNetInput): Promise<NetOutput>;
    predictAgeAndGender(input: TNetInput): Promise<AgeAndGenderPrediction | AgeAndGenderPrediction[]>;
protected getDefaultModelName(): string;
dispose(throwOnRedispose?: boolean): void;
loadClassifierParams(weights: Float32Array): void;
extractClassifierParams(weights: Float32Array): {
params: NetParams;
paramMappings: ParamMapping[];
};
protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: NetParams;
paramMappings: ParamMapping[];
};
protected extractParams(weights: Float32Array): {
params: NetParams;
paramMappings: ParamMapping[];
};
}
export declare const allFaces: typeof allFacesSsdMobilenetv1;
export declare function allFacesSsdMobilenetv1(input: TNetInput, minConfidence?: number): Promise<WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]>;
export declare function allFacesTinyYolov2(input: TNetInput, forwardParams?: ITinyYolov2Options): Promise<WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]>;
declare enum AnchorPosition {
TOP_LEFT = "TOP_LEFT",
TOP_RIGHT = "TOP_RIGHT",
BOTTOM_LEFT = "BOTTOM_LEFT",
BOTTOM_RIGHT = "BOTTOM_RIGHT"
}
/** @docalias number[] */
declare interface ArrayMap {
R0: number;
R1: number[];
R2: number[][];
R3: number[][][];
R4: number[][][][];
R5: number[][][][][];
R6: number[][][][][][];
}
declare const avgPool: typeof avgPool_;
/**
* Computes the 2D average pooling of an image.
*
* @param x The input tensor, of rank 4 or rank 3 of shape
* `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
* @param filterSize The filter size: `[filterHeight, filterWidth]`. If
* `filterSize` is a single number, then `filterHeight == filterWidth`.
* @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If
* `strides` is a single number, then `strideHeight == strideWidth`.
* @param pad The type of padding algorithm:
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid`: output will be smaller than input if filter is larger
* than 1x1.
* - For more info, see this guide:
* [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
* https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
* @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
* provided, it will default to truncate.
*/
declare function avgPool_<T extends Tensor3D | Tensor4D>(x: T | TensorLike, filterSize: [number, number] | number, strides: [number, number] | number, pad: 'valid' | 'same' | number | conv_util.ExplicitPadding, dimRoundingMode?: 'floor' | 'round' | 'ceil'): T;
export declare function awaitMediaLoaded(media: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement): Promise<void>;
export declare type BatchNorm = {
sub: tf.Tensor1D;
truediv: tf.Tensor1D;
};
declare const batchNorm: typeof batchNorm_;
/**
* Batch normalization.
*
* As described in
* [http://arxiv.org/abs/1502.03167](http://arxiv.org/abs/1502.03167).
*
* Mean, variance, scale, and offset can be of two shapes:
* - The same shape as the input.
* - In the common case, the depth dimension is the last dimension of x, so
 *     the values would be a `tf.Tensor1D` of shape [depth].
*
* Also available are stricter rank-specific methods with the same signature
* as this method that assert that parameters passed are of given rank
* - `tf.batchNorm2d`
* - `tf.batchNorm3d`
* - `tf.batchNorm4d`
*
* @param x The input Tensor.
* @param mean A mean Tensor.
* @param variance A variance Tensor.
* @param offset An offset Tensor.
* @param scale A scale Tensor.
* @param varianceEpsilon A small float number to avoid dividing by 0.
*
* @doc {heading: 'Operations', subheading: 'Normalization'}
*/
declare function batchNorm_<R extends Rank>(x: Tensor<R> | TensorLike, mean: Tensor<R> | Tensor1D | TensorLike, variance: Tensor<R> | Tensor1D | TensorLike, offset?: Tensor<R> | Tensor1D | TensorLike, scale?: Tensor<R> | Tensor1D | TensorLike, varianceEpsilon?: number): Tensor<R>;
export declare class BoundingBox extends Box implements IBoundingBox {
constructor(left: number, top: number, right: number, bottom: number, allowNegativeDimensions?: boolean);
}
export declare class Box implements IBoundingBox, IRect {
static isRect(rect: any): boolean;
static assertIsValidBox(box: any, callee: string, allowNegativeDimensions?: boolean): void;
private _x;
private _y;
private _width;
private _height;
constructor(_box: IBoundingBox | IRect, allowNegativeDimensions?: boolean);
get x(): number;
get y(): number;
get width(): number;
get height(): number;
get left(): number;
get top(): number;
get right(): number;
get bottom(): number;
get area(): number;
get topLeft(): Point;
get topRight(): Point;
get bottomLeft(): Point;
get bottomRight(): Point;
round(): Box;
floor(): Box;
toSquare(): Box;
rescale(s: IDimensions | number): Box;
pad(padX: number, padY: number): Box;
clipAtImageBorders(imgWidth: number, imgHeight: number): Box;
shift(sx: number, sy: number): Box;
padAtBorders(imageHeight: number, imageWidth: number): {
dy: number;
edy: number;
dx: number;
edx: number;
y: number;
ey: number;
x: number;
ex: number;
w: number;
h: number;
};
calibrate(region: Box): Box;
}
declare type BoxPredictionParams = {
box_encoding_predictor: ConvParams;
class_predictor: ConvParams;
};
export declare function bufferToImage(buf: Blob): Promise;
declare const cast: typeof cast_;
/**
* Casts a `tf.Tensor` to a new dtype.
*
* ```js
* const x = tf.tensor1d([1.5, 2.5, 3]);
* tf.cast(x, 'int32').print();
* ```
* @param x The input tensor to be casted.
* @param dtype The dtype to cast the input tensor to.
*
* @doc {heading: 'Tensors', subheading: 'Transformations'}
*/
declare function cast_<T extends Tensor>(x: T | TensorLike, dtype: DataType): T;
declare const clipByValue: typeof clipByValue_;
/**
* Clips values element-wise. `max(min(x, clipValueMax), clipValueMin)`
*
* ```js
* const x = tf.tensor1d([-1, 2, -3, 4]);
*
* x.clipByValue(-2, 3).print(); // or tf.clipByValue(x, -2, 3)
* ```
* @param x The input tensor.
* @param clipValueMin Lower-bound of range to be clipped to.
* @param clipValueMax Upper-bound of range to be clipped to.
*
* @doc {heading: 'Operations', subheading: 'Basic math'}
*/
declare function clipByValue_<T extends Tensor>(x: T | TensorLike, clipValueMin: number, clipValueMax: number): T;
export declare class ComposableTask<T> {
    then(onfulfilled: (value: T) => T | PromiseLike<T>): Promise<T>;
    run(): Promise<T>;
}
export declare class ComputeAllFaceDescriptorsTask<TSource extends WithFaceLandmarks<WithFaceDetection<{}>>> extends ComputeFaceDescriptorsTaskBase<WithFaceDescriptor<TSource>[], TSource[]> {
    run(): Promise<WithFaceDescriptor<TSource>[]>;
    withFaceExpressions(): PredictAllFaceExpressionsWithFaceAlignmentTask<WithFaceDescriptor<TSource>>;
    withAgeAndGender(): PredictAllAgeAndGenderWithFaceAlignmentTask<WithFaceDescriptor<TSource>>;
}
/**
* Computes the information for a forward pass of a convolution/pooling
* operation.
*/
declare function computeConv2DInfo(inShape: [number, number, number, number], filterShape: [number, number, number, number], strides: number | [number, number], dilations: number | [number, number], pad: 'same' | 'valid' | number | ExplicitPadding, roundingMode?: 'floor' | 'round' | 'ceil', depthwise?: boolean, dataFormat?: 'channelsFirst' | 'channelsLast'): Conv2DInfo;
/**
* Computes the information for a forward pass of a 3D convolution/pooling
* operation.
*/
declare function computeConv3DInfo(inShape: [number, number, number, number, number], filterShape: [number, number, number, number, number], strides: number | [number, number, number], dilations: number | [number, number, number], pad: 'same' | 'valid' | number, depthwise?: boolean, dataFormat?: 'channelsFirst' | 'channelsLast', roundingMode?: 'floor' | 'round' | 'ceil'): Conv3DInfo;
declare function computeDefaultPad(inputShape: [number, number] | [number, number, number, number], fieldSize: number, stride: number, dilation?: number): number;
/**
*
* @param inputShape Input tensor shape is of the following dimensions:
* `[batch, height, width, inChannels]`.
* @param filterShape The filter shape is of the following dimensions:
* `[filterHeight, filterWidth, depth]`.
* @param strides The strides of the sliding window for each dimension of the
* input tensor: `[strideHeight, strideWidth]`.
* If `strides` is a single number,
* then `strideHeight == strideWidth`.
* @param pad The type of padding algorithm.
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid`: output will be smaller than input if filter is larger
 *     than 1x1.
* - For more info, see this guide:
* [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
* https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
* @param dataFormat The data format of the input and output data.
* Defaults to 'NHWC'.
* @param dilations The dilation rates: `[dilationHeight, dilationWidth]`.
* Defaults to `[1, 1]`. If `dilations` is a single number, then
* `dilationHeight == dilationWidth`.
*/
declare function computeDilation2DInfo(inputShape: [number, number, number, number], filterShape: [number, number, number], strides: number | [number, number], pad: 'same' | 'valid' | number, dataFormat: 'NHWC', dilations: number | [number, number]): Conv2DInfo;
/**
 * Computes a 128-entry vector (face descriptor / face embeddings) from the face shown in an image,
 * which uniquely represents the features of that person's face. The computed face descriptor can
* be used to measure the similarity between faces, by computing the euclidean distance of two
* face descriptors.
*
* @param inputs The face image extracted from the aligned bounding box of a face. Can
* also be an array of input images, which will be batch processed.
* @returns Face descriptor with 128 entries or array thereof in case of batch input.
*/
export declare const computeFaceDescriptor: (input: TNetInput) => Promise<Float32Array | Float32Array[]>;
export declare class ComputeFaceDescriptorsTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
    protected parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>;
    protected input: TNetInput;
    constructor(parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>, input: TNetInput);
}
declare function computePool2DInfo(inShape: [number, number, number, number], filterSize: [number, number] | number, strides: number | [number, number], dilations: number | [number, number], pad: 'same' | 'valid' | number | ExplicitPadding, roundingMode?: 'floor' | 'round' | 'ceil', dataFormat?: 'channelsFirst' | 'channelsLast'): Conv2DInfo;
/**
* Computes the information for a forward pass of a pooling3D operation.
*/
declare function computePool3DInfo(inShape: [number, number, number, number, number], filterSize: number | [number, number, number], strides: number | [number, number, number], dilations: number | [number, number, number], pad: 'same' | 'valid' | number, roundingMode?: 'floor' | 'round' | 'ceil', dataFormat?: 'NDHWC' | 'NCDHW'): Conv3DInfo;
declare function computeReshapedDimensions({ width, height }: IDimensions, inputSize: number): Dimensions;
export declare class ComputeSingleFaceDescriptorTask<TSource extends WithFaceLandmarks<WithFaceDetection<{}>>> extends ComputeFaceDescriptorsTaskBase<WithFaceDescriptor<TSource> | undefined, TSource | undefined> {
    run(): Promise<WithFaceDescriptor<TSource> | undefined>;
    withFaceExpressions(): PredictSingleFaceExpressionsWithFaceAlignmentTask<WithFaceDescriptor<TSource>>;
    withAgeAndGender(): PredictSingleAgeAndGenderWithFaceAlignmentTask<WithFaceDescriptor<TSource>>;
}
declare const concat: typeof concat_;
/**
* Concatenates a list of `tf.Tensor`s along a given axis.
*
* The tensors ranks and types must match, and their sizes must match in all
* dimensions except `axis`.
*
* Also available are stricter rank-specific methods that assert that
* `tensors` are of the given rank:
* - `tf.concat1d`
* - `tf.concat2d`
* - `tf.concat3d`
* - `tf.concat4d`
*
* Except `tf.concat1d` (which does not have axis param), all methods have
* same signature as this method.
*
* ```js
* const a = tf.tensor1d([1, 2]);
* const b = tf.tensor1d([3, 4]);
 * a.concat(b).print(); // or tf.concat([a, b])
* ```
*
* ```js
* const a = tf.tensor1d([1, 2]);
* const b = tf.tensor1d([3, 4]);
* const c = tf.tensor1d([5, 6]);
* tf.concat([a, b, c]).print();
* ```
*
* ```js
* const a = tf.tensor2d([[1, 2], [10, 20]]);
* const b = tf.tensor2d([[3, 4], [30, 40]]);
* const axis = 1;
* tf.concat([a, b], axis).print();
* ```
* @param tensors A list of tensors to concatenate.
 * @param axis The axis to concatenate along. Defaults to 0 (the first dim).
*
* @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
*/
declare function concat_<T extends Tensor>(tensors: Array<T | TensorLike>, axis?: number): T;
declare const conv2d: typeof conv2d_;
/**
* Computes a 2D convolution over the input x.
*
* @param x The input tensor, of rank 4 or rank 3, of shape
* `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
* assumed.
* @param filter The filter, rank 4, of shape
* `[filterHeight, filterWidth, inDepth, outDepth]`.
* @param strides The strides of the convolution: `[strideHeight,
* strideWidth]`.
* @param pad The type of padding algorithm.
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid`: output will be smaller than input if filter is larger
* than 1x1.
* - For more info, see this guide:
* [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
* https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
* @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
* "NHWC". Specify the data format of the input and output data. With the
* default format "NHWC", the data is stored in the order of: [batch,
* height, width, channels].
* @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
* in which we sample input values across the height and width dimensions
* in atrous convolution. Defaults to `[1, 1]`. If `dilations` is a single
* number, then `dilationHeight == dilationWidth`. If it is greater than
* 1, then all values of `strides` must be 1.
* @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
* provided, it will default to truncate.
*
* @doc {heading: 'Operations', subheading: 'Convolution'}
*/
declare function conv2d_<T extends Tensor3D | Tensor4D>(x: T | TensorLike, filter: Tensor4D | TensorLike, strides: [number, number] | number, pad: 'valid' | 'same' | number | conv_util.ExplicitPadding, dataFormat?: 'NHWC' | 'NCHW', dilations?: [number, number] | number, dimRoundingMode?: 'floor' | 'round' | 'ceil'): T;
/**
* Information about the forward pass of a convolution/pooling operation.
* It includes input and output shape, strides, filter size and padding
* information.
*/
declare type Conv2DInfo = {
batchSize: number;
inHeight: number;
inWidth: number;
inChannels: number;
outHeight: number;
outWidth: number;
outChannels: number;
dataFormat: 'channelsFirst' | 'channelsLast';
strideHeight: number;
strideWidth: number;
dilationHeight: number;
dilationWidth: number;
filterHeight: number;
filterWidth: number;
effectiveFilterHeight: number;
effectiveFilterWidth: number;
padInfo: PadInfo;
inShape: [number, number, number, number];
outShape: [number, number, number, number];
filterShape: [number, number, number, number];
};
/**
* Information about the forward pass of a 3D convolution/pooling operation.
* It includes input and output shape, strides, filter size and padding
* information.
*/
declare type Conv3DInfo = {
batchSize: number;
inDepth: number;
inHeight: number;
inWidth: number;
inChannels: number;
outDepth: number;
outHeight: number;
outWidth: number;
outChannels: number;
dataFormat: 'channelsFirst' | 'channelsLast';
strideDepth: number;
strideHeight: number;
strideWidth: number;
dilationDepth: number;
dilationHeight: number;
dilationWidth: number;
filterDepth: number;
filterHeight: number;
filterWidth: number;
effectiveFilterDepth: number;
effectiveFilterHeight: number;
effectiveFilterWidth: number;
padInfo: PadInfo3D;
inShape: [number, number, number, number, number];
outShape: [number, number, number, number, number];
filterShape: [number, number, number, number, number];
};
declare namespace conv_util {
export {
computeDilation2DInfo,
computePool2DInfo,
computePool3DInfo,
computeConv2DInfo,
computeConv3DInfo,
computeDefaultPad,
tupleValuesAreOne,
eitherStridesOrDilationsAreOne,
convertConv2DDataFormat,
ExplicitPadding,
PadInfo,
PadInfo3D,
Conv2DInfo,
Conv3DInfo
}
}
/**
* Convert Conv2D dataFormat from 'NHWC'|'NCHW' to
* 'channelsLast'|'channelsFirst'
* @param dataFormat in 'NHWC'|'NCHW' mode
* @return dataFormat in 'channelsLast'|'channelsFirst' mode
* @throws unknown dataFormat
*/
declare function convertConv2DDataFormat(dataFormat: 'NHWC' | 'NCHW'): 'channelsLast' | 'channelsFirst';
declare type ConvLayerParams = {
conv: ConvParams;
scale: ScaleLayerParams;
};
declare type ConvParams = {
filters: tf.Tensor4D;
bias: tf.Tensor1D;
};
export declare type ConvWithBatchNorm = {
conv: ConvParams;
bn: BatchNorm;
};
declare function createBrowserEnv(): Environment;
export declare function createCanvas({ width, height }: IDimensions): HTMLCanvasElement;
export declare function createCanvasFromMedia(media: HTMLImageElement | HTMLVideoElement | ImageData, dims?: IDimensions): HTMLCanvasElement;
export declare function createFaceDetectionNet(weights: Float32Array): SsdMobilenetv1;
export declare function createFaceRecognitionNet(weights: Float32Array): FaceRecognitionNet;
declare function createFileSystem(fs?: any): FileSystem_2;
declare function createNodejsEnv(): Environment;
export declare function createSsdMobilenetv1(weights: Float32Array): SsdMobilenetv1;
export declare function createTinyFaceDetector(weights: Float32Array): TinyFaceDetector;
export declare function createTinyYolov2(weights: Float32Array, withSeparableConvs?: boolean): TinyYolov2;
/**
* We wrap data id since we use weak map to avoid memory leaks.
* Since we have our own memory management, we have a reference counter
* mapping a tensor to its data, so there is always a pointer (even if that
* data is otherwise garbage collectable).
* See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/
* Global_Objects/WeakMap
*/
declare type DataId = object;
/** @docalias 'float32'|'int32'|'bool'|'complex64'|'string' */
declare type DataType = keyof DataTypeMap;
declare interface DataTypeMap {
float32: Float32Array;
int32: Int32Array;
bool: Uint8Array;
complex64: Float32Array;
string: string[];
}
export declare type DefaultTinyYolov2NetParams = {
conv0: ConvWithBatchNorm;
conv1: ConvWithBatchNorm;
conv2: ConvWithBatchNorm;
conv3: ConvWithBatchNorm;
conv4: ConvWithBatchNorm;
conv5: ConvWithBatchNorm;
conv6: ConvWithBatchNorm;
conv7: ConvWithBatchNorm;
conv8: ConvParams;
};
declare type DenseBlock3Params = {
conv0: SeparableConvParams | ConvParams;
conv1: SeparableConvParams;
conv2: SeparableConvParams;
};
declare type DenseBlock4Params = DenseBlock3Params & {
conv3: SeparableConvParams;
};
declare const depthwiseConv2d: typeof depthwiseConv2d_;
/**
* Depthwise 2D convolution.
*
* Given a 4D `input` array and a `filter` array of shape
* `[filterHeight, filterWidth, inChannels, channelMultiplier]` containing
* `inChannels` convolutional filters of depth 1, this op applies a
* different filter to each input channel (expanding from 1 channel to
* `channelMultiplier` channels for each), then concatenates the results
* together. The output has `inChannels * channelMultiplier` channels.
*
* See
* [https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d](
* https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d)
* for more details.
*
* @param x The input tensor, of rank 4 or rank 3, of shape
* `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
* assumed.
* @param filter The filter tensor, rank 4, of shape
* `[filterHeight, filterWidth, inChannels, channelMultiplier]`.
* @param strides The strides of the convolution: `[strideHeight,
* strideWidth]`. If strides is a single number, then `strideHeight ==
* strideWidth`.
* @param pad The type of padding algorithm.
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid`: output will be smaller than input if filter is larger
* than 1x1.
* - For more info, see this guide:
* [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
* https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
* @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
* in which we sample input values across the height and width dimensions
* in atrous convolution. Defaults to `[1, 1]`. If `rate` is a single
* number, then `dilationHeight == dilationWidth`. If it is greater than
* 1, then all values of `strides` must be 1.
* @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
* "NHWC". Specify the data format of the input and output data. With the
* default format "NHWC", the data is stored in the order of: [batch,
* height, width, channels]. Only "NHWC" is currently supported.
* @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
* provided, it will default to truncate.
*
* @doc {heading: 'Operations', subheading: 'Convolution'}
*/
declare function depthwiseConv2d_<T extends Tensor3D | Tensor4D>(x: T | TensorLike, filter: Tensor4D | TensorLike, strides: [number, number] | number, pad: 'valid' | 'same' | number | ExplicitPadding, dataFormat?: 'NHWC' | 'NCHW', dilations?: [number, number] | number, dimRoundingMode?: 'floor' | 'round' | 'ceil'): T;
export declare class DetectAllFaceLandmarksTask<TSource extends WithFaceDetection<{}>> extends DetectFaceLandmarksTaskBase<WithFaceLandmarks<TSource>[], TSource[]> {
    run(): Promise<WithFaceLandmarks<TSource>[]>;
    withFaceExpressions(): PredictAllFaceExpressionsWithFaceAlignmentTask<WithFaceLandmarks<TSource>>;
    withAgeAndGender(): PredictAllAgeAndGenderWithFaceAlignmentTask<WithFaceLandmarks<TSource>>;
    withFaceDescriptors(): ComputeAllFaceDescriptorsTask<WithFaceLandmarks<TSource>>;
}
export declare function detectAllFaces(input: TNetInput, options?: FaceDetectionOptions): DetectAllFacesTask;
export declare class DetectAllFacesTask extends DetectFacesTaskBase<FaceDetection[]> {
    run(): Promise<FaceDetection[]>;
private runAndExtendWithFaceDetections;
withFaceLandmarks(useTinyLandmarkNet?: boolean): DetectAllFaceLandmarksTask<{
detection: FaceDetection;
}>;
withFaceExpressions(): PredictAllFaceExpressionsTask<{
detection: FaceDetection;
}>;
withAgeAndGender(): PredictAllAgeAndGenderTask<{
detection: FaceDetection;
}>;
}
/**
* Detects the 68 point face landmark positions of the face shown in an image.
*
* @param inputs The face image extracted from the bounding box of a face. Can
* also be an array of input images, which will be batch processed.
* @returns 68 point face landmarks or array thereof in case of batch input.
*/
export declare const detectFaceLandmarks: (input: TNetInput) => Promise<FaceLandmarks68 | FaceLandmarks68[]>;
export declare class DetectFaceLandmarksTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
    protected parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>;
    protected input: TNetInput;
    protected useTinyLandmarkNet: boolean;
    constructor(parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>, input: TNetInput, useTinyLandmarkNet: boolean);
protected get landmarkNet(): FaceLandmark68Net | FaceLandmark68TinyNet;
}
/**
* Detects the 68 point face landmark positions of the face shown in an image
* using a tinier version of the 68 point face landmark model, which is slightly
* faster at inference, but also slightly less accurate.
*
* @param inputs The face image extracted from the bounding box of a face. Can
* also be an array of input images, which will be batch processed.
* @returns 68 point face landmarks or array thereof in case of batch input.
*/
export declare const detectFaceLandmarksTiny: (input: TNetInput) => Promise<FaceLandmarks68 | FaceLandmarks68[]>;
export declare class DetectFacesTaskBase<TReturn> extends ComposableTask<TReturn> {
protected input: TNetInput;
protected options: FaceDetectionOptions;
constructor(input: TNetInput, options?: FaceDetectionOptions);
}
export declare const detectLandmarks: (input: TNetInput) => Promise<FaceLandmarks68 | FaceLandmarks68[]>;
export declare function detectSingleFace(input: TNetInput, options?: FaceDetectionOptions): DetectSingleFaceTask;
export declare class DetectSingleFaceLandmarksTask<TSource extends WithFaceDetection<{}>> extends DetectFaceLandmarksTaskBase<WithFaceLandmarks<TSource> | undefined, TSource | undefined> {
    run(): Promise<WithFaceLandmarks<TSource> | undefined>;
    withFaceExpressions(): PredictSingleFaceExpressionsWithFaceAlignmentTask<WithFaceLandmarks<TSource>>;
    withAgeAndGender(): PredictSingleAgeAndGenderWithFaceAlignmentTask<WithFaceLandmarks<TSource>>;
    withFaceDescriptor(): ComputeSingleFaceDescriptorTask<WithFaceLandmarks<TSource>>;
}
export declare class DetectSingleFaceTask extends DetectFacesTaskBase<FaceDetection | undefined> {
    run(): Promise<FaceDetection | undefined>;
private runAndExtendWithFaceDetection;
withFaceLandmarks(useTinyLandmarkNet?: boolean): DetectSingleFaceLandmarksTask<{
detection: FaceDetection;
}>;
withFaceExpressions(): PredictSingleFaceExpressionsTask<{
detection: FaceDetection;
}>;
withAgeAndGender(): PredictSingleAgeAndGenderTask<{
detection: FaceDetection;
}>;
}
export declare class Dimensions implements IDimensions {
private _width;
private _height;
constructor(width: number, height: number);
get width(): number;
get height(): number;
reverse(): Dimensions;
}
declare const div: typeof div_;
/**
* Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting.
*
* ```js
* const a = tf.tensor1d([1, 4, 9, 16]);
* const b = tf.tensor1d([1, 2, 3, 4]);
*
* a.div(b).print(); // or tf.div(a, b)
* ```
*
* ```js
* // Broadcast div a with b.
* const a = tf.tensor1d([2, 4, 6, 8]);
* const b = tf.scalar(2);
*
* a.div(b).print(); // or tf.div(a, b)
* ```
*
* @param a The first tensor as the numerator.
* @param b The second tensor as the denominator. Must have the same dtype as
* `a`.
*
* @doc {heading: 'Operations', subheading: 'Arithmetic'}
*/
declare function div_<T extends Tensor>(a: Tensor | TensorLike, b: Tensor | TensorLike): T;
declare namespace draw {
export {
drawContour,
drawDetections,
TDrawDetectionsInput,
drawFaceExpressions,
DrawFaceExpressionsInput,
IDrawBoxOptions,
DrawBoxOptions,
DrawBox,
drawFaceLandmarks,
IDrawFaceLandmarksOptions,
DrawFaceLandmarksOptions,
DrawFaceLandmarks,
DrawFaceLandmarksInput,
AnchorPosition,
IDrawTextFieldOptions,
DrawTextFieldOptions,
DrawTextField
}
}
export { draw }
declare class DrawBox {
box: Box;
options: DrawBoxOptions;
constructor(box: IBoundingBox | IRect, options?: IDrawBoxOptions);
draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): void;
}
declare class DrawBoxOptions {
boxColor: string;
lineWidth: number;
drawLabelOptions: DrawTextFieldOptions;
label?: string;
constructor(options?: IDrawBoxOptions);
}
declare function drawContour(ctx: CanvasRenderingContext2D, points: Point[], isClosed?: boolean): void;
declare function drawDetections(canvasArg: string | HTMLCanvasElement, detections: TDrawDetectionsInput | Array<TDrawDetectionsInput>): void;
declare function drawFaceExpressions(canvasArg: string | HTMLCanvasElement, faceExpressions: DrawFaceExpressionsInput | Array<DrawFaceExpressionsInput>, minConfidence?: number, textFieldAnchor?: IPoint): void;
declare type DrawFaceExpressionsInput = FaceExpressions | WithFaceExpressions<{}>;
declare class DrawFaceLandmarks {
faceLandmarks: FaceLandmarks;
options: DrawFaceLandmarksOptions;
constructor(faceLandmarks: FaceLandmarks, options?: IDrawFaceLandmarksOptions);
draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): void;
}
declare function drawFaceLandmarks(canvasArg: string | HTMLCanvasElement, faceLandmarks: DrawFaceLandmarksInput | Array<DrawFaceLandmarksInput>): void;
declare type DrawFaceLandmarksInput = FaceLandmarks | WithFaceLandmarks<WithFaceDetection<{}>>;
declare class DrawFaceLandmarksOptions {
drawLines: boolean;
drawPoints: boolean;
lineWidth: number;
pointSize: number;
lineColor: string;
pointColor: string;
constructor(options?: IDrawFaceLandmarksOptions);
}
declare class DrawTextField {
text: string[];
anchor: IPoint;
options: DrawTextFieldOptions;
constructor(text: string | string[] | DrawTextField, anchor: IPoint, options?: IDrawTextFieldOptions);
measureWidth(ctx: CanvasRenderingContext2D): number;
measureHeight(): number;
getUpperLeft(ctx: CanvasRenderingContext2D, canvasDims?: IDimensions): IPoint;
draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): void;
}
declare class DrawTextFieldOptions implements IDrawTextFieldOptions {
anchorPosition: AnchorPosition;
backgroundColor: string;
fontColor: string;
fontSize: number;
fontStyle: string;
padding: number;
constructor(options?: IDrawTextFieldOptions);
}
declare function eitherStridesOrDilationsAreOne(strides: number | number[], dilations: number | number[]): boolean;
declare let ENV: Environment_2;
export declare const env: {
getEnv: typeof getEnv;
setEnv: typeof setEnv;
initialize: typeof initialize;
createBrowserEnv: typeof createBrowserEnv;
createFileSystem: typeof createFileSystem;
createNodejsEnv: typeof createNodejsEnv;
monkeyPatch: typeof monkeyPatch;
isBrowser: typeof isBrowser;
isNodejs: typeof isNodejs;
};
export declare type Environment = FileSystem_2 & {
Canvas: typeof HTMLCanvasElement;
CanvasRenderingContext2D: typeof CanvasRenderingContext2D;
Image: typeof HTMLImageElement;
ImageData: typeof ImageData;
Video: typeof HTMLVideoElement;
createCanvasElement: () => HTMLCanvasElement;
createImageElement: () => HTMLImageElement;
createVideoElement: () => HTMLVideoElement;
    fetch: (url: string, init?: RequestInit) => Promise<Response>;
};
/**
* The environment contains evaluated flags as well as the registered platform.
* This is always used as a global singleton and can be retrieved with
* `tf.env()`.
*
* @doc {heading: 'Environment'}
*/
declare class Environment_2 {
global: any;
private flags;
private flagRegistry;
private urlFlags;
platformName: string;
platform: Platform;
getQueryParams: typeof getQueryParams;
constructor(global: any);
setPlatform(platformName: string, platform: Platform): void;
registerFlag(flagName: string, evaluationFn: FlagEvaluationFn, setHook?: (value: FlagValue) => void): void;
    getAsync(flagName: string): Promise<FlagValue>;
get(flagName: string): FlagValue;
getNumber(flagName: string): number;
getBool(flagName: string): boolean;
getFlags(): Flags;
readonly features: Flags;
set(flagName: string, value: FlagValue): void;
private evaluateFlag;
setFlags(flags: Flags): void;
reset(): void;
private populateURLFlags;
}
export declare function euclideanDistance(arr1: number[] | Float32Array, arr2: number[] | Float32Array): number;
declare const exp: typeof exp_;
/**
* Computes exponential of the input `tf.Tensor` element-wise. `e ^ x`
*
* ```js
* const x = tf.tensor1d([1, 2, -3]);
*
* x.exp().print(); // or tf.exp(x)
* ```
* @param x The input tensor.
*
* @doc {heading: 'Operations', subheading: 'Basic math'}
*/
declare function exp_<T extends Tensor>(x: T | TensorLike): T;
declare const expandDims: typeof expandDims_;
/**
* Returns a `tf.Tensor` that has expanded rank, by inserting a dimension
* into the tensor's shape.
*
* ```js
* const x = tf.tensor1d([1, 2, 3, 4]);
* const axis = 1;
* x.expandDims(axis).print();
* ```
*
* @param x The input tensor whose dimensions to be expanded.
* @param axis The dimension index at which to insert shape of `1`. Defaults
* to 0 (the first dimension).
*
* @doc {heading: 'Tensors', subheading: 'Transformations'}
*/
declare function expandDims_<T extends Tensor>(x: Tensor | TensorLike, axis?: number): T;
declare type ExplicitPadding = [[number, number], [number, number], [number, number], [number, number]];
export declare function extendWithAge<TSource>(sourceObj: TSource, age: number): WithAge<TSource>;
export declare function extendWithFaceDescriptor<TSource>(sourceObj: TSource, descriptor: Float32Array): WithFaceDescriptor<TSource>;
export declare function extendWithFaceDetection<TSource>(sourceObj: TSource, detection: FaceDetection): WithFaceDetection<TSource>;
export declare function extendWithFaceExpressions<TSource>(sourceObj: TSource, expressions: FaceExpressions): WithFaceExpressions<TSource>;
export declare function extendWithFaceLandmarks<TSource extends WithFaceDetection<{}>, TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>(sourceObj: TSource, unshiftedLandmarks: TFaceLandmarks): WithFaceLandmarks<TSource, TFaceLandmarks>;
export declare function extendWithGender<TSource>(sourceObj: TSource, gender: Gender, genderProbability: number): WithGender<TSource>;
/**
* Extracts the image regions containing the detected faces.
*
* @param input The image that face detection has been performed on.
* @param detections The face detection results or face bounding boxes for that image.
* @returns The Canvases of the corresponding image region for each detected face.
*/
export declare function extractFaces(input: TNetInput, detections: Array<FaceDetection | Rect>): Promise<HTMLCanvasElement[]>;
/**
* Extracts the tensors of the image regions containing the detected faces.
* Useful if you want to compute the face descriptors for the face images.
 * Using this method is faster than extracting a canvas for each face and
* converting them to tensors individually.
*
* @param imageTensor The image tensor that face detection has been performed on.
* @param detections The face detection results or face bounding boxes for that image.
* @returns Tensors of the corresponding image region for each detected face.
*/
export declare function extractFaceTensors(imageTensor: tf.Tensor3D | tf.Tensor4D, detections: Array<FaceDetection | Rect>): Promise<tf.Tensor3D[]>;
export declare const FACE_EXPRESSION_LABELS: string[];
export declare class FaceDetection extends ObjectDetection implements IFaceDetecion {
constructor(score: number, relativeBox: Rect, imageDims: IDimensions);
forSize(width: number, height: number): FaceDetection;
}
export declare type FaceDetectionFunction = (input: TNetInput) => Promise<FaceDetection[]>;
export declare class FaceDetectionNet extends SsdMobilenetv1 {
}
export declare type FaceDetectionOptions = TinyFaceDetectorOptions | SsdMobilenetv1Options | TinyYolov2Options;
export declare class FaceExpressionNet extends FaceProcessor<FaceFeatureExtractorParams> {
constructor(faceFeatureExtractor?: FaceFeatureExtractor);
forwardInput(input: NetInput | tf.Tensor4D): tf.Tensor2D;
    forward(input: TNetInput): Promise<tf.Tensor2D>;
    predictExpressions(input: TNetInput): Promise<FaceExpressions | FaceExpressions[]>;
protected getDefaultModelName(): string;
protected getClassifierChannelsIn(): number;
protected getClassifierChannelsOut(): number;
}
export declare class FaceExpressions {
neutral: number;
happy: number;
sad: number;
angry: number;
fearful: number;
disgusted: number;
surprised: number;
constructor(probabilities: number[] | Float32Array);
asSortedArray(): {
expression: string;
probability: number;
}[];
}
declare class FaceFeatureExtractor extends NeuralNetwork<FaceFeatureExtractorParams> implements IFaceFeatureExtractor<FaceFeatureExtractorParams> {
constructor();
forwardInput(input: NetInput): tf.Tensor4D;
    forward(input: TNetInput): Promise<tf.Tensor4D>;
protected getDefaultModelName(): string;
protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: FaceFeatureExtractorParams;
paramMappings: ParamMapping[];
};
protected extractParams(weights: Float32Array): {
params: FaceFeatureExtractorParams;
paramMappings: ParamMapping[];
};
}
declare type FaceFeatureExtractorParams = {
dense0: DenseBlock4Params;
dense1: DenseBlock4Params;
dense2: DenseBlock4Params;
dense3: DenseBlock4Params;
};
export declare class FaceLandmark68Net extends FaceLandmark68NetBase<FaceFeatureExtractorParams> {
constructor(faceFeatureExtractor?: FaceFeatureExtractor);
protected getDefaultModelName(): string;
protected getClassifierChannelsIn(): number;
}
declare abstract class FaceLandmark68NetBase<TExtractorParams extends FaceFeatureExtractorParams | TinyFaceFeatureExtractorParams> extends FaceProcessor<TExtractorParams> {
postProcess(output: tf.Tensor2D, inputSize: number, originalDimensions: IDimensions[]): tf.Tensor2D;
forwardInput(input: NetInput): tf.Tensor2D;
    forward(input: TNetInput): Promise<tf.Tensor2D>;
    detectLandmarks(input: TNetInput): Promise<FaceLandmarks68 | FaceLandmarks68[]>;
protected getClassifierChannelsOut(): number;
}
export declare class FaceLandmark68TinyNet extends FaceLandmark68NetBase<TinyFaceFeatureExtractorParams> {
constructor(faceFeatureExtractor?: TinyFaceFeatureExtractor);
protected getDefaultModelName(): string;
protected getClassifierChannelsIn(): number;
}
export declare class FaceLandmarkNet extends FaceLandmark68Net {
}
export declare class FaceLandmarks implements IFaceLandmarks {
protected _shift: Point;
protected _positions: Point[];
protected _imgDims: Dimensions;
constructor(relativeFaceLandmarkPositions: Point[], imgDims: IDimensions, shift?: Point);
get shift(): Point;
get imageWidth(): number;
get imageHeight(): number;
get positions(): Point[];
get relativePositions(): Point[];
    forSize<T extends FaceLandmarks>(width: number, height: number): T;
    shiftBy<T extends FaceLandmarks>(x: number, y: number): T;
    shiftByPoint<T extends FaceLandmarks>(pt: Point): T;
/**
     * Aligns the face landmarks after face detection from the relative positions of the face's
     * bounding box, or from its current shift. This function should be used to align the face images
* after face detection has been performed, before they are passed to the face recognition net.
* This will make the computed face descriptor more accurate.
*
* @param detection (optional) The bounding box of the face or the face detection result. If
     * no argument is passed, the positions of the face landmarks are assumed to be relative to
     * its current shift.
* @returns The bounding box of the aligned face.
*/
align(detection?: FaceDetection | IRect | IBoundingBox | null, options?: {
useDlibAlignment?: boolean;
minBoxPadding?: number;
}): Box;
private alignDlib;
private alignMinBbox;
protected getRefPointsForAlignment(): Point[];
}
export declare class FaceLandmarks5 extends FaceLandmarks {
protected getRefPointsForAlignment(): Point[];
}
export declare class FaceLandmarks68 extends FaceLandmarks {
getJawOutline(): Point[];
getLeftEyeBrow(): Point[];
getRightEyeBrow(): Point[];
getNose(): Point[];
getLeftEye(): Point[];
getRightEye(): Point[];
getMouth(): Point[];
protected getRefPointsForAlignment(): Point[];
}
export declare class FaceMatch implements IFaceMatch {
private _label;
private _distance;
constructor(label: string, distance: number);
get label(): string;
get distance(): number;
toString(withDistance?: boolean): string;
}
export declare class FaceMatcher {
private _labeledDescriptors;
private _distanceThreshold;
    constructor(inputs: LabeledFaceDescriptors | WithFaceDescriptor<any> | Float32Array | Array<LabeledFaceDescriptors | WithFaceDescriptor<any> | Float32Array>, distanceThreshold?: number);
get labeledDescriptors(): LabeledFaceDescriptors[];
get distanceThreshold(): number;
computeMeanDistance(queryDescriptor: Float32Array, descriptors: Float32Array[]): number;
matchDescriptor(queryDescriptor: Float32Array): FaceMatch;
findBestMatch(queryDescriptor: Float32Array): FaceMatch;
toJSON(): any;
static fromJSON(json: any): FaceMatcher;
}
declare abstract class FaceProcessor<TExtractorParams extends FaceFeatureExtractorParams | TinyFaceFeatureExtractorParams> extends NeuralNetwork<NetParams_2> {
    protected _faceFeatureExtractor: IFaceFeatureExtractor<TExtractorParams>;
    constructor(_name: string, faceFeatureExtractor: IFaceFeatureExtractor<TExtractorParams>);
    get faceFeatureExtractor(): IFaceFeatureExtractor<TExtractorParams>;
protected abstract getDefaultModelName(): string;
protected abstract getClassifierChannelsIn(): number;
protected abstract getClassifierChannelsOut(): number;
runNet(input: NetInput | tf.Tensor4D): tf.Tensor2D;
dispose(throwOnRedispose?: boolean): void;
loadClassifierParams(weights: Float32Array): void;
extractClassifierParams(weights: Float32Array): {
params: NetParams_2;
paramMappings: ParamMapping[];
};
protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: NetParams_2;
paramMappings: ParamMapping[];
};
protected extractParams(weights: Float32Array): {
params: NetParams_2;
paramMappings: ParamMapping[];
};
}
export declare class FaceRecognitionNet extends NeuralNetwork<NetParams_3> {
constructor();
forwardInput(input: NetInput): tf.Tensor2D;
    forward(input: TNetInput): Promise<tf.Tensor2D>;
    computeFaceDescriptor(input: TNetInput): Promise<Float32Array | Float32Array[]>;
protected getDefaultModelName(): string;
protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: NetParams_3;
paramMappings: ParamMapping[];
};
protected extractParams(weights: Float32Array): {
params: NetParams_3;
paramMappings: ParamMapping[];
};
}
declare type FCParams = {
weights: tf.Tensor2D;
bias: tf.Tensor1D;
};
export declare function fetchImage(uri: string): Promise<HTMLImageElement>;
export declare function fetchJson<T>(uri: string): Promise<T>;
export declare function fetchNetWeights(uri: string): Promise<Float32Array>;
export declare function fetchOrThrow(url: string, init?: RequestInit): Promise<Response>;
export declare function fetchVideo(uri: string): Promise<HTMLVideoElement>;
declare type FileSystem_2 = {
    readFile: (filePath: string) => Promise<any>;
};
export { FileSystem_2 as FileSystem }
/**
* Creates a `tf.Tensor` filled with a scalar value.
*
* ```js
* tf.fill([2, 2], 4).print();
* ```
*
* @param shape An array of integers defining the output tensor shape.
* @param value The scalar value to fill the tensor with.
* @param dtype The type of an element in the resulting tensor. Defaults to
* 'float'.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
declare function fill<R extends Rank>(shape: ShapeMap[R], value: number | string, dtype?: DataType): Tensor<R>;
declare type FlagEvaluationFn = (() => FlagValue) | (() => Promise<FlagValue>);
declare type Flags = {
[featureName: string]: FlagValue;
};
declare type FlagValue = number | boolean;
export declare enum Gender {
FEMALE = "female",
MALE = "male"
}
declare function getCenterPoint(pts: Point[]): Point;
export declare function getContext2dOrThrow(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): CanvasRenderingContext2D;
declare function getEnv(): Environment;
export declare function getMediaDimensions(input: HTMLImageElement | HTMLCanvasElement | HTMLVideoElement | IDimensions): Dimensions;
declare function getQueryParams(queryString: string): {
[key: string]: string;
};
export declare interface IBoundingBox {
left: number;
top: number;
right: number;
bottom: number;
}
export declare interface IDimensions {
width: number;
height: number;
}
declare interface IDrawBoxOptions {
boxColor?: string;
lineWidth?: number;
drawLabelOptions?: IDrawTextFieldOptions;
label?: string;
}
declare interface IDrawFaceLandmarksOptions {
drawLines?: boolean;
drawPoints?: boolean;
lineWidth?: number;
pointSize?: number;
lineColor?: string;
pointColor?: string;
}
declare interface IDrawTextFieldOptions {
anchorPosition?: AnchorPosition;
backgroundColor?: string;
fontColor?: string;
fontSize?: number;
fontStyle?: string;
padding?: number;
}
export declare interface IFaceDetecion {
score: number;
box: Box;
}
declare interface IFaceFeatureExtractor<TNetParams extends FaceFeatureExtractorParams | TinyFaceFeatureExtractorParams> extends NeuralNetwork<TNetParams> {
    forwardInput(input: NetInput): tf.Tensor4D;
    forward(input: TNetInput): Promise<tf.Tensor4D>;
}
export declare interface IFaceLandmarks {
positions: Point[];
shift: Point;
}
export declare interface IFaceMatch {
label: string;
distance: number;
}
export declare function imageTensorToCanvas(imgTensor: tf.Tensor, canvas?: HTMLCanvasElement): Promise<HTMLCanvasElement>;
export declare function imageToSquare(input: HTMLImageElement | HTMLCanvasElement, inputSize: number, centerImage?: boolean): HTMLCanvasElement;
declare function initialize(): void | null;
export declare function inverseSigmoid(x: number): number;
export declare function iou(box1: Box, box2: Box, isIOU?: boolean): number;
export declare interface IPoint {
x: number;
y: number;
}
export declare interface IRect {
x: number;
y: number;
width: number;
height: number;
}
declare function isBrowser(): boolean;
declare function isDimensions(obj: any): boolean;
declare function isEven(num: number): boolean;
declare function isFloat(num: number): boolean;
export declare function isMediaElement(input: any): boolean;
export declare function isMediaLoaded(media: HTMLImageElement | HTMLVideoElement): boolean;
declare function isNodejs(): boolean;
export declare interface ISsdMobilenetv1Options {
minConfidence?: number;
maxResults?: number;
}
declare function isTensor(tensor: any, dim: number): boolean;
declare function isTensor1D(tensor: any): tensor is tf.Tensor1D;
declare function isTensor2D(tensor: any): tensor is tf.Tensor2D;
declare function isTensor3D(tensor: any): tensor is tf.Tensor3D;
declare function isTensor4D(tensor: any): tensor is tf.Tensor4D;
declare function isValidNumber(num: any): boolean;
declare function isValidProbablitiy(num: any): boolean;
export declare function isWithAge(obj: any): obj is WithAge<{}>;
export declare function isWithFaceDetection(obj: any): obj is WithFaceDetection<{}>;
export declare function isWithFaceExpressions(obj: any): obj is WithFaceExpressions<{}>;
export declare function isWithFaceLandmarks(obj: any): obj is WithFaceLandmarks<WithFaceDetection<{}>, FaceLandmarks>;
export declare function isWithGender(obj: any): obj is WithGender<{}>;
export declare type ITinyFaceDetectorOptions = ITinyYolov2Options;
export declare interface ITinyYolov2Options {
inputSize?: number;
scoreThreshold?: number;
}
export declare class LabeledBox extends Box {
static assertIsValidLabeledBox(box: any, callee: string): void;
private _label;
constructor(box: IBoundingBox | IRect | any, label: number);
get label(): number;
}
export declare class LabeledFaceDescriptors {
private _label;
private _descriptors;
constructor(label: string, descriptors: Float32Array[]);
get label(): string;
get descriptors(): Float32Array[];
toJSON(): any;
static fromJSON(json: any): LabeledFaceDescriptors;
}
export declare const loadAgeGenderModel: (url: string) => Promise;
export declare const loadFaceDetectionModel: (url: string) => Promise;
export declare const loadFaceExpressionModel: (url: string) => Promise;
export declare const loadFaceLandmarkModel: (url: string) => Promise;
export declare const loadFaceLandmarkTinyModel: (url: string) => Promise;
export declare const loadFaceRecognitionModel: (url: string) => Promise;
export declare const loadSsdMobilenetv1Model: (url: string) => Promise;
export declare const loadTinyFaceDetectorModel: (url: string) => Promise;
export declare const loadTinyYolov2Model: (url: string) => Promise;
export declare function loadWeightMap(uri: string | undefined, defaultModelName: string): Promise<tf.NamedTensorMap>;
export declare const locateFaces: (input: TNetInput, options: SsdMobilenetv1Options) => Promise<FaceDetection[]>;
export declare function matchDimensions(input: IDimensions, reference: IDimensions, useMediaDimensions?: boolean): {
width: number;
height: number;
};
declare const matMul: typeof matMul_;
/**
* Computes the dot product of two matrices, A * B. These must be matrices.
*
* ```js
* const a = tf.tensor2d([1, 2], [1, 2]);
* const b = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* a.matMul(b).print(); // or tf.matMul(a, b)
* ```
* @param a First matrix in dot product operation.
* @param b Second matrix in dot product operation.
* @param transposeA If true, `a` is transposed before multiplication.
* @param transposeB If true, `b` is transposed before multiplication.
*
* @doc {heading: 'Operations', subheading: 'Matrices'}
*/
declare function matMul_<T extends Tensor>(a: Tensor | TensorLike, b: Tensor | TensorLike, transposeA?: boolean, transposeB?: boolean): T;
declare const maxPool: typeof maxPool_;
/**
* Computes the 2D max pooling of an image.
*
* @param x The input tensor, of rank 4 or rank 3 of shape
* `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
* @param filterSize The filter size: `[filterHeight, filterWidth]`. If
* `filterSize` is a single number, then `filterHeight == filterWidth`.
* @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If
* `strides` is a single number, then `strideHeight == strideWidth`.
* @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
* in which we sample input values across the height and width dimensions
* in dilated pooling. Defaults to `[1, 1]`. If `dilations` is a single
* number, then `dilationHeight == dilationWidth`. If it is greater than
* 1, then all values of `strides` must be 1.
* @param pad The type of padding algorithm.
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid`: output will be smaller than input if filter is larger
* than 1x1.
* - For more info, see this guide:
* [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
* https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
* @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
* provided, it will default to truncate.
*/
declare function maxPool_<T extends Tensor3D | Tensor4D>(x: T | TensorLike, filterSize: [number, number] | number, strides: [number, number] | number, pad: 'valid' | 'same' | number | conv_util.ExplicitPadding, dimRoundingMode?: 'floor' | 'round' | 'ceil'): T;
export declare function minBbox(pts: IPoint[]): BoundingBox;
export declare type MobilenetParams = {
conv0: SeparableConvParams | ConvParams;
conv1: SeparableConvParams;
conv2: SeparableConvParams;
conv3: SeparableConvParams;
conv4: SeparableConvParams;
conv5: SeparableConvParams;
conv6?: SeparableConvParams;
conv7?: SeparableConvParams;
conv8: ConvParams;
};
declare namespace MobileNetV1 {
type DepthwiseConvParams = {
filters: tf.Tensor4D;
batch_norm_scale: tf.Tensor1D;
batch_norm_offset: tf.Tensor1D;
batch_norm_mean: tf.Tensor1D;
batch_norm_variance: tf.Tensor1D;
};
type ConvPairParams = {
depthwise_conv: DepthwiseConvParams;
pointwise_conv: PointwiseConvParams;
};
type Params = {
conv_0: PointwiseConvParams;
conv_1: ConvPairParams;
conv_2: ConvPairParams;
conv_3: ConvPairParams;
conv_4: ConvPairParams;
conv_5: ConvPairParams;
conv_6: ConvPairParams;
conv_7: ConvPairParams;
conv_8: ConvPairParams;
conv_9: ConvPairParams;
conv_10: ConvPairParams;
conv_11: ConvPairParams;
conv_12: ConvPairParams;
conv_13: ConvPairParams;
};
}
declare function monkeyPatch(env: Partial<Environment>): void;
declare const mul: typeof mul_;
/**
* Multiplies two `tf.Tensor`s element-wise, A * B. Supports broadcasting.
*
* We also expose `tf.mulStrict` which has the same signature as this op and
* asserts that `a` and `b` are the same shape (does not broadcast).
*
* ```js
* const a = tf.tensor1d([1, 2, 3, 4]);
* const b = tf.tensor1d([2, 3, 4, 5]);
*
* a.mul(b).print(); // or tf.mul(a, b)
* ```
*
* ```js
* // Broadcast mul a with b.
* const a = tf.tensor1d([1, 2, 3, 4]);
* const b = tf.scalar(5);
*
* a.mul(b).print(); // or tf.mul(a, b)
* ```
* @param a The first tensor to multiply.
* @param b The second tensor to multiply. Must have the same dtype as `a`.
*
* @doc {heading: 'Operations', subheading: 'Arithmetic'}
*/
declare function mul_<T extends Tensor>(a: Tensor | TensorLike, b: Tensor | TensorLike): T;
/** @docalias {[name: string]: Tensor} */
declare type NamedTensorMap = {
[name: string]: Tensor;
};
export declare class NetInput {
private _imageTensors;
private _canvases;
private _batchSize;
private _treatAsBatchInput;
private _inputDimensions;
private _inputSize;
    constructor(inputs: Array<TResolvedNetInput>, treatAsBatchInput?: boolean);
    get imageTensors(): Array<tf.Tensor3D | tf.Tensor4D>;
get canvases(): HTMLCanvasElement[];
get isBatchInput(): boolean;
get batchSize(): number;
get inputDimensions(): number[][];
get inputSize(): number | undefined;
get reshapedInputDimensions(): Dimensions[];
getInput(batchIdx: number): tf.Tensor3D | tf.Tensor4D | HTMLCanvasElement;
getInputDimensions(batchIdx: number): number[];
getInputHeight(batchIdx: number): number;
getInputWidth(batchIdx: number): number;
getReshapedInputDimensions(batchIdx: number): Dimensions;
/**
* Create a batch tensor from all input canvases and tensors
* with size [batchSize, inputSize, inputSize, 3].
*
* @param inputSize Height and width of the tensor.
     * @param isCenterInputs (optional, default: false) If true, add an equal amount of padding on
     * both sides of the minor dimension of the image.
* @returns The batch tensor.
*/
toBatchTensor(inputSize: number, isCenterInputs?: boolean): tf.Tensor4D;
}
export declare type NetOutput = {
age: tf.Tensor1D;
gender: tf.Tensor2D;
};
export declare type NetParams = {
fc: {
age: FCParams;
gender: FCParams;
};
};
declare type NetParams_2 = {
fc: FCParams;
};
declare type NetParams_3 = {
conv32_down: ConvLayerParams;
conv32_1: ResidualLayerParams;
conv32_2: ResidualLayerParams;
conv32_3: ResidualLayerParams;
conv64_down: ResidualLayerParams;
conv64_1: ResidualLayerParams;
conv64_2: ResidualLayerParams;
conv64_3: ResidualLayerParams;
conv128_down: ResidualLayerParams;
conv128_1: ResidualLayerParams;
conv128_2: ResidualLayerParams;
conv256_down: ResidualLayerParams;
conv256_1: ResidualLayerParams;
conv256_2: ResidualLayerParams;
conv256_down_out: ResidualLayerParams;
fc: tf.Tensor2D;
};
declare type NetParams_4 = {
mobilenetv1: MobileNetV1.Params;
prediction_layer: PredictionLayerParams;
output_layer: OutputLayerParams;
};
export declare const nets: {
ssdMobilenetv1: SsdMobilenetv1;
tinyFaceDetector: TinyFaceDetector;
tinyYolov2: TinyYolov2;
faceLandmark68Net: FaceLandmark68Net;
faceLandmark68TinyNet: FaceLandmark68TinyNet;
faceRecognitionNet: FaceRecognitionNet;
faceExpressionNet: FaceExpressionNet;
ageGenderNet: AgeGenderNet;
};
export declare abstract class NeuralNetwork<TNetParams> {
constructor(name: string);
protected _params: TNetParams | undefined;
protected _paramMappings: ParamMapping[];
_name: any;
get params(): TNetParams | undefined;
get paramMappings(): ParamMapping[];
get isLoaded(): boolean;
getParamFromPath(paramPath: string): tf.Tensor;
reassignParamFromPath(paramPath: string, tensor: tf.Tensor): void;
getParamList(): {
path: string;
tensor: tf.Tensor;
}[];
getTrainableParams(): {
path: string;
tensor: tf.Tensor;
}[];
getFrozenParams(): {
path: string;
tensor: tf.Tensor;
}[];
variable(): void;
freeze(): void;
dispose(throwOnRedispose?: boolean): void;
serializeParams(): Float32Array;
    load(weightsOrUrl: Float32Array | string | undefined): Promise<void>;
    loadFromUri(uri: string | undefined): Promise<void>;
    loadFromDisk(filePath: string | undefined): Promise<void>;
loadFromWeightMap(weightMap: tf.NamedTensorMap): void;
extractWeights(weights: Float32Array): void;
private traversePropertyPath;
protected abstract getDefaultModelName(): string;
protected abstract extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: TNetParams;
paramMappings: ParamMapping[];
};
protected abstract extractParams(weights: Float32Array): {
params: TNetParams;
paramMappings: ParamMapping[];
};
}
export declare function nonMaxSuppression(boxes: Box[], scores: number[], iouThreshold: number, isIOU?: boolean): number[];
export declare function normalize(x: tf.Tensor4D, meanRgb: number[]): tf.Tensor4D;
declare type NumericDataType = 'float32' | 'int32' | 'bool' | 'complex64';
export declare class ObjectDetection {
private _score;
private _classScore;
private _className;
private _box;
private _imageDims;
constructor(score: number, classScore: number, className: string, relativeBox: IRect, imageDims: IDimensions);
get score(): number;
get classScore(): number;
get className(): string;
get box(): Box;
get imageDims(): Dimensions;
get imageWidth(): number;
get imageHeight(): number;
get relativeBox(): Box;
forSize(width: number, height: number): ObjectDetection;
}
declare type OutputLayerParams = {
extra_dim: tf.Tensor3D;
};
declare const pad: typeof pad_;
/**
* Pads a `tf.Tensor` with a given value and paddings.
*
* This operation implements `CONSTANT` mode. For `REFLECT` and `SYMMETRIC`,
* refer to `tf.mirrorPad`
*
* Also available are stricter rank-specific methods with the same signature
* as this method that assert that `paddings` is of given length.
* - `tf.pad1d`
* - `tf.pad2d`
* - `tf.pad3d`
* - `tf.pad4d`
*
* ```js
* const x = tf.tensor1d([1, 2, 3, 4]);
* x.pad([[1, 2]]).print();
* ```
* @param x The tensor to pad.
* @param paddings An array of length `R` (the rank of the tensor), where
* each element is a length-2 tuple of ints `[padBefore, padAfter]`,
* specifying how much to pad along each dimension of the tensor.
* @param constantValue The pad value to use. Defaults to 0.
*
* @doc {heading: 'Tensors', subheading: 'Transformations'}
*/
declare function pad_<T extends Tensor>(x: T | TensorLike, paddings: Array<[number, number]>, constantValue?: number): T;
declare type PadInfo = {
top: number;
left: number;
right: number;
bottom: number;
type: PadType;
};
declare type PadInfo3D = {
top: number;
left: number;
right: number;
bottom: number;
front: number;
back: number;
type: PadType;
};
/**
* Pads the smaller dimension of an image tensor with zeros, such that width === height.
*
* @param imgTensor The image tensor.
* @param isCenterImage (optional, default: false) If true, add an equal amount of padding on
 * both sides of the minor dimension of the image.
* @returns The padded tensor with width === height.
*/
export declare function padToSquare(imgTensor: tf.Tensor4D, isCenterImage?: boolean): tf.Tensor4D;
declare type PadType = 'SAME' | 'VALID' | 'NUMBER' | 'EXPLICIT';
declare type ParamMapping = {
originalPath?: string;
paramPath: string;
};
/**
* At any given time a single platform is active and represents an
* implementation of this interface. In practice, a platform is an environment
* where TensorFlow.js can be executed, e.g. the browser or Node.js.
*/
declare interface Platform {
/**
* Makes an HTTP request.
* @param path The URL path to make a request to
* @param requestInits The request init. See init here:
* https://developer.mozilla.org/en-US/docs/Web/API/Request/Request
*/
fetch(path: string, requestInits?: RequestInit, options?: RequestDetails): Promise<Response>;
/**
* Returns the current high-resolution time in milliseconds relative to an
* arbitrary time in the past. It works across different platforms (node.js,
* browsers).
*/
now(): number;
/**
* Encode the provided string into an array of bytes using the provided
* encoding.
*/
encode(text: string, encoding: string): Uint8Array;
/** Decode the provided bytes into a string using the provided encoding. */
decode(bytes: Uint8Array, encoding: string): string;
}
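/*
* Minimal sketch of a custom `Platform` implementation (an assumption for
* illustration; how a platform gets registered is internal to TensorFlow.js).
* `TextEncoder`/`TextDecoder` only cover utf-8 faithfully here.
*
* ```js
* const myPlatform = {
*   fetch: (path, requestInits) => fetch(path, requestInits),
*   now: () => performance.now(),
*   encode: (text, encoding) => new TextEncoder().encode(text),
*   decode: (bytes, encoding) => new TextDecoder(encoding).decode(bytes),
* };
* ```
*/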
export declare class Point implements IPoint {
private _x;
private _y;
constructor(x: number, y: number);
get x(): number;
get y(): number;
add(pt: IPoint): Point;
sub(pt: IPoint): Point;
mul(pt: IPoint): Point;
div(pt: IPoint): Point;
abs(): Point;
magnitude(): number;
floor(): Point;
}
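/*
* Usage sketch (illustrative): `Point` operations are element-wise and return
* new instances rather than mutating the receiver.
*
* ```js
* const p = new faceapi.Point(3, -4);
* const q = p.add(new faceapi.Point(1, 2)); // Point { x: 4, y: -2 }
* p.abs().magnitude();                      // 5
* ```
*/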
declare type PointwiseConvParams = {
filters: tf.Tensor4D;
batch_norm_offset: tf.Tensor1D;
};
/**
* Predicts age and gender from a face image.
*
* @param input The face image extracted from the bounding box of a face. Can
* also be an array of input images, which will be batch processed.
* @returns Predictions with age, gender and gender probability, or an array thereof in case of batch input.
*/
export declare const predictAgeAndGender: (input: TNetInput) => Promise<AgeAndGenderPrediction | AgeAndGenderPrediction[]>;
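/*
* Usage sketch (illustrative; assumes the age/gender model has been loaded and
* `faceImage` holds a cropped face):
*
* ```js
* const { age, gender, genderProbability } = await faceapi.predictAgeAndGender(faceImage);
* console.log(`${Math.round(age)} years, ${gender} (${genderProbability.toFixed(2)})`);
* ```
*/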
declare class PredictAgeAndGenderTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
protected parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>;
protected input: TNetInput;
protected extractedFaces?: any[] | undefined;
constructor(parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>, input: TNetInput, extractedFaces?: any[] | undefined);
}
declare class PredictAllAgeAndGenderTask<TSource extends WithFaceDetection<{}>> extends PredictAgeAndGenderTaskBase<WithAge<WithGender<TSource>>[], TSource[]> {
run(): Promise<WithAge<WithGender<TSource>>[]>;
withFaceExpressions(): PredictAllFaceExpressionsTask<WithAge<WithGender<TSource>>>;
}
declare class PredictAllAgeAndGenderWithFaceAlignmentTask<TSource extends WithFaceLandmarks<WithFaceDetection<{}>>> extends PredictAllAgeAndGenderTask<TSource> {
withFaceExpressions(): PredictAllFaceExpressionsWithFaceAlignmentTask<WithAge<WithGender<TSource>>>;
withFaceDescriptors(): ComputeAllFaceDescriptorsTask<WithAge<WithGender<TSource>>>;
}
declare class PredictAllFaceExpressionsTask<TSource extends WithFaceDetection<{}>> extends PredictFaceExpressionsTaskBase<WithFaceExpressions<TSource>[], TSource[]> {
run(): Promise<WithFaceExpressions<TSource>[]>;
withAgeAndGender(): PredictAllAgeAndGenderTask<WithFaceExpressions<TSource>>;
}
declare class PredictAllFaceExpressionsWithFaceAlignmentTask<TSource extends WithFaceLandmarks<WithFaceDetection<{}>>> extends PredictAllFaceExpressionsTask<TSource> {
withAgeAndGender(): PredictAllAgeAndGenderWithFaceAlignmentTask<WithFaceExpressions<TSource>>;
withFaceDescriptors(): ComputeAllFaceDescriptorsTask<WithFaceExpressions<TSource>>;
}
export declare class PredictedBox extends LabeledBox {
static assertIsValidPredictedBox(box: any, callee: string): void;
private _score;
private _classScore;
constructor(box: IBoundingBox | IRect | any, label: number, score: number, classScore: number);
get score(): number;
get classScore(): number;
}
declare class PredictFaceExpressionsTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
protected parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>;
protected input: TNetInput;
protected extractedFaces?: any[] | undefined;
constructor(parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>, input: TNetInput, extractedFaces?: any[] | undefined);
}
declare type PredictionLayerParams = {
conv_0: PointwiseConvParams;
conv_1: PointwiseConvParams;
conv_2: PointwiseConvParams;
conv_3: PointwiseConvParams;
conv_4: PointwiseConvParams;
conv_5: PointwiseConvParams;
conv_6: PointwiseConvParams;
conv_7: PointwiseConvParams;
box_predictor_0: BoxPredictionParams;
box_predictor_1: BoxPredictionParams;
box_predictor_2: BoxPredictionParams;
box_predictor_3: BoxPredictionParams;
box_predictor_4: BoxPredictionParams;
box_predictor_5: BoxPredictionParams;
};
declare class PredictSingleAgeAndGenderTask<TSource extends WithFaceDetection<{}>> extends PredictAgeAndGenderTaskBase<WithAge<WithGender<TSource>> | undefined, TSource | undefined> {
run(): Promise<WithAge<WithGender<TSource>> | undefined>;
withFaceExpressions(): PredictSingleFaceExpressionsTask<WithAge<WithGender<TSource>>>;
}
declare class PredictSingleAgeAndGenderWithFaceAlignmentTask<TSource extends WithFaceLandmarks<WithFaceDetection<{}>>> extends PredictSingleAgeAndGenderTask<TSource> {
withFaceExpressions(): PredictSingleFaceExpressionsWithFaceAlignmentTask<WithAge<WithGender<TSource>>>;
withFaceDescriptor(): ComputeSingleFaceDescriptorTask<WithAge<WithGender<TSource>>>;
}
declare class PredictSingleFaceExpressionsTask<TSource extends WithFaceDetection<{}>> extends PredictFaceExpressionsTaskBase<WithFaceExpressions<TSource> | undefined, TSource | undefined> {
run(): Promise<WithFaceExpressions<TSource> | undefined>;
withAgeAndGender(): PredictSingleAgeAndGenderTask<WithFaceExpressions<TSource>>;
}
declare class PredictSingleFaceExpressionsWithFaceAlignmentTask<TSource extends WithFaceLandmarks<WithFaceDetection<{}>>> extends PredictSingleFaceExpressionsTask<TSource> {
withAgeAndGender(): PredictSingleAgeAndGenderWithFaceAlignmentTask<WithFaceExpressions<TSource>>;
withFaceDescriptor(): ComputeSingleFaceDescriptorTask<WithFaceExpressions<TSource>>;
}
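/*
* The task classes above make detection pipelines composable: each `with*`
* call wraps the parent task and augments its result. A typical chain
* (illustrative; assumes all required models have been loaded):
*
* ```js
* const result = await faceapi
*   .detectSingleFace(input)
*   .withFaceLandmarks()
*   .withFaceExpressions()
*   .withAgeAndGender();
* ```
*/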
declare function range(num: number, start: number, step: number): number[];
declare enum Rank {
R0 = "R0",
R1 = "R1",
R2 = "R2",
R3 = "R3",
R4 = "R4",
R5 = "R5",
R6 = "R6"
}
/**
* Recognizes the facial expressions from a face image.
*
* @param input The face image extracted from the bounding box of a face. Can
* also be an array of input images, which will be batch processed.
* @returns Facial expressions with corresponding probabilities, or an array thereof in case of batch input.
*/
export declare const recognizeFaceExpressions: (input: TNetInput) => Promise<FaceExpressions | FaceExpressions[]>;
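/*
* Usage sketch (illustrative; assumes the face expression model is loaded and
* `faceImage` holds a cropped face):
*
* ```js
* const expressions = await faceapi.recognizeFaceExpressions(faceImage);
* // e.g. { neutral: 0.9, happy: 0.05, sad: 0.01, ... }
* ```
*/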
export declare class Rect extends Box implements IRect {
constructor(x: number, y: number, width: number, height: number, allowNegativeDimensions?: boolean);
}
declare interface RecursiveArray<T extends any> {
[index: number]: T | RecursiveArray<T>;
}
declare type ReductionBlockParams = {
separable_conv0: SeparableConvParams;
separable_conv1: SeparableConvParams;
expansion_conv: ConvParams;
};
declare const relu: typeof relu_;
/**
* Computes rectified linear element-wise: `max(x, 0)`.
*
* ```js
* const x = tf.tensor1d([-1, 2, -3, 4]);
*
* x.relu().print(); // or tf.relu(x)
* ```
* @param x The input tensor. If the dtype is `bool`, the output dtype will be
* `int32`.
*
* @doc {heading: 'Operations', subheading: 'Basic math'}
*/
declare function relu_<T extends Tensor>(x: T | TensorLike): T;
/**
* Additional options for Platform.fetch
*/
declare interface RequestDetails {
/**
* Is this request for a binary file (as opposed to a json file)
*/
isBinary?: boolean;
}
declare const reshape: typeof reshape_;
/**
* Reshapes a `tf.Tensor` to a given shape.
*
* Given an input tensor, returns a new tensor with the same values as the
* input tensor with shape `shape`.
*
* If one component of `shape` is the special value -1, the size of that
* dimension is computed so that the total size remains constant. In
* particular, a shape of [-1] flattens into 1-D. At most one component of
* `shape` can be -1.
*
* If `shape` is 1-D or higher, then the operation returns a tensor with shape
* `shape` filled with the values of the input tensor. In this case, the
* number of elements implied by `shape` must be the same as the number of
* elements in the input.
*
* ```js
* const x = tf.tensor1d([1, 2, 3, 4]);
* x.reshape([2, 2]).print();
* ```
*
* @param x The input tensor to be reshaped.
* @param shape An array of integers defining the output tensor shape.
*
* @doc {heading: 'Tensors', subheading: 'Transformations'}
*/
declare function reshape_<R extends Rank>(x: Tensor | TensorLike, shape: ShapeMap[R]): Tensor<R>;
declare type ResidualLayerParams = {
conv1: ConvLayerParams;
conv2: ConvLayerParams;
};
export declare function resizeResults<T>(results: T, dimensions: IDimensions): T;
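/*
* Usage sketch (illustrative): detection results are sized relative to the
* input dimensions, so map them to the display size before drawing. `canvas`
* is an assumed overlay element.
*
* ```js
* const detections = await faceapi.detectAllFaces(input);
* const resized = faceapi.resizeResults(detections, {
*   width: canvas.width,
*   height: canvas.height,
* });
* ```
*/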
export declare function resolveInput(arg: string | any): any;
declare function round(num: number, prec?: number): number;
/** @doclink Tensor */
declare type Scalar = Tensor<Rank.R0>;
/**
* Creates rank-0 `tf.Tensor` (scalar) with the provided value and dtype.
*
* The same functionality can be achieved with `tf.tensor`, but in general
* we recommend using `tf.scalar` as it makes the code more readable.
*
* ```js
* tf.scalar(3.14).print();
* ```
*
* @param value The value of the scalar.
* @param dtype The data type.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
declare function scalar(value: number | boolean | string | Uint8Array, dtype?: DataType): Scalar;
declare type ScaleLayerParams = {
weights: tf.Tensor1D;
biases: tf.Tensor1D;
};
/** @docalias Function */
declare type ScopeFn<T extends TensorContainer> = () => T;
declare const separableConv2d: typeof separableConv2d_;
/**
* 2-D convolution with separable filters.
*
* Performs a depthwise convolution that acts separately on channels followed
* by a pointwise convolution that mixes channels. Note that this is
* separability between dimensions [1, 2] and 3, not spatial separability
* between dimensions 1 and 2.
*
* See
* [https://www.tensorflow.org/api_docs/python/tf/nn/separable_conv2d](
* https://www.tensorflow.org/api_docs/python/tf/nn/separable_conv2d)
* for more details.
*
* @param x The input tensor, of rank 4 or rank 3, of shape
* `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
* assumed.
* @param depthwiseFilter The depthwise filter tensor, rank 4, of shape
* `[filterHeight, filterWidth, inChannels, channelMultiplier]`. This is
* the filter used in the first step.
* @param pointwiseFilter The pointwise filter tensor, rank 4, of shape
* `[1, 1, inChannels * channelMultiplier, outChannels]`. This is
* the filter used in the second step.
* @param strides The strides of the convolution: `[strideHeight,
* strideWidth]`. If strides is a single number, then `strideHeight ==
* strideWidth`.
* @param pad The type of padding algorithm.
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid`: output will be smaller than input if filter is larger
* than 1x1.
* - For more info, see this guide:
* [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
* https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
* @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
* in which we sample input values across the height and width dimensions
* in atrous convolution. Defaults to `[1, 1]`. If `rate` is a single
* number, then `dilationHeight == dilationWidth`. If it is greater than
* 1, then all values of `strides` must be 1.
* @param dataFormat An optional string from: "NHWC", "NCHW". Defaults to
* "NHWC". Specify the data format of the input and output data. With the
* default format "NHWC", the data is stored in the order of: [batch,
* height, width, channels]. Only "NHWC" is currently supported.
*
* @doc {heading: 'Operations', subheading: 'Convolution'}
*/
declare function separableConv2d_<T extends Tensor3D | Tensor4D>(x: T | TensorLike, depthwiseFilter: Tensor4D | TensorLike, pointwiseFilter: Tensor4D | TensorLike, strides: [number, number] | number, pad: 'valid' | 'same', dilation?: [number, number] | number, dataFormat?: 'NHWC' | 'NCHW'): T;
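/*
* A minimal sketch of the shapes involved (random filters, purely
* illustrative): 3 input channels, channel multiplier 1, 2 output channels.
*
* ```js
* const x = tf.randomNormal([1, 8, 8, 3]);
* const depthwiseFilter = tf.randomNormal([3, 3, 3, 1]); // [fh, fw, inCh, multiplier]
* const pointwiseFilter = tf.randomNormal([1, 1, 3, 2]); // [1, 1, inCh * multiplier, outCh]
* tf.separableConv2d(x, depthwiseFilter, pointwiseFilter, 1, 'same').print();
* ```
*/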
declare class SeparableConvParams {
depthwise_filter: tf.Tensor4D;
pointwise_filter: tf.Tensor4D;
bias: tf.Tensor1D;
constructor(depthwise_filter: tf.Tensor4D, pointwise_filter: tf.Tensor4D, bias: tf.Tensor1D);
}
declare function setEnv(env: Environment): void;
/** @docalias number[] */
declare interface ShapeMap {
R0: number[];
R1: [number];
R2: [number, number];
R3: [number, number, number];
R4: [number, number, number, number];
R5: [number, number, number, number, number];
R6: [number, number, number, number, number, number];
}
export declare function shuffleArray(inputArray: any[]): any[];
export declare function sigmoid(x: number): number;
/**
* Computes sigmoid element-wise, `1 / (1 + exp(-x))`
*
* ```js
* const x = tf.tensor1d([0, -1, 2, -3]);
*
* x.sigmoid().print(); // or tf.sigmoid(x)
* ```
* @param x The input tensor.
*
* @doc {heading: 'Operations', subheading: 'Basic math'}
*/
declare function sigmoid_<T extends Tensor>(x: T | TensorLike): T;
declare const sigmoid_2: typeof sigmoid_;
declare interface SingleValueMap {
bool: boolean;
int32: number;
float32: number;
complex64: number;
string: string;
}
declare const slice: typeof slice_;
declare const slice3d: typeof slice3d_;
/**
* Extracts a 3D slice from a 3D array, starting at coordinates `begin` and
* of size `size`. See `slice` for details.
*/
declare function slice3d_(x: Tensor3D | TensorLike, begin: [number, number, number], size: [number, number, number]): Tensor3D;
/**
* Extracts a slice from a `tf.Tensor`, starting at coordinates `begin` and
* of size `size`.
*
* Also available are stricter rank-specific methods with the same signature
* as this method that assert that `x` is of the given rank:
* - `tf.slice1d`
* - `tf.slice2d`
* - `tf.slice3d`
* - `tf.slice4d`
*
* ```js
* const x = tf.tensor1d([1, 2, 3, 4]);
*
* x.slice([1], [2]).print();
* ```
*
* ```js
* const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* x.slice([1, 0], [1, 2]).print();
* ```
* @param x The input `tf.Tensor` to slice from.
* @param begin The coordinates to start the slice from. The length can be
* less than the rank of x - the rest of the axes will have implicit 0 as
* start. Can also be a single number, in which case it specifies the
* first axis.
* @param size The size of the slice. The length can be less than the rank of
* x - the rest of the axes will have implicit -1. A value of -1 requests
* the rest of the dimensions in the axis. Can also be a single number,
* in which case it specifies the size of the first axis.
*
* @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
*/
declare function slice_<R extends Rank, T extends Tensor<R>>(x: T | TensorLike, begin: number | number[], size?: number | number[]): T;
declare const softmax: typeof softmax_;
/**
* Computes the softmax normalized vector given the logits.
*
* ```js
* const a = tf.tensor1d([1, 2, 3]);
*
* a.softmax().print(); // or tf.softmax(a)
* ```
*
* ```js
* const a = tf.tensor2d([2, 4, 6, 1, 2, 3], [2, 3]);
*
* a.softmax().print(); // or tf.softmax(a)
* ```
*
* @param logits The logits array.
* @param dim The dimension softmax would be performed on. Defaults to `-1`
* which indicates the last dimension.
*
* @doc {heading: 'Operations', subheading: 'Normalization'}
*/
declare function softmax_<T extends Tensor>(logits: T | TensorLike, dim?: number): T;
export declare class SsdMobilenetv1 extends NeuralNetwork<NetParams_4> {
constructor();
forwardInput(input: NetInput): any;
forward(input: TNetInput): Promise<any>;
locateFaces(input: TNetInput, options?: ISsdMobilenetv1Options): Promise<FaceDetection[]>;
protected getDefaultModelName(): string;
protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: NetParams_4;
paramMappings: ParamMapping[];
};
protected extractParams(weights: Float32Array): {
params: NetParams_4;
paramMappings: ParamMapping[];
};
}
/**
* Attempts to detect all faces in an image using the SSD Mobilenetv1 network.
*
* @param input The input image.
* @param options (optional, default: see SsdMobilenetv1Options constructor for default parameters).
* @returns Bounding box of each face with score.
*/
export declare const ssdMobilenetv1: (input: TNetInput, options: SsdMobilenetv1Options) => Promise<FaceDetection[]>;
export declare class SsdMobilenetv1Options {
protected _name: string;
private _minConfidence;
private _maxResults;
constructor({ minConfidence, maxResults }?: ISsdMobilenetv1Options);
get minConfidence(): number;
get maxResults(): number;
}
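/*
* Usage sketch (illustrative; assumes the SSD Mobilenetv1 model has been
* loaded):
*
* ```js
* const options = new faceapi.SsdMobilenetv1Options({ minConfidence: 0.6, maxResults: 10 });
* const detections = await faceapi.ssdMobilenetv1(input, options);
* ```
*/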
declare const stack: typeof stack_;
/**
* Stacks a list of rank-`R` `tf.Tensor`s into one rank-`(R+1)` `tf.Tensor`.
*
* ```js
* const a = tf.tensor1d([1, 2]);
* const b = tf.tensor1d([3, 4]);
* const c = tf.tensor1d([5, 6]);
* tf.stack([a, b, c]).print();
* ```
*
* @param tensors A list of tensor objects with the same shape and dtype.
* @param axis The axis to stack along. Defaults to 0 (the first dim).
*
* @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
*/
declare function stack_<T extends Tensor>(tensors: Array<T | TensorLike>, axis?: number): Tensor;
declare const sub: typeof sub_;
/**
* Subtracts two `tf.Tensor`s element-wise, A - B. Supports broadcasting.
*
* ```js
* const a = tf.tensor1d([10, 20, 30, 40]);
* const b = tf.tensor1d([1, 2, 3, 4]);
*
* a.sub(b).print(); // or tf.sub(a, b)
* ```
*
* ```js
* // Broadcast subtract a with b.
* const a = tf.tensor1d([10, 20, 30, 40]);
* const b = tf.scalar(5);
*
* a.sub(b).print(); // or tf.sub(a, b)
* ```
* @param a The first `tf.Tensor` to subtract from.
* @param b The second `tf.Tensor` to be subtracted. Must have the same dtype as
* `a`.
*
* @doc {heading: 'Operations', subheading: 'Arithmetic'}
*/
declare function sub_<T extends Tensor>(a: Tensor | TensorLike, b: Tensor | TensorLike): T;
declare type TDrawDetectionsInput = IRect | IBoundingBox | FaceDetection | WithFaceDetection<{}>;
declare namespace Tensor { }
/**
* A `tf.Tensor` object represents an immutable, multidimensional array of
* numbers that has a shape and a data type.
*
* For performance reasons, functions that create tensors do not necessarily
* perform a copy of the data passed to them (e.g. if the data is passed as a
* `Float32Array`), and changes to the data will change the tensor. This is not
* a feature and is not supported. To avoid this behavior, use the tensor before
* changing the input data or create a copy with `copy = tf.add(yourTensor, 0)`.
*
* See `tf.tensor` for details on how to create a `tf.Tensor`.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
declare class Tensor<R extends Rank = Rank> {
/** Unique id of this tensor. */
readonly id: number;
/**
* Id of the bucket holding the data for this tensor. Multiple arrays can
* point to the same bucket (e.g. when calling array.reshape()).
*/
dataId: DataId;
/** The shape of the tensor. */
readonly shape: ShapeMap[R];
/** Number of elements in the tensor. */
readonly size: number;
/** The data type for the array. */
readonly dtype: DataType;
/** The rank type for the array (see `Rank` enum). */
readonly rankType: R;
/** Whether this tensor has been globally kept. */
kept: boolean;
/** The id of the scope this tensor is being tracked in. */
scopeId: number;
/**
* Number of elements to skip in each dimension when indexing. See
* https://docs.scipy.org/doc/numpy/reference/generated/\
* numpy.ndarray.strides.html
*/
readonly strides: number[];
constructor(shape: ShapeMap[R], dtype: DataType, dataId: DataId, id: number);
readonly rank: number;
/**
* Returns a promise of `tf.TensorBuffer` that holds the underlying data.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
buffer<D extends DataType = 'float32'>(): Promise<TensorBuffer<R, D>>;
/**
* Returns a `tf.TensorBuffer` that holds the underlying data.
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
bufferSync<D extends DataType = 'float32'>(): TensorBuffer<R, D>;
/**
* Returns the tensor data as a nested array. The transfer of data is done
* asynchronously.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
array(): Promise<ArrayMap[R]>;
/**
* Returns the tensor data as a nested array. The transfer of data is done
* synchronously.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
arraySync(): ArrayMap[R];
/**
* Asynchronously downloads the values from the `tf.Tensor`. Returns a
* promise of `TypedArray` that resolves when the computation has finished.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
data<D extends DataType = NumericDataType>(): Promise<DataTypeMap[D]>;
/**
* Synchronously downloads the values from the `tf.Tensor`. This blocks the
* UI thread until the values are ready, which can cause performance issues.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
dataSync<D extends DataType = NumericDataType>(): DataTypeMap[D];
/** Returns the underlying bytes of the tensor's data. */
bytes(): Promise<Uint8Array[] | Uint8Array>;
/**
* Disposes `tf.Tensor` from memory.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
dispose(): void;
protected isDisposedInternal: boolean;
readonly isDisposed: boolean;
throwIfDisposed(): void;
/**
* Prints the `tf.Tensor`. See `tf.print` for details.
*
* @param verbose Whether to print verbose information about the tensor,
* including dtype and size.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
print(verbose?: boolean): void;
/**
* Returns a copy of the tensor. See `tf.clone` for details.
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
clone(this: T): T;
/**
* Returns a human-readable description of the tensor. Useful for logging.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
toString(verbose?: boolean): string;
variable(trainable?: boolean, name?: string, dtype?: DataType): Variable<R>;
}
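/*
* Usage sketch (illustrative) of the data-access and disposal methods above:
*
* ```js
* const t = tf.tensor2d([[1, 2], [3, 4]]);
* console.log(t.shape);        // [2, 2]
* const vals = await t.data(); // Float32Array [1, 2, 3, 4]
* t.dispose();                 // release the backing memory
* ```
*/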
/**
* Creates a `tf.Tensor` with the provided values, shape and dtype.
*
* ```js
* // Pass an array of values to create a vector.
* tf.tensor([1, 2, 3, 4]).print();
* ```
*
* ```js
* // Pass a nested array of values to make a matrix or a higher
* // dimensional tensor.
* tf.tensor([[1, 2], [3, 4]]).print();
* ```
*
* ```js
* // Pass a flat array and specify a shape yourself.
* tf.tensor([1, 2, 3, 4], [2, 2]).print();
* ```
*
* @param values The values of the tensor. Can be nested array of numbers,
* or a flat array, or a `TypedArray`. If the values are strings,
* they will be encoded as utf-8 and kept as `Uint8Array[]`.
* @param shape The shape of the tensor. Optional. If not provided,
* it is inferred from `values`.
* @param dtype The data type.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
declare function tensor<R extends Rank>(values: TensorLike, shape?: ShapeMap[R], dtype?: DataType): Tensor<R>;
/** @doclink Tensor */
declare type Tensor1D = Tensor<Rank.R1>;
/**
* Creates rank-1 `tf.Tensor` with the provided values, shape and dtype.
*
* The same functionality can be achieved with `tf.tensor`, but in general
* we recommend using `tf.tensor1d` as it makes the code more readable.
*
* ```js
* tf.tensor1d([1, 2, 3]).print();
* ```
*
* @param values The values of the tensor. Can be array of numbers,
* or a `TypedArray`.
* @param dtype The data type.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
declare function tensor1d(values: TensorLike1D, dtype?: DataType): Tensor1D;
/** @doclink Tensor */
declare type Tensor2D = Tensor<Rank.R2>;
/**
* Creates rank-2 `tf.Tensor` with the provided values, shape and dtype.
*
* The same functionality can be achieved with `tf.tensor`, but in general
* we recommend using `tf.tensor2d` as it makes the code more readable.
*
* ```js
* // Pass a nested array.
* tf.tensor2d([[1, 2], [3, 4]]).print();
* ```
* ```js
* // Pass a flat array and specify a shape.
* tf.tensor2d([1, 2, 3, 4], [2, 2]).print();
* ```
*
* @param values The values of the tensor. Can be nested array of numbers,
* or a flat array, or a `TypedArray`.
* @param shape The shape of the tensor. If not provided, it is inferred from
* `values`.
* @param dtype The data type.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
declare function tensor2d(values: TensorLike2D, shape?: [number, number], dtype?: DataType): Tensor2D;
/** @doclink Tensor */
declare type Tensor3D = Tensor<Rank.R3>;
/**
* Creates rank-3 `tf.Tensor` with the provided values, shape and dtype.
*
* The same functionality can be achieved with `tf.tensor`, but in general
* we recommend using `tf.tensor3d` as it makes the code more readable.
*
* ```js
* // Pass a nested array.
* tf.tensor3d([[[1], [2]], [[3], [4]]]).print();
* ```
* ```js
* // Pass a flat array and specify a shape.
* tf.tensor3d([1, 2, 3, 4], [2, 2, 1]).print();
* ```
*
* @param values The values of the tensor. Can be nested array of numbers,
* or a flat array, or a `TypedArray`.
* @param shape The shape of the tensor. If not provided, it is inferred from
* `values`.
* @param dtype The data type.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
declare function tensor3d(values: TensorLike3D, shape?: [number, number, number], dtype?: DataType): Tensor3D;
/** @doclink Tensor */
declare type Tensor4D = Tensor<Rank.R4>;
/**
* Creates rank-4 `tf.Tensor` with the provided values, shape and dtype.
*
* The same functionality can be achieved with `tf.tensor`, but in general
* we recommend using `tf.tensor4d` as it makes the code more readable.
*
* ```js
* // Pass a nested array.
* tf.tensor4d([[[[1], [2]], [[3], [4]]]]).print();
* ```
* ```js
* // Pass a flat array and specify a shape.
* tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]).print();
* ```
*
* @param values The values of the tensor. Can be nested array of numbers,
* or a flat array, or a `TypedArray`.
* @param shape The shape of the tensor. Optional. If not provided,
* it is inferred from `values`.
* @param dtype The data type.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
declare function tensor4d(values: TensorLike4D, shape?: [number, number, number, number], dtype?: DataType): Tensor4D;
/** @doclink Tensor */
declare type Tensor5D = Tensor<Rank.R5>;
/**
* A mutable object, similar to `tf.Tensor`, that allows users to set values
* at locations before converting to an immutable `tf.Tensor`.
*
* See `tf.buffer` for creating a tensor buffer.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
declare class TensorBuffer<R extends Rank, D extends DataType = 'float32'> {
dtype: D;
size: number;
shape: ShapeMap[R];
strides: number[];
values: DataTypeMap[D];
constructor(shape: ShapeMap[R], dtype: D, values?: DataTypeMap[D]);
/**
* Sets a value in the buffer at a given location.
*
* @param value The value to set.
* @param locs The location indices.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
set(value: SingleValueMap[D], ...locs: number[]): void;
/**
* Returns the value in the buffer at the provided location.
*
* @param locs The location indices.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
get(...locs: number[]): SingleValueMap[D];
locToIndex(locs: number[]): number;
indexToLoc(index: number): number[];
readonly rank: number;
/**
* Creates an immutable `tf.Tensor` object from the buffer.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
toTensor(): Tensor<R>;
}
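/*
* Usage sketch (illustrative): build values mutably, then freeze them into a
* tensor. `tf.buffer` creates a zero-filled buffer of the given shape.
*
* ```js
* const buf = tf.buffer([2, 2]); // float32, all zeros
* buf.set(3, 0, 0);              // value 3 at row 0, col 0
* buf.set(5, 1, 1);              // value 5 at row 1, col 1
* buf.toTensor().print();        // [[3, 0], [0, 5]]
* ```
*/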
/**
* @docalias void|number|string|TypedArray|Tensor|Tensor[]|{[key:
* string]:Tensor|number|string}
*/
declare type TensorContainer = void | Tensor | string | number | boolean | TensorContainerObject | TensorContainerArray | Float32Array | Int32Array | Uint8Array;
declare interface TensorContainerArray extends Array<TensorContainer>