update tfjs

pull/130/head
Vladimir Mandic 2022-10-09 13:40:33 -04:00
parent 0cbfd9b01b
commit 96fed4f123
9 changed files with 1094 additions and 1091 deletions


@@ -9,8 +9,10 @@
 ## Changelog
-### **HEAD -> master** 2022/09/25 mandic00@live.com
+### **HEAD -> master** 2022/09/29 mandic00@live.com
+- create funding.yml
+- add node-wasm demo
 ### **1.7.4** 2022/09/25 mandic00@live.com

File diff suppressed because one or more lines are too long

702
dist/face-api.esm.js vendored

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

702
dist/face-api.js vendored

File diff suppressed because one or more lines are too long

714
dist/tfjs.esm.js vendored

File diff suppressed because one or more lines are too long


@@ -4,4 +4,4 @@
 author: <https://github.com/vladmandic>'
 */
-var e="3.20.0";var s="3.20.0";var t="3.20.0";var i="3.20.0";var n="3.20.0";var r="3.20.0";var l="3.20.0";var a="3.20.0";var G={tfjs:e,"tfjs-core":s,"tfjs-data":t,"tfjs-layers":i,"tfjs-converter":n,"tfjs-backend-cpu":r,"tfjs-backend-webgl":l,"tfjs-backend-wasm":a};export{G as version};
+var e="3.21.0";var s="3.21.0";var t="3.21.0";var i="3.21.0";var n="3.21.0";var r="3.21.0";var l="3.21.0";var a="3.21.0";var G={tfjs:e,"tfjs-core":s,"tfjs-data":t,"tfjs-layers":i,"tfjs-converter":n,"tfjs-backend-cpu":r,"tfjs-backend-webgl":l,"tfjs-backend-wasm":a};export{G as version};


@@ -44,26 +44,26 @@
   "devDependencies": {
     "@canvas/image": "^1.0.1",
     "@microsoft/api-extractor": "^7.32.0",
-    "@tensorflow/tfjs": "^3.20.0",
-    "@tensorflow/tfjs-backend-cpu": "^3.20.0",
-    "@tensorflow/tfjs-backend-wasm": "^3.20.0",
-    "@tensorflow/tfjs-backend-webgl": "^3.20.0",
-    "@tensorflow/tfjs-backend-webgpu": "0.0.1-alpha.13",
-    "@tensorflow/tfjs-converter": "^3.20.0",
-    "@tensorflow/tfjs-core": "^3.20.0",
-    "@tensorflow/tfjs-data": "^3.20.0",
-    "@tensorflow/tfjs-layers": "^3.20.0",
-    "@tensorflow/tfjs-node": "^3.20.0",
-    "@tensorflow/tfjs-node-gpu": "^3.20.0",
-    "@types/node": "^18.7.23",
+    "@tensorflow/tfjs": "^3.21.0",
+    "@tensorflow/tfjs-backend-cpu": "^3.21.0",
+    "@tensorflow/tfjs-backend-wasm": "^3.21.0",
+    "@tensorflow/tfjs-backend-webgl": "^3.21.0",
+    "@tensorflow/tfjs-backend-webgpu": "0.0.1-alpha.14",
+    "@tensorflow/tfjs-converter": "^3.21.0",
+    "@tensorflow/tfjs-core": "^3.21.0",
+    "@tensorflow/tfjs-data": "^3.21.0",
+    "@tensorflow/tfjs-layers": "^3.21.0",
+    "@tensorflow/tfjs-node": "^3.21.1",
+    "@tensorflow/tfjs-node-gpu": "^3.21.0",
+    "@types/node": "^18.8.3",
     "@types/offscreencanvas": "^2019.7.0",
-    "@typescript-eslint/eslint-plugin": "^5.38.1",
-    "@typescript-eslint/parser": "^5.38.1",
+    "@typescript-eslint/eslint-plugin": "^5.39.0",
+    "@typescript-eslint/parser": "^5.39.0",
     "@vladmandic/build": "^0.7.14",
     "@vladmandic/pilogger": "^0.4.6",
     "@vladmandic/tfjs": "github:vladmandic/tfjs",
-    "esbuild": "^0.15.9",
-    "eslint": "^8.24.0",
+    "esbuild": "^0.15.10",
+    "eslint": "^8.25.0",
     "eslint-config-airbnb-base": "^15.0.0",
     "eslint-plugin-import": "^2.26.0",
     "eslint-plugin-json": "^3.1.0",

21
types/face-api.d.ts vendored

@@ -124,7 +124,7 @@ declare const batchNorm: typeof batchNorm_;
  * Mean, variance, scale, and offset can be of two shapes:
  * - The same shape as the input.
  * - In the common case, the depth dimension is the last dimension of x, so
- *   the values would be an `tf.Tensor1D` of shape [depth].
+ *   the values would be a `tf.Tensor1D` of shape [depth].
  *
  * Also available are stricter rank-specific methods with the same signature
  * as this method that assert that parameters passed are of given rank
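To make the two accepted shapes concrete, here is a small sketch (not from the diff) where mean and variance are `tf.Tensor1D`s of shape `[depth]`, with depth being the last dimension of `x`:

```ts
import * as tf from '@tensorflow/tfjs';

const x = tf.tensor2d([[1, 2, 3], [4, 5, 6]]); // depth = 3 (last dimension)
const mean = tf.tensor1d([1, 2, 3]);           // shape [depth]
const variance = tf.tensor1d([1, 1, 1]);       // shape [depth]
tf.batchNorm(x, mean, variance).print();
```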
@@ -241,8 +241,8 @@ declare const clipByValue: typeof clipByValue_;
  * x.clipByValue(-2, 3).print();  // or tf.clipByValue(x, -2, 3)
  * ```
  * @param x The input tensor.
- * @param clipValueMin Lower-bound of range to be clipped to.
- * @param clipValueMax Upper-bound of range to be clipped to.
+ * @param clipValueMin Lower bound of range to be clipped to.
+ * @param clipValueMax Upper bound of range to be clipped to.
  *
  * @doc {heading: 'Operations', subheading: 'Basic math'}
  */
@@ -370,7 +370,7 @@ declare const concat: typeof concat_;
  * tf.concat([a, b], axis).print();
  * ```
  * @param tensors A list of tensors to concatenate.
- * @param axis The axis to concate along. Defaults to 0 (the first dim).
+ * @param axis The axis to concatenate along. Defaults to 0 (the first dim).
  *
  * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
  */
@@ -932,7 +932,7 @@ declare const expandDims: typeof expandDims_;
  * x.expandDims(axis).print();
  * ```
  *
- * @param x The input tensor whose dimensions to be expanded.
+ * @param x The input tensor whose dimensions are to be expanded.
  * @param axis The dimension index at which to insert shape of `1`. Defaults
  *     to 0 (the first dimension).
  *
@@ -1701,7 +1701,7 @@ declare const pad: typeof pad_;
  * Pads a `tf.Tensor` with a given value and paddings.
  *
  * This operation implements `CONSTANT` mode. For `REFLECT` and `SYMMETRIC`,
- * refer to `tf.mirrorPad`
+ * refer to `tf.mirrorPad`.
  *
  * Also available are stricter rank-specific methods with the same signature
  * as this method that assert that `paddings` is of given length.
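A brief illustration (not part of the commit) of the distinction the comment points to: `tf.pad` implements `CONSTANT` mode, while `REFLECT` and `SYMMETRIC` padding go through `tf.mirrorPad`:

```ts
import * as tf from '@tensorflow/tfjs';

const x = tf.tensor1d([1, 2, 3]);
tf.pad(x, [[1, 1]]).print();                    // CONSTANT:  [0, 1, 2, 3, 0]
tf.mirrorPad(x, [[1, 1]], 'reflect').print();   // REFLECT:   [2, 1, 2, 3, 2]
tf.mirrorPad(x, [[1, 1]], 'symmetric').print(); // SYMMETRIC: [1, 1, 2, 3, 3]
```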
@@ -1802,6 +1802,7 @@ declare interface Platform {
     encode(text: string, encoding: string): Uint8Array;
     /** Decode the provided bytes into a string using the provided encoding. */
     decode(bytes: Uint8Array, encoding: string): string;
+    setTimeoutCustom?(functionRef: Function, delay: number): void;
 }
 export declare class Point implements IPoint {
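The only substantive API change in this file is the new optional `setTimeoutCustom` hook on `Platform`. The diff adds nothing beyond the signature, so the following is purely a sketch of how such a hook could be wired up; the MessageChannel trick for dodging browser `setTimeout` throttling is an assumption, not something stated in this commit.

```ts
import * as tf from '@tensorflow/tfjs-core';

// Route zero-delay callbacks through a MessageChannel instead of setTimeout(0).
const channel = new MessageChannel();
const pending: Function[] = [];
channel.port1.onmessage = () => pending.shift()?.();

// Attach the optional hook to the active platform (cast is needed because the
// installed typings may predate the new method).
const platform = tf.env().platform as any;
platform.setTimeoutCustom = (functionRef: Function, delay: number) => {
  if (delay > 0) {
    setTimeout(functionRef as () => void, delay);
    return;
  }
  pending.push(functionRef);
  channel.port2.postMessage(null);
};
```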
@@ -1959,7 +1960,7 @@ declare const relu: typeof relu_;
  * x.relu().print();  // or tf.relu(x)
  * ```
  * @param x The input tensor. If the dtype is `bool`, the output dtype will be
- *     `int32'.
+ *     `int32`.
  *
  * @doc {heading: 'Operations', subheading: 'Basic math'}
  */
@@ -2395,7 +2396,7 @@ declare class Tensor<R extends Rank = Rank> {
  * This means that the texture will use the RGBA channels to store value.
  *
  * For WebGPU backend, the data will be stored on a buffer. There is no
- * parameter, so can not use an user defined size to create the buffer.
+ * parameter, so can not use a user-defined size to create the buffer.
  *
  * @param options:
  *     For WebGL,
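This hunk touches the documentation for keeping tensor data on the GPU. As a hedged sketch of the call being described (assuming the WebGL backend and that `dataToGPU` is available in this TFJS release):

```ts
import * as tf from '@tensorflow/tfjs';

const t = tf.randomNormal([64, 64]);
const gpuData = t.dataToGPU();  // on WebGL: { texture, texShape, tensorRef }
// ...consume gpuData.texture in custom WebGL code; values are RGBA-packed as noted above...
gpuData.tensorRef.dispose();    // caller releases the GPU-resident reference
t.dispose();
```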
@@ -2776,9 +2777,9 @@ declare const tile: typeof tile_;
  * Construct a tensor by repeating it the number of times given by reps.
  *
  * This operation creates a new tensor by replicating `input` `reps`
- * times. The output tensor's i'th dimension has `input.shape[i] *
+ * times. The output tensor's `i`th dimension has `input.shape[i] *
  * reps[i]` elements, and the values of `input` are replicated
- * `reps[i]` times along the i'th dimension. For example, tiling
+ * `reps[i]` times along the `i`th dimension. For example, tiling
  * `[a, b, c, d]` by `[2]` produces `[a, b, c, d, a, b, c, d]`.
  *
  * ```js
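A small worked example (not from the diff) of the shape rule stated above, `output.shape[i] === input.shape[i] * reps[i]`:

```ts
import * as tf from '@tensorflow/tfjs';

const a = tf.tensor2d([[1, 2], [3, 4]]); // shape [2, 2]
const tiled = tf.tile(a, [2, 3]);        // shape [2*2, 2*3] = [4, 6]
console.log(tiled.shape);                // [4, 6]
tiled.print();
```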