mirror of https://github.com/vladmandic/human
strong typings
parent 606bebee10
commit 01990ad117
@@ -78244,7 +78244,9 @@ function enhance(input2) {
   const image3 = tidy(() => {
     const box3 = [[0.05, 0.15, 0.85, 0.85]];
     const tensor2 = input2.image || input2.tensor;
-    const crop = tensor2.shape.length === 3 ? image.cropAndResize(tensor2.expandDims(0), box3, [0], [model5.inputs[0].shape[2], model5.inputs[0].shape[1]]) : image.cropAndResize(tensor2, box3, [0], [model5.inputs[0].shape[2], model5.inputs[0].shape[1]]);
+    if (!(tensor2 instanceof Tensor))
+      return null;
+    const crop = tensor2.shape.length === 3 ? image.cropAndResize(expandDims(tensor2, 0), box3, [0], [model5.inputs[0].shape[2], model5.inputs[0].shape[1]]) : image.cropAndResize(tensor2, box3, [0], [model5.inputs[0].shape[2], model5.inputs[0].shape[1]]);
     const rgb3 = [0.2989, 0.587, 0.114];
     const [red, green, blue] = split(crop, 3, 3);
     const redNorm = mul(red, rgb3[0]);
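
The added lines move the Tensor guard ahead of the crop and swap the chained tensor2.expandDims(0) for the functional expandDims(tensor2, 0). A minimal standalone sketch of that guard-then-crop pattern, assuming a loaded model whose inputs[0].shape is laid out [batch, height, width, channels] as in the surrounding code (the helper name and parameters are illustrative, not code from this commit):

import * as tf from '@tensorflow/tfjs';

// Hypothetical helper mirroring the bundled logic above.
function cropForModel(input: { image?: tf.Tensor, tensor?: tf.Tensor }, model: { inputs: Array<{ shape: number[] }> }): tf.Tensor | null {
  const tensor = input.image || input.tensor;
  if (!(tensor instanceof tf.Tensor)) return null;                               // guard before any tensor op
  const box = [[0.05, 0.15, 0.85, 0.85]];                                        // normalized crop: top, left, bottom, right
  const size: [number, number] = [model.inputs[0].shape[2], model.inputs[0].shape[1]];
  const batched = tensor.shape.length === 3 ? tf.expandDims(tensor, 0) : tensor; // add batch dim if missing
  return tf.image.cropAndResize(batched as tf.Tensor4D, box, [0], size);
}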
@@ -99184,7 +99186,7 @@ var Human = class {
       return null;
     if (!input2)
       return "input is not defined";
-    if (this.tf.ENV.flags.IS_NODE && !(input2 instanceof this.tf.Tensor)) {
+    if (this.tf.ENV.flags.IS_NODE && !(input2 instanceof Tensor)) {
       return "input must be a tensor";
     }
     try {
@@ -99236,7 +99238,7 @@ var Human = class {
     });
     _calculateFaceAngle.set(this, (mesh) => {
       if (!mesh || mesh.length < 300)
-        return {};
+        return {roll: null, yaw: null, pitch: null};
       const radians = (a12, a22, b1, b2) => Math.atan2(b2 - a22, b1 - a12);
       const degrees = (theta) => Math.abs(theta * 180 / Math.PI % 360);
       const angle = {
File diff suppressed because one or more lines are too long
@@ -4396,7 +4396,9 @@ function enhance(input) {
   const image13 = tf8.tidy(() => {
     const box3 = [[0.05, 0.15, 0.85, 0.85]];
     const tensor = input.image || input.tensor;
-    const crop = tensor.shape.length === 3 ? tf8.image.cropAndResize(tensor.expandDims(0), box3, [0], [model4.inputs[0].shape[2], model4.inputs[0].shape[1]]) : tf8.image.cropAndResize(tensor, box3, [0], [model4.inputs[0].shape[2], model4.inputs[0].shape[1]]);
+    if (!(tensor instanceof tf8.Tensor))
+      return null;
+    const crop = tensor.shape.length === 3 ? tf8.image.cropAndResize(tf8.expandDims(tensor, 0), box3, [0], [model4.inputs[0].shape[2], model4.inputs[0].shape[1]]) : tf8.image.cropAndResize(tensor, box3, [0], [model4.inputs[0].shape[2], model4.inputs[0].shape[1]]);
     const rgb3 = [0.2989, 0.587, 0.114];
     const [red, green, blue] = tf8.split(crop, 3, 3);
     const redNorm = tf8.mul(red, rgb3[0]);
@@ -25351,7 +25353,7 @@ var Human = class {
       return null;
     if (!input)
       return "input is not defined";
-    if (this.tf.ENV.flags.IS_NODE && !(input instanceof this.tf.Tensor)) {
+    if (this.tf.ENV.flags.IS_NODE && !(input instanceof tf18.Tensor)) {
       return "input must be a tensor";
     }
     try {
@@ -25403,7 +25405,7 @@ var Human = class {
     });
     _calculateFaceAngle.set(this, (mesh) => {
       if (!mesh || mesh.length < 300)
-        return {};
+        return {roll: null, yaw: null, pitch: null};
       const radians = (a1, a2, b1, b2) => Math.atan2(b2 - a2, b1 - a1);
       const degrees = (theta) => Math.abs(theta * 180 / Math.PI % 360);
       const angle = {
File diff suppressed because one or more lines are too long
@@ -78244,7 +78244,9 @@ function enhance(input2) {
   const image3 = tidy(() => {
     const box3 = [[0.05, 0.15, 0.85, 0.85]];
     const tensor2 = input2.image || input2.tensor;
-    const crop = tensor2.shape.length === 3 ? image.cropAndResize(tensor2.expandDims(0), box3, [0], [model5.inputs[0].shape[2], model5.inputs[0].shape[1]]) : image.cropAndResize(tensor2, box3, [0], [model5.inputs[0].shape[2], model5.inputs[0].shape[1]]);
+    if (!(tensor2 instanceof Tensor))
+      return null;
+    const crop = tensor2.shape.length === 3 ? image.cropAndResize(expandDims(tensor2, 0), box3, [0], [model5.inputs[0].shape[2], model5.inputs[0].shape[1]]) : image.cropAndResize(tensor2, box3, [0], [model5.inputs[0].shape[2], model5.inputs[0].shape[1]]);
     const rgb3 = [0.2989, 0.587, 0.114];
     const [red, green, blue] = split(crop, 3, 3);
     const redNorm = mul(red, rgb3[0]);
@@ -99184,7 +99186,7 @@ var Human = class {
       return null;
     if (!input2)
       return "input is not defined";
-    if (this.tf.ENV.flags.IS_NODE && !(input2 instanceof this.tf.Tensor)) {
+    if (this.tf.ENV.flags.IS_NODE && !(input2 instanceof Tensor)) {
      return "input must be a tensor";
     }
     try {
@@ -99236,7 +99238,7 @@ var Human = class {
     });
     _calculateFaceAngle.set(this, (mesh) => {
       if (!mesh || mesh.length < 300)
-        return {};
+        return {roll: null, yaw: null, pitch: null};
       const radians = (a12, a22, b1, b2) => Math.atan2(b2 - a22, b1 - a12);
       const degrees = (theta) => Math.abs(theta * 180 / Math.PI % 360);
       const angle = {
File diff suppressed because one or more lines are too long
@@ -78251,7 +78251,9 @@ return a / b;`;
   const image3 = tidy(() => {
     const box3 = [[0.05, 0.15, 0.85, 0.85]];
     const tensor2 = input2.image || input2.tensor;
-    const crop = tensor2.shape.length === 3 ? image.cropAndResize(tensor2.expandDims(0), box3, [0], [model5.inputs[0].shape[2], model5.inputs[0].shape[1]]) : image.cropAndResize(tensor2, box3, [0], [model5.inputs[0].shape[2], model5.inputs[0].shape[1]]);
+    if (!(tensor2 instanceof Tensor))
+      return null;
+    const crop = tensor2.shape.length === 3 ? image.cropAndResize(expandDims(tensor2, 0), box3, [0], [model5.inputs[0].shape[2], model5.inputs[0].shape[1]]) : image.cropAndResize(tensor2, box3, [0], [model5.inputs[0].shape[2], model5.inputs[0].shape[1]]);
     const rgb3 = [0.2989, 0.587, 0.114];
     const [red, green, blue] = split(crop, 3, 3);
     const redNorm = mul(red, rgb3[0]);
@@ -99191,7 +99193,7 @@ lBhEMohlFerLlBjEMohMVTEARDKCITsAk2AEgAAAkAAAAAAAAAAAAAAAAAAAAAAAASAAAAAAAAD/
       return null;
     if (!input2)
       return "input is not defined";
-    if (this.tf.ENV.flags.IS_NODE && !(input2 instanceof this.tf.Tensor)) {
+    if (this.tf.ENV.flags.IS_NODE && !(input2 instanceof Tensor)) {
       return "input must be a tensor";
     }
     try {
@@ -99243,7 +99245,7 @@ lBhEMohlFerLlBjEMohMVTEARDKCITsAk2AEgAAAkAAAAAAAAAAAAAAAAAAAAAAAASAAAAAAAAD/
     });
     _calculateFaceAngle.set(this, (mesh) => {
       if (!mesh || mesh.length < 300)
-        return {};
+        return {roll: null, yaw: null, pitch: null};
       const radians = (a12, a22, b1, b2) => Math.atan2(b2 - a22, b1 - a12);
       const degrees = (theta) => Math.abs(theta * 180 / Math.PI % 360);
       const angle = {
File diff suppressed because one or more lines are too long
@@ -4376,7 +4376,9 @@ function enhance(input) {
   const image13 = tf8.tidy(() => {
     const box3 = [[0.05, 0.15, 0.85, 0.85]];
     const tensor = input.image || input.tensor;
-    const crop = tensor.shape.length === 3 ? tf8.image.cropAndResize(tensor.expandDims(0), box3, [0], [model4.inputs[0].shape[2], model4.inputs[0].shape[1]]) : tf8.image.cropAndResize(tensor, box3, [0], [model4.inputs[0].shape[2], model4.inputs[0].shape[1]]);
+    if (!(tensor instanceof tf8.Tensor))
+      return null;
+    const crop = tensor.shape.length === 3 ? tf8.image.cropAndResize(tf8.expandDims(tensor, 0), box3, [0], [model4.inputs[0].shape[2], model4.inputs[0].shape[1]]) : tf8.image.cropAndResize(tensor, box3, [0], [model4.inputs[0].shape[2], model4.inputs[0].shape[1]]);
     const rgb3 = [0.2989, 0.587, 0.114];
     const [red, green, blue] = tf8.split(crop, 3, 3);
     const redNorm = tf8.mul(red, rgb3[0]);
@@ -25331,7 +25333,7 @@ var Human = class {
       return null;
     if (!input)
       return "input is not defined";
-    if (this.tf.ENV.flags.IS_NODE && !(input instanceof this.tf.Tensor)) {
+    if (this.tf.ENV.flags.IS_NODE && !(input instanceof tf18.Tensor)) {
       return "input must be a tensor";
     }
     try {
@@ -25383,7 +25385,7 @@ var Human = class {
     });
     _calculateFaceAngle.set(this, (mesh) => {
       if (!mesh || mesh.length < 300)
-        return {};
+        return {roll: null, yaw: null, pitch: null};
       const radians = (a1, a2, b1, b2) => Math.atan2(b2 - a2, b1 - a1);
       const degrees = (theta) => Math.abs(theta * 180 / Math.PI % 360);
       const angle = {
File diff suppressed because one or more lines are too long
@@ -4376,7 +4376,9 @@ function enhance(input) {
   const image13 = tf8.tidy(() => {
     const box3 = [[0.05, 0.15, 0.85, 0.85]];
     const tensor = input.image || input.tensor;
-    const crop = tensor.shape.length === 3 ? tf8.image.cropAndResize(tensor.expandDims(0), box3, [0], [model4.inputs[0].shape[2], model4.inputs[0].shape[1]]) : tf8.image.cropAndResize(tensor, box3, [0], [model4.inputs[0].shape[2], model4.inputs[0].shape[1]]);
+    if (!(tensor instanceof tf8.Tensor))
+      return null;
+    const crop = tensor.shape.length === 3 ? tf8.image.cropAndResize(tf8.expandDims(tensor, 0), box3, [0], [model4.inputs[0].shape[2], model4.inputs[0].shape[1]]) : tf8.image.cropAndResize(tensor, box3, [0], [model4.inputs[0].shape[2], model4.inputs[0].shape[1]]);
     const rgb3 = [0.2989, 0.587, 0.114];
     const [red, green, blue] = tf8.split(crop, 3, 3);
     const redNorm = tf8.mul(red, rgb3[0]);
@@ -25331,7 +25333,7 @@ var Human = class {
       return null;
     if (!input)
       return "input is not defined";
-    if (this.tf.ENV.flags.IS_NODE && !(input instanceof this.tf.Tensor)) {
+    if (this.tf.ENV.flags.IS_NODE && !(input instanceof tf18.Tensor)) {
       return "input must be a tensor";
     }
     try {
@@ -25383,7 +25385,7 @@ var Human = class {
     });
     _calculateFaceAngle.set(this, (mesh) => {
       if (!mesh || mesh.length < 300)
-        return {};
+        return {roll: null, yaw: null, pitch: null};
       const radians = (a1, a2, b1, b2) => Math.atan2(b2 - a2, b1 - a1);
       const degrees = (theta) => Math.abs(theta * 180 / Math.PI % 360);
       const angle = {
File diff suppressed because one or more lines are too long
@@ -28,7 +28,6 @@ export class MediaPipeFaceMesh {
       if (mesh && mesh.length > 0) {
         for (const key of Object.keys(coords.MESH_ANNOTATIONS)) annotations[key] = coords.MESH_ANNOTATIONS[key].map((index) => mesh[index]);
       }
-      // const boxRaw = (prediction.box) ? { topLeft: prediction.box.startPoint, bottomRight: prediction.box.endPoint } : null;
       const box = prediction.box ? [
         Math.max(0, prediction.box.startPoint[0]),
         Math.max(0, prediction.box.startPoint[1]),
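
The surviving lines continue the same clamping idea: keep detection box corners inside the frame. A hedged sketch of that pattern; the startPoint clamps mirror the lines above, while clamping endPoint against an explicit width and height is an assumption added for illustration:

// prediction.box carries startPoint/endPoint as [x, y] pairs per the code above.
function clampBox(box: { startPoint: [number, number], endPoint: [number, number] }, width: number, height: number) {
  return [
    Math.max(0, box.startPoint[0]),          // never negative on the left edge
    Math.max(0, box.startPoint[1]),          // never negative on the top edge
    Math.min(width, box.endPoint[0]),        // assumed clamp against frame width
    Math.min(height, box.endPoint[1]),       // assumed clamp against frame height
  ];
}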
@@ -30,12 +30,12 @@ export function enhance(input) {
     // input received from detector is already normalized to 0..1
     // input is also assumed to be straightened
     // const data = tf.image.resizeBilinear(input, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false); // just resize to fit the embedding model

     // do a tight crop of image and resize it to fit the model
     const box = [[0.05, 0.15, 0.85, 0.85]]; // empyrical values for top, left, bottom, right
     const tensor = input.image || input.tensor;
     if (!(tensor instanceof tf.Tensor)) return null;
     const crop = (tensor.shape.length === 3)
-      ? tf.image.cropAndResize(tensor.expandDims(0), box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) // add batch dimension if missing
+      ? tf.image.cropAndResize(tf.expandDims(tensor, 0), box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) // add batch dimension if missing
       : tf.image.cropAndResize(tensor, box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
     // convert to black&white to avoid colorization impact
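
The "convert to black&white" step that follows this hunk uses the 0.2989/0.587/0.114 luma weights and channel split visible in the bundled hunks earlier. A sketch of that conversion, assuming the crop produced above; re-stacking the gray channel back to three channels is an assumption about what the embedding model expects:

import * as tf from '@tensorflow/tfjs';

// crop: [1, height, width, 3] tensor, as produced by cropAndResize above.
function toGrayscale(crop: tf.Tensor4D): tf.Tensor {
  const rgb = [0.2989, 0.587, 0.114];              // standard luma coefficients
  const [red, green, blue] = tf.split(crop, 3, 3); // split along the channel axis
  const gray = tf.add(tf.add(tf.mul(red, rgb[0]), tf.mul(green, rgb[1])), tf.mul(blue, rgb[2]));
  return tf.concat([gray, gray, gray], 3);         // back to a 3-channel tensor
}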
src/human.ts (66 changed lines)
@@ -43,7 +43,7 @@ class Human {
   version: string;
   config: typeof config.default;
   state: string;
-  image: { tensor: any, canvas: OffscreenCanvas | HTMLCanvasElement };
+  image: { tensor: typeof tf.Tensor, canvas: OffscreenCanvas | HTMLCanvasElement };
   // classes
   tf: typeof tf;
   draw: typeof draw;
@@ -102,7 +102,7 @@ class Human {
     };
     // export access to image processing
     // @ts-ignore
-    this.image = (input: any) => image.process(input, this.config);
+    this.image = (input: tf.Tensor | ImageData | HTMLCanvasElement | HTMLVideoElement | OffscreenCanvas) => image.process(input, this.config);
     // export raw access to underlying models
     this.classes = {
       facemesh,
@@ -132,10 +132,10 @@ class Human {
   }

   // quick sanity check on inputs
-  #sanity = (input) => {
+  #sanity = (input): null | string => {
     if (!this.#checkSanity) return null;
     if (!input) return 'input is not defined';
-    if (this.tf.ENV.flags.IS_NODE && !(input instanceof this.tf.Tensor)) {
+    if (this.tf.ENV.flags.IS_NODE && !(input instanceof tf.Tensor)) {
       return 'input must be a tensor';
     }
     try {
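
With #sanity now typed as returning null | string, failures carry a readable message instead of an untyped value. A standalone sketch of the same checks; the isNode flag stands in for this.tf.ENV.flags.IS_NODE:

import * as tf from '@tensorflow/tfjs';

// Returns a human-readable error, or null when the input looks usable.
function sanity(input: unknown, isNode: boolean): null | string {
  if (!input) return 'input is not defined';
  if (isNode && !(input instanceof tf.Tensor)) return 'input must be a tensor';
  return null;
}

// A non-null result is meant to short-circuit detection with an error message.
const message = sanity(tf.zeros([1, 64, 64, 3]), true); // null: a tensor passes the check
if (message) console.warn(message);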
@@ -151,7 +151,7 @@ class Human {
     return 0;
   }

-  enhance(input: any): any {
+  enhance(input: typeof tf.Tensor): typeof tf.Tensor | null {
     if (this.config.face.embedding.enabled) return embedding.enhance(input);
     return null;
   }
@@ -267,8 +267,8 @@ class Human {
     }
   }

-  #calculateFaceAngle = (mesh) => {
-    if (!mesh || mesh.length < 300) return {};
+  #calculateFaceAngle = (mesh): { roll: number | null, yaw: number | null, pitch: number | null } => {
+    if (!mesh || mesh.length < 300) return { roll: null, yaw: null, pitch: null };
     const radians = (a1, a2, b1, b2) => Math.atan2(b2 - a2, b1 - a1);
     // eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
     const degrees = (theta) => Math.abs(((theta * 180) / Math.PI) % 360);
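
The widened signature fixes the result shape even when the mesh is too short to measure. A sketch that produces that shape with the radians helper above; the landmark indices are placeholders, not the ones the library actually uses:

type FaceAngle = { roll: number | null, yaw: number | null, pitch: number | null };

const radians = (a1: number, a2: number, b1: number, b2: number) => Math.atan2(b2 - a2, b1 - a1);

function faceAngle(mesh: Array<[number, number, number]>): FaceAngle {
  if (!mesh || mesh.length < 300) return { roll: null, yaw: null, pitch: null };
  const [left, right] = [mesh[33], mesh[263]];            // illustrative eye-corner indices
  return {
    roll: radians(left[0], left[1], right[0], right[1]),  // in-plane tilt between the two points
    yaw: null,                                            // omitted in this sketch
    pitch: null,                                          // omitted in this sketch
  };
}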
@@ -285,7 +285,7 @@ class Human {
     return angle;
   }

-  #detectFace = async (input) => {
+  #detectFace = async (input): Promise<any> => {
     // run facemesh, includes blazeface and iris
     // eslint-disable-next-line no-async-promise-executor
     let timeStamp;
@@ -297,10 +297,10 @@ class Human {
       confidence: number,
       boxConfidence: number,
       faceConfidence: number,
-      box: any,
-      mesh:any,
-      meshRaw: any,
-      boxRaw: any,
+      box: [number, number, number, number],
+      mesh: Array<[number, number, number]>
+      meshRaw: Array<[number, number, number]>
+      boxRaw: [number, number, number, number],
       annotations: any,
       age: number,
       gender: string,
@@ -308,7 +308,7 @@ class Human {
       emotion: string,
       embedding: any,
       iris: number,
-      angle: any,
+      angle: { roll: number | null, yaw: number | null, pitch: number | null },
     }> = [];

     this.state = 'run:face';
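
The same face record shape is spelled out inline here and again in the detect() signature below; purely as an illustration (the commit itself keeps the shape inline), it could be captured once as a named type:

// Illustrative only; every field below appears in the typed result of this commit.
type FaceResult = {
  confidence: number,
  boxConfidence: number,
  faceConfidence: number,
  box: [number, number, number, number],
  mesh: Array<[number, number, number]>,
  meshRaw: Array<[number, number, number]>,
  boxRaw: [number, number, number, number],
  annotations: any,
  age: number,
  gender: string,
  genderConfidence: number,
  emotion: string,
  embedding: any,
  iris: number,
  angle: { roll: number | null, yaw: number | null, pitch: number | null },
};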
@@ -418,7 +418,45 @@ class Human {
   }

   // main detect function
-  async detect(input, userConfig = {}): Promise<{ face: Array<{ any }>, body: Array<{ any }>, hand: Array<{ any }>, gesture: Array<{ any }>, performance: object, canvas: OffscreenCanvas | HTMLCanvasElement } | { error: string }> {
+  async detect(input, userConfig = {}): Promise<{
+    face: Array<{
+      confidence: number,
+      boxConfidence: number,
+      faceConfidence: number,
+      box: [number, number, number, number],
+      mesh: Array<[number, number, number]>
+      meshRaw: Array<[number, number, number]>
+      boxRaw: [number, number, number, number],
+      annotations: any,
+      age: number,
+      gender: string,
+      genderConfidence: number,
+      emotion: string,
+      embedding: any,
+      iris: number,
+      angle: { roll: number | null, yaw: number | null, pitch: number | null },
+    }>,
+    body: Array<{
+      id: number,
+      part: string,
+      position: { x: number, y: number, z: number },
+      score: number,
+      presence: number }>,
+    hand: Array<{
+      confidence: number,
+      box: any,
+      landmarks: any,
+      annotations: any,
+    }>,
+    gesture: Array<{
+      part: string,
+      gesture: string,
+    }>,
+    performance: { any },
+    canvas: OffscreenCanvas | HTMLCanvasElement
+  } | { error: string }> {
+    // end definition
+
     // detection happens inside a promise
     return new Promise(async (resolve) => {
       this.state = 'config';
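
With detect() declared against this structured result, call sites get checked access to the nested fields. A hedged usage sketch; human is assumed to be an already-constructed Human instance and is typed loosely here because the class is not imported:

async function logFaces(human: any, input: HTMLCanvasElement) {
  const result = await human.detect(input);
  if ('error' in result) { console.error(result.error); return; }
  for (const face of result.face) {
    const [x, y, width, height] = face.box;               // box is a 4-tuple, not `any`
    console.log(x, y, width, height, face.gender, face.age, face.angle.roll);
  }
}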
@@ -14,7 +14,7 @@ let fx = null;
 // process input image and return tensor
 // input can be tensor, imagedata, htmlimageelement, htmlvideoelement
 // input is resized and run through imagefx filter
-export function process(input, config): { tensor, canvas } {
+export function process(input, config): { tensor: tf.Tensor, canvas: OffscreenCanvas | HTMLCanvasElement } {
   let tensor;
   if (input instanceof tf.Tensor) {
     tensor = tf.clone(input);
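
The stronger return type on process() gives both halves of the result concrete types. A hedged consumption sketch; processImage stands in for the exported process, and the input element and config are assumed to come from the caller:

import * as tf from '@tensorflow/tfjs';

type Processed = { tensor: tf.Tensor, canvas: OffscreenCanvas | HTMLCanvasElement };

function consume(processImage: (input: any, config: any) => Processed, input: HTMLVideoElement, config: any) {
  const { tensor, canvas } = processImage(input, config);
  console.log(tensor.shape, canvas.width, canvas.height); // no `any` escape hatch on either field
  tf.dispose(tensor);                                      // the caller still owns the tensor
}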
@@ -14,7 +14,7 @@ declare class Human {
     config: typeof config.default;
     state: string;
     image: {
-        tensor: any;
+        tensor: typeof tf.Tensor;
         canvas: OffscreenCanvas | HTMLCanvasElement;
     };
     tf: typeof tf;
@@ -53,22 +53,54 @@ declare class Human {
         largestKernelOps: any;
     } | {};
     simmilarity(embedding1: Array<number>, embedding2: Array<number>): number;
-    enhance(input: any): any;
+    enhance(input: typeof tf.Tensor): typeof tf.Tensor | null;
     load(userConfig?: null): Promise<void>;
     detect(input: any, userConfig?: {}): Promise<{
         face: Array<{
-            any: any;
+            confidence: number;
+            boxConfidence: number;
+            faceConfidence: number;
+            box: [number, number, number, number];
+            mesh: Array<[number, number, number]>;
+            meshRaw: Array<[number, number, number]>;
+            boxRaw: [number, number, number, number];
+            annotations: any;
+            age: number;
+            gender: string;
+            genderConfidence: number;
+            emotion: string;
+            embedding: any;
+            iris: number;
+            angle: {
+                roll: number | null;
+                yaw: number | null;
+                pitch: number | null;
+            };
        }>;
        body: Array<{
-            any: any;
+            id: number;
+            part: string;
+            position: {
+                x: number;
+                y: number;
+                z: number;
+            };
+            score: number;
+            presence: number;
        }>;
        hand: Array<{
-            any: any;
+            confidence: number;
+            box: any;
+            landmarks: any;
+            annotations: any;
        }>;
        gesture: Array<{
-            any: any;
+            part: string;
+            gesture: string;
        }>;
-        performance: object;
+        performance: {
+            any: any;
+        };
        canvas: OffscreenCanvas | HTMLCanvasElement;
    } | {
        error: string;
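
The declaration now types enhance() as tensor in, tensor or null out, and simmilarity() over plain number arrays. A hedged usage sketch against that surface; human is assumed to be an initialized instance and the descriptors to come from earlier detect() results:

import * as tf from '@tensorflow/tfjs';

// Compare two face descriptors; per the declaration above, the result is a plain number.
function compareFaces(human: any, a: Array<number>, b: Array<number>): number {
  return human.simmilarity(a, b);
}

// enhance() may return null (for example when face.embedding is disabled), so callers must check.
function tryEnhance(human: any, input: tf.Tensor): tf.Tensor | null {
  const out = human.enhance(input);
  return out;
}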
@@ -1,4 +1,5 @@
+import * as tf from '../dist/tfjs.esm.js';
 export declare function process(input: any, config: any): {
-    tensor: any;
-    canvas: any;
+    tensor: tf.Tensor;
+    canvas: OffscreenCanvas | HTMLCanvasElement;
 };