diff --git a/demo/index.js b/demo/index.js
index 3a8be7f0..215d5b9f 100644
--- a/demo/index.js
+++ b/demo/index.js
@@ -31,10 +31,10 @@ import jsonView from './helpers/jsonview.js';
 
 let human;
 let userConfig = {
+  /*
   warmup: 'none',
   backend: 'humangl',
   debug: true,
-  /*
   wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.9.0/dist/',
   async: false,
   cacheSensitivity: 0.75,
@@ -51,8 +51,8 @@ let userConfig = {
   },
   object: { enabled: false },
   gesture: { enabled: true },
-  hand: { enabled: false },
-  // hand: { enabled: true, maxDetected: 1, minConfidence: 0.5, detector: { modelPath: 'handtrack.json' } },
+  // hand: { enabled: false },
+  hand: { enabled: true, maxDetected: 1, minConfidence: 0.5, detector: { modelPath: 'handtrack.json' } },
   body: { enabled: false },
   // body: { enabled: true, modelPath: 'movenet-multipose.json' },
   // body: { enabled: true, modelPath: 'posenet.json' },
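For reference, the demo change above amounts to a user configuration that switches hand detection to the new handtrack detector. A minimal sketch of the same override (import path and constructor usage assumed, all other modules left at defaults):

```ts
// hypothetical minimal setup mirroring the demo override above
import { Human } from '@vladmandic/human';

const userConfig = {
  hand: { enabled: true, maxDetected: 1, minConfidence: 0.5, detector: { modelPath: 'handtrack.json' } },
};
const human = new Human(userConfig);
human.load().then(() => {
  // models (including the handtrack detector) are now preloaded; detect() would also load them lazily
});
```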
diff --git a/src/face.ts b/src/face.ts
index 94eeb46d..f1835940 100644
--- a/src/face.ts
+++ b/src/face.ts
@@ -226,10 +226,11 @@ export const detectFace = async (parent /* instance of human */, input: Tensor):
         delete faces[i].annotations.leftEyeIris;
         delete faces[i].annotations.rightEyeIris;
       }
-      const irisSize = (faces[i].annotations?.leftEyeIris && faces[i].annotations?.rightEyeIris)
-      /* note: average human iris size is 11.7mm */
+      const irisSize = (faces[i].annotations && faces[i].annotations.leftEyeIris && faces[i].annotations.rightEyeIris
+        && (faces[i].annotations.leftEyeIris.length > 0) && (faces[i].annotations.rightEyeIris.length > 0)
+        && (faces[i].annotations.leftEyeIris[0] !== null) && (faces[i].annotations.rightEyeIris[0] !== null))
         ? Math.max(Math.abs(faces[i].annotations.leftEyeIris[3][0] - faces[i].annotations.leftEyeIris[1][0]), Math.abs(faces[i].annotations.rightEyeIris[4][1] - faces[i].annotations.rightEyeIris[2][1])) / input.shape[2]
-        : 0;
+        : 0; // note: average human iris size is 11.7mm
 
       // optionally return tensor
       const tensor = parent.config.face.detector.return ? tf.squeeze(faces[i].tensor) : null;
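The rewritten guard trades optional chaining for explicit checks that also verify the iris arrays are non-empty and that their first point is not null. A compact sketch of an equivalent guard (not the code used in the commit, only a restatement of the same checks on local aliases):

```ts
// sketch: same guard expressed with optional chaining on local aliases
const left = faces[i].annotations?.leftEyeIris;
const right = faces[i].annotations?.rightEyeIris;
const irisSize = (left?.[0] && right?.[0])
  ? Math.max(Math.abs(left[3][0] - left[1][0]), Math.abs(right[4][1] - right[2][1])) / input.shape[2]
  : 0; // note: average human iris size is 11.7mm
```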
diff --git a/src/handtrack/handtrack.ts b/src/handtrack/handtrack.ts
index cf82da5d..e847dad3 100644
--- a/src/handtrack/handtrack.ts
+++ b/src/handtrack/handtrack.ts
@@ -9,9 +9,11 @@ import type { GraphModel, Tensor } from '../tfjs/types';
 import type { Config } from '../config';
 import { env } from '../env';
 import * as fingerPose from '../fingerpose/fingerpose';
+import { fakeOps } from '../tfjs/backend';
 
 const models: [GraphModel | null, GraphModel | null] = [null, null];
 const modelOutputNodes = ['StatefulPartitionedCall/Postprocessor/Slice', 'StatefulPartitionedCall/Postprocessor/ExpandDims_1'];
+
 const inputSize = [[0, 0], [0, 0]];
 
 const classes = [
@@ -55,12 +57,12 @@ const fingerMap = {
   palm: [0],
 };
 
-export async function load(config: Config): Promise<[GraphModel, GraphModel]> {
-  if (env.initial) {
-    models[0] = null;
-    models[1] = null;
-  }
+export async function loadDetect(config: Config): Promise<GraphModel> {
+  if (env.initial) models[0] = null;
   if (!models[0]) {
+    // handtrack model has some kernel ops defined in the model but those are never referenced and do not exist in tfjs
+    // ideally the model itself needs to be pruned
+    fakeOps(['tensorlistreserve', 'enter', 'tensorlistfromtensor', 'merge', 'loopcond', 'switch', 'exit', 'tensorliststack', 'nextiteration', 'tensorlistsetitem', 'tensorlistgetitem', 'reciprocal', 'shape', 'split', 'where'], config);
     models[0] = await tf.loadGraphModel(join(config.modelBasePath, config.hand.detector?.modelPath || '')) as unknown as GraphModel;
     const inputs = Object.values(models[0].modelSignature['inputs']);
     inputSize[0][0] = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[1].size) : 0;
@@ -68,6 +70,11 @@ export async function load(config: Config): Promise<[GraphModel, GraphModel]> {
     if (!models[0] || !models[0]['modelUrl']) log('load model failed:', config.object.modelPath);
     else if (config.debug) log('load model:', models[0]['modelUrl']);
   } else if (config.debug) log('cached model:', models[0]['modelUrl']);
+  return models[0];
+}
+
+export async function loadSkeleton(config: Config): Promise<GraphModel> {
+  if (env.initial) models[1] = null;
   if (!models[1]) {
     models[1] = await tf.loadGraphModel(join(config.modelBasePath, config.hand.skeleton?.modelPath || '')) as unknown as GraphModel;
     const inputs = Object.values(models[1].modelSignature['inputs']);
@@ -76,7 +83,13 @@ export async function load(config: Config): Promise<[GraphModel, GraphModel]> {
     if (!models[1] || !models[1]['modelUrl']) log('load model failed:', config.object.modelPath);
     else if (config.debug) log('load model:', models[1]['modelUrl']);
   } else if (config.debug) log('cached model:', models[1]['modelUrl']);
-  return models as [GraphModel, GraphModel];
+  return models[1];
+}
+
+export async function load(config: Config): Promise<[GraphModel | null, GraphModel | null]> {
+  if (!models[0]) await loadDetect(config);
+  if (!models[1]) await loadSkeleton(config);
+  return models;
 }
 
 async function detectHands(input: Tensor, config: Config): Promise {
@@ -217,4 +230,20 @@ export async function predict(input: Tensor, config: Config): Promise
 - Original:
 - Writeup:
+- Convert:
+  tensorflowjs_converter --input_format=tf_frozen_model --output_format=tfjs_graph_model \
+  --output_node_names='num_detections,detection_boxes,detection_scores,detection_classes' --saved_model_tags=serve --quantize_uint8=* \
+  --strip_debug_ops=* --weight_shard_size_bytes=10000000000 --control_flow_v2=true frozen_inference_graph.pb graph
+
+webmodel/efficientdet512d0/base/model.json
+webmodel/centernet512fpn/base/model.json
+https://github.com/victordibia/handtrack.js/commit/70d5d9c98e69688414cddaad044bd8730bc982d1#diff-c40e819be4ec1dc29f26913f5cdeb05202261b3a1725ab259cb235ea0f0fc5d6
+
+git rev-list HEAD -- webmodel/*
+  9ba7220fb31e9168aa248500cc70800566f4c719
+  70d5d9c98e69688414cddaad044bd8730bc982d1
+
+git checkout 9ba7220fb31e9168aa248500cc70800566f4c719^ -- webmodel
+git checkout 70d5d9c98e69688414cddaad044bd8730bc982d1^ -- webmodel
+
 */
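Splitting the loader means callers can bring up the detector without the skeleton model. A rough usage sketch (the `config` object and the relative import path are assumed for illustration):

```ts
// sketch: load the handtrack detector on its own, and the skeleton model only when landmarks are wanted
import * as handtrack from './handtrack/handtrack';

const detector = await handtrack.loadDetect(config); // registers the stub kernels via fakeOps(), then loads the graph model
const skeleton = config.hand.landmarks ? await handtrack.loadSkeleton(config) : null;
// the combined load(config) still exists and simply awaits both loaders in turn
```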
diff --git a/src/human.ts b/src/human.ts
index 8718444d..c9abc6f1 100644
--- a/src/human.ts
+++ b/src/human.ts
@@ -29,7 +29,7 @@ import * as backend from './tfjs/backend';
 import * as humangl from './tfjs/humangl';
 import * as app from '../package.json';
 import * as warmups from './warmup';
-import type { Tensor, GraphModel } from './tfjs/types';
+import type { Tensor } from './tfjs/types';
 import type { DrawOptions } from './draw';
 
 // export types
@@ -37,6 +37,7 @@ export * from './config';
 export * from './result';
 export type { DrawOptions } from './draw';
 export { env, Env } from './env';
+export { Models } from './models';
 
 /** Defines all possible input types for **Human** detection
 * @typedef Input Type
@@ -119,23 +120,7 @@ export class Human {
   /** Currently loaded models
   * @internal
   */
-  models: {
-    face: [unknown, GraphModel | null, GraphModel | null] | null,
-    posenet: GraphModel | null,
-    blazepose: GraphModel | null,
-    efficientpose: GraphModel | null,
-    movenet: GraphModel | null,
-    handpose: [GraphModel | null, GraphModel | null] | null,
-    handtrack: [GraphModel | null, GraphModel | null] | null,
-    age: GraphModel | null,
-    gender: GraphModel | null,
-    emotion: GraphModel | null,
-    embedding: GraphModel | null,
-    nanodet: GraphModel | null,
-    centernet: GraphModel | null,
-    faceres: GraphModel | null,
-    segmentation: GraphModel | null,
-  };
+  models: models.Models;
 
   /** Container for events dispatched by Human
   *
@@ -187,23 +172,7 @@ export class Human {
     this.performance = { backend: 0, load: 0, image: 0, frames: 0, cached: 0, changed: 0, total: 0, draw: 0 };
     this.events = new EventTarget();
     // object that contains all initialized models
-    this.models = {
-      face: null, // array of models
-      handpose: null, // array of models
-      handtrack: null, // array of models
-      posenet: null,
-      blazepose: null,
-      efficientpose: null,
-      movenet: null,
-      age: null,
-      gender: null,
-      emotion: null,
-      embedding: null,
-      nanodet: null,
-      centernet: null,
-      faceres: null,
-      segmentation: null,
-    };
+    this.models = new models.Models();
     // reexport draw methods
     this.draw = {
       options: draw.options as DrawOptions,
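With `models` now an instance of the exported `Models` class, loaded models can be enumerated generically instead of checking a hand-written list of properties. A sketch (reuses the `userConfig` shape from the demo section above):

```ts
// sketch: list which models ended up loaded after human.load()
const human = new Human(userConfig);
await human.load();
const loaded = Object.entries(human.models)
  .filter(([, model]) => model !== null)
  .map(([name]) => name);
console.log('loaded models:', loaded); // e.g. ['facedetect', 'facemesh', 'handtrack', ...]
```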
diff --git a/src/models.ts b/src/models.ts
index ace8776f..9e652e75 100644
--- a/src/models.ts
+++ b/src/models.ts
@@ -12,77 +12,72 @@ import * as movenet from './movenet/movenet';
 import * as nanodet from './object/nanodet';
 import * as centernet from './object/centernet';
 import * as segmentation from './segmentation/segmentation';
+import type { Human } from './human';
 import { env } from './env';
-// import * as agegenderrace from './gear/agegenderrace';
+import * as agegenderrace from './gear/agegenderrace';
 
-export function reset(instance) {
+/** Instances of all possible TFJS Graph Models used by Human
+ * - loaded as needed based on configuration
+ * - initialized explicitly with `human.load()` method
+ * - initialized implicitly on first call to `human.detect()`
+ * - each model can be `null` if not loaded, instance of `GraphModel` if loaded or `Promise` if loading
+ */
+export class Models {
+  age: null | GraphModel | Promise<GraphModel> = null;
+  agegenderrace: null | GraphModel | Promise<GraphModel> = null;
+  blazepose: null | GraphModel | Promise<GraphModel> = null;
+  centernet: null | GraphModel | Promise<GraphModel> = null;
+  efficientpose: null | GraphModel | Promise<GraphModel> = null;
+  embedding: null | GraphModel | Promise<GraphModel> = null;
+  emotion: null | GraphModel | Promise<GraphModel> = null;
+  facedetect: null | GraphModel | Promise<GraphModel> = null;
+  faceiris: null | GraphModel | Promise<GraphModel> = null;
+  facemesh: null | GraphModel | Promise<GraphModel> = null;
+  faceres: null | GraphModel | Promise<GraphModel> = null;
+  gender: null | GraphModel | Promise<GraphModel> = null;
+  handpose: null | GraphModel | Promise<GraphModel> = null;
+  handskeleton: null | GraphModel | Promise<GraphModel> = null;
+  handtrack: null | GraphModel | Promise<GraphModel> = null;
+  movenet: null | GraphModel | Promise<GraphModel> = null;
+  nanodet: null | GraphModel | Promise<GraphModel> = null;
+  posenet: null | GraphModel | Promise<GraphModel> = null;
+  segmentation: null | GraphModel | Promise<GraphModel> = null;
+}
+
+export function reset(instance: Human) {
   // if (instance.config.debug) log('resetting loaded models');
-  instance.models = {
-    face: null, // array of models
-    handpose: null, // array of models
-    handtrack: null, // array of models
-    posenet: null,
-    blazepose: null,
-    efficientpose: null,
-    movenet: null,
-    age: null,
-    gender: null,
-    emotion: null,
-    embedding: null,
-    nanodet: null,
-    centernet: null,
-    faceres: null,
-    segmentation: null,
-  };
+  for (const model of Object.keys(instance.models)) instance.models[model] = null;
 }
 
 /** Load method preloads all instance.configured models on-demand */
-export async function load(instance) {
+export async function load(instance: Human) {
   if (env.initial) reset(instance);
-  if (instance.config.async) { // load models concurrently
-    [
-      instance.models.face,
-      instance.models.emotion,
-      instance.models.handpose,
-      instance.models.handtrack,
-      instance.models.posenet,
-      instance.models.blazepose,
-      instance.models.efficientpose,
-      instance.models.movenet,
-      instance.models.nanodet,
-      instance.models.centernet,
-      instance.models.faceres,
-      instance.models.segmentation,
-      // instance.models.agegenderrace,
-    ] = await Promise.all([
-      instance.models.face || (instance.config.face.enabled ? facemesh.load(instance.config) : null),
-      instance.models.emotion || ((instance.config.face.enabled && instance.config.face.emotion.enabled) ? emotion.load(instance.config) : null),
-      instance.models.handpose || (instance.config.hand.enabled && instance.config.hand.detector.modelPath.includes('handdetect') ? handpose.load(instance.config) : null),
-      instance.models.handtrack || (instance.config.hand.enabled && instance.config.hand.detector.modelPath.includes('handtrack') ? handtrack.load(instance.config) : null),
-      instance.models.posenet || (instance.config.body.enabled && instance.config.body.modelPath.includes('posenet') ? posenet.load(instance.config) : null),
-      instance.models.blazepose || (instance.config.body.enabled && instance.config.body.modelPath.includes('blazepose') ? blazepose.load(instance.config) : null),
-      instance.models.efficientpose || (instance.config.body.enabled && instance.config.body.modelPath.includes('efficientpose') ? efficientpose.load(instance.config) : null),
-      instance.models.movenet || (instance.config.body.enabled && instance.config.body.modelPath.includes('movenet') ? movenet.load(instance.config) : null),
-      instance.models.nanodet || (instance.config.object.enabled && instance.config.object.modelPath.includes('nanodet') ? nanodet.load(instance.config) : null),
-      instance.models.centernet || (instance.config.object.enabled && instance.config.object.modelPath.includes('centernet') ? centernet.load(instance.config) : null),
-      instance.models.faceres || ((instance.config.face.enabled && instance.config.face.description.enabled) ? faceres.load(instance.config) : null),
-      instance.models.segmentation || (instance.config.segmentation.enabled ? segmentation.load(instance.config) : null),
-      // instance.models.agegenderrace || ((instance.config.face.enabled && instance.config.face.agegenderrace.enabled) ? agegenderrace.load(instance.config) : null),
-    ]);
-  } else { // load models sequentially
-    if (instance.config.face.enabled && !instance.models.face) instance.models.face = await facemesh.load(instance.config);
-    if (instance.config.face.enabled && instance.config.face.emotion.enabled && !instance.models.emotion) instance.models.emotion = await emotion.load(instance.config);
-    if (instance.config.hand.enabled && !instance.models.handpose && instance.config.hand.detector.modelPath.includes('handdetect')) instance.models.handpose = await handpose.load(instance.config);
-    if (instance.config.hand.enabled && !instance.models.handtrack && instance.config.hand.detector.modelPath.includes('handtrack')) instance.models.handtrack = await handtrack.load(instance.config);
-    if (instance.config.body.enabled && !instance.models.posenet && instance.config.body.modelPath.includes('posenet')) instance.models.posenet = await posenet.load(instance.config);
-    if (instance.config.body.enabled && !instance.models.blazepose && instance.config.body.modelPath.includes('blazepose')) instance.models.blazepose = await blazepose.load(instance.config);
-    if (instance.config.body.enabled && !instance.models.efficientpose && instance.config.body.modelPath.includes('efficientpose')) instance.models.efficientpose = await blazepose.load(instance.config);
-    if (instance.config.body.enabled && !instance.models.movenet && instance.config.body.modelPath.includes('movenet')) instance.models.movenet = await movenet.load(instance.config);
-    if (instance.config.object.enabled && !instance.models.nanodet && instance.config.object.modelPath.includes('nanodet')) instance.models.nanodet = await nanodet.load(instance.config);
-    if (instance.config.object.enabled && !instance.models.centernet && instance.config.object.modelPath.includes('centernet')) instance.models.centernet = await centernet.load(instance.config);
-    if (instance.config.face.enabled && instance.config.face.description.enabled && !instance.models.faceres) instance.models.faceres = await faceres.load(instance.config);
-    if (instance.config.segmentation.enabled && !instance.models.segmentation) instance.models.segmentation = await segmentation.load(instance.config);
-    // if (instance.config.face.enabled && instance.config.face.agegenderrace.enabled && !instance.models.agegenderrace) instance.models.agegenderrace = await agegenderrace.load(instance.config);
+  if (instance.config.face.enabled) { // face model is a combo that must be loaded as a whole
+    if (!instance.models.facedetect) [instance.models.facedetect, instance.models.facemesh, instance.models.faceiris] = await facemesh.load(instance.config);
+    if (instance.config.face.mesh?.enabled && !instance.models.facemesh) [instance.models.facedetect, instance.models.facemesh, instance.models.faceiris] = await facemesh.load(instance.config);
+    if (instance.config.face.iris?.enabled && !instance.models.faceiris) [instance.models.facedetect, instance.models.facemesh, instance.models.faceiris] = await facemesh.load(instance.config);
+  }
+  if (instance.config.hand.enabled) { // handpose model is a combo that must be loaded as a whole
+    if (!instance.models.handpose && instance.config.hand.detector?.modelPath?.includes('handdetect')) [instance.models.handpose, instance.models.handskeleton] = await handpose.load(instance.config);
+    if (!instance.models.handskeleton && instance.config.hand.landmarks && instance.config.hand.detector?.modelPath?.includes('handdetect')) [instance.models.handpose, instance.models.handskeleton] = await handpose.load(instance.config);
+  }
+  if (instance.config.hand.enabled && !instance.models.handtrack && instance.config.hand.detector?.modelPath?.includes('handtrack')) instance.models.handtrack = handtrack.loadDetect(instance.config);
+  if (instance.config.hand.enabled && instance.config.hand.landmarks && !instance.models.handskeleton && instance.config.hand.detector?.modelPath?.includes('handtrack')) instance.models.handskeleton = handtrack.loadSkeleton(instance.config);
+  if (instance.config.body.enabled && !instance.models.posenet && instance.config.body?.modelPath?.includes('posenet')) instance.models.posenet = posenet.load(instance.config);
+  if (instance.config.body.enabled && !instance.models.efficientpose && instance.config.body?.modelPath?.includes('efficientpose')) instance.models.efficientpose = efficientpose.load(instance.config);
+  if (instance.config.body.enabled && !instance.models.blazepose && instance.config.body?.modelPath?.includes('blazepose')) instance.models.blazepose = blazepose.load(instance.config);
+  if (instance.config.body.enabled && !instance.models.movenet && instance.config.body?.modelPath?.includes('movenet')) instance.models.movenet = movenet.load(instance.config);
+  if (instance.config.object.enabled && !instance.models.nanodet && instance.config.object?.modelPath?.includes('nanodet')) instance.models.nanodet = nanodet.load(instance.config);
+  if (instance.config.object.enabled && !instance.models.centernet && instance.config.object?.modelPath?.includes('centernet')) instance.models.centernet = centernet.load(instance.config);
+  if (instance.config.face.enabled && instance.config.face.emotion?.enabled && !instance.models.emotion) instance.models.emotion = emotion.load(instance.config);
+  if (instance.config.face.enabled && instance.config.face.description?.enabled && !instance.models.faceres) instance.models.faceres = faceres.load(instance.config);
+  if (instance.config.segmentation.enabled && !instance.models.segmentation) instance.models.segmentation = segmentation.load(instance.config);
+  if (instance.config.face.enabled && instance.config.face['agegenderrace']?.enabled && !instance.models.agegenderrace) instance.models.agegenderrace = agegenderrace.load(instance.config);
+
+  // models are loaded in parallel asynchronously so let's wait until they are actually loaded
+  for await (const model of Object.keys(instance.models)) {
+    if (instance.models[model] && typeof instance.models[model] !== 'undefined') instance.models[model] = await instance.models[model];
   }
 }
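The rewritten `load()` stores whatever each loader returns, either a resolved `GraphModel` or a still-pending `Promise`, and only awaits at the end, so independent models can download in parallel regardless of the old `config.async` branch. The underlying pattern, reduced to a sketch:

```ts
// sketch of the load-then-resolve pattern used above
// each slot holds null, a loaded GraphModel, or an in-flight Promise<GraphModel>
if (config.object.enabled && !models.centernet) models.centernet = centernet.load(config); // promise stored, not awaited
// ...start any other loads the configuration asks for...
for (const name of Object.keys(models)) {
  if (models[name]) models[name] = await models[name]; // awaiting a non-promise value simply returns it
}
```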
diff --git a/src/object/centernet.ts b/src/object/centernet.ts
index 0d70ae17..f12c0631 100644
--- a/src/object/centernet.ts
+++ b/src/object/centernet.ts
@@ -9,6 +9,7 @@ import type { ObjectResult } from '../result';
 import type { GraphModel, Tensor } from '../tfjs/types';
 import type { Config } from '../config';
 import { env } from '../env';
+import { fakeOps } from '../tfjs/backend';
 
 let model: GraphModel | null;
 let inputSize = 0;
@@ -18,6 +19,7 @@ let skipped = Number.MAX_SAFE_INTEGER;
 export async function load(config: Config): Promise<GraphModel> {
   if (env.initial) model = null;
   if (!model) {
+    fakeOps(['floormod'], config);
     model = await tf.loadGraphModel(join(config.modelBasePath, config.object.modelPath || '')) as unknown as GraphModel;
     const inputs = Object.values(model.modelSignature['inputs']);
     inputSize = Array.isArray(inputs) ? parseInt(inputs[0].tensorShape.dim[2].size) : 0;
diff --git a/src/tfjs/backend.ts b/src/tfjs/backend.ts
index d818541e..eb1d8102 100644
--- a/src/tfjs/backend.ts
+++ b/src/tfjs/backend.ts
@@ -96,3 +96,19 @@ export async function check(instance, force = false) {
   }
   return true;
 }
+
+// register fake missing tfjs ops
+export function fakeOps(kernelNames: Array<string>, config) {
+  // if (config.debug) log('registerKernel:', kernelNames);
+  for (const kernelName of kernelNames) {
+    const kernelConfig = {
+      kernelName,
+      backendName: config.backend,
+      kernelFunc: () => { if (config.debug) log('kernelFunc', kernelName, config.backend); },
+      // setupFunc: () => { if (config.debug) log('kernelFunc', kernelName, config.backend); },
+      // disposeFunc: () => { if (config.debug) log('kernelFunc', kernelName, config.backend); },
+    };
+    tf.registerKernel(kernelConfig);
+  }
+  env.env.kernels = tf.getKernelsForBackend(tf.getBackend()).map((kernel) => kernel.kernelName.toLowerCase()); // re-scan registered ops
+}
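`fakeOps()` registers stub kernels so that `tf.loadGraphModel()` does not reject models whose graphs mention ops the active backend never implements; both the handtrack and centernet loaders above call it immediately before loading. A standalone sketch (the op name, `config`, and `modelUrl` are placeholders):

```ts
// sketch: register a no-op kernel for an unsupported graph op, then load the model that references it
import { fakeOps } from './tfjs/backend';

fakeOps(['floormod'], config);                    // same call the centernet loader makes above
const model = await tf.loadGraphModel(modelUrl);  // model load no longer fails on the missing kernel
// caveat: if an executed graph path actually hits the stubbed op, the kernel only logs and produces no output
```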
diff --git a/test/test-main.js b/test/test-main.js
index 0792e592..62e93925 100644
--- a/test/test-main.js
+++ b/test/test-main.js
@@ -3,7 +3,6 @@ const process = require('process');
 const canvasJS = require('canvas');
 
 let fetch; // fetch is dynamically imported later
-let tensors = 0;
 let config;
 
 const log = (status, ...data) => {
@@ -73,10 +72,9 @@ async function testInstance(human) {
   log('info', 'tfjs version:', human.tf.version.tfjs);
 
   await human.load();
-  tensors = human.tf.engine().state.numTensors;
   if (config.backend === human.tf.getBackend()) log('state', 'passed: set backend:', config.backend);
   else log('error', 'failed: set backend:', config.backend);
-  log('state', 'tensors', tensors);
+  log('state', 'tensors', human.tf.memory().numTensors);
 
   if (human.models) {
     log('state', 'passed: load models');
@@ -107,9 +105,9 @@ async function testWarmup(human, title) {
   return warmup;
 }
 
-async function testDetect(human, input, title) {
+async function testDetect(human, input, title, checkLeak = true) {
   await human.load(config);
-  tensors = human.tf.engine().state.numTensors;
+  const tensors = human.tf.engine().state.numTensors;
   const image = input ? await getImage(human, input) : human.tf.randomNormal([1, 1024, 1024, 3]);
   if (!image) {
     log('error', 'failed: detect: input is null');
@@ -130,6 +128,11 @@ async function testDetect(human, input, title) {
   } else {
     log('error', 'failed: detect', input || 'random', title);
   }
+  // check tensor leaks
+  if (checkLeak) {
+    const leak = human.tf.engine().state.numTensors - tensors;
+    if (leak !== 0) log('error', 'failed: memory leak', leak);
+  }
   return detect;
 }
 const evt = { image: 0, detect: 0, warmup: 0 };
@@ -170,8 +173,8 @@ async function test(Human, inputConfig) {
   await human.load();
   const models = Object.keys(human.models).map((model) => ({ name: model, loaded: (human.models[model] !== null) }));
   const loaded = models.filter((model) => model.loaded);
-  if (models.length === 15 && loaded.length === 7) log('state', 'passed: models loaded', models.length, loaded.length);
-  else log('error', 'failed: models loaded', models.length, loaded.length);
+  if (models.length === 19 && loaded.length === 10) log('state', 'passed: models loaded', models);
+  else log('error', 'failed: models loaded', models);
 
   // test warmup sequences
   await testInstance(human);
@@ -315,15 +318,15 @@
   // test async multiple instances
   log('info', 'test: concurrent');
   await Promise.all([
-    testDetect(human, 'samples/ai-face.jpg', 'default'),
-    testDetect(first, 'samples/ai-face.jpg', 'default'),
-    testDetect(second, 'samples/ai-face.jpg', 'default'),
-    testDetect(human, 'samples/ai-body.jpg', 'default'),
-    testDetect(first, 'samples/ai-body.jpg', 'default'),
-    testDetect(second, 'samples/ai-body.jpg', 'default'),
-    testDetect(human, 'samples/ai-upper.jpg', 'default'),
-    testDetect(first, 'samples/ai-upper.jpg', 'default'),
-    testDetect(second, 'samples/ai-upper.jpg', 'default'),
+    testDetect(human, 'samples/ai-face.jpg', 'default', false),
+    testDetect(first, 'samples/ai-face.jpg', 'default', false),
+    testDetect(second, 'samples/ai-face.jpg', 'default', false),
+    testDetect(human, 'samples/ai-body.jpg', 'default', false),
+    testDetect(first, 'samples/ai-body.jpg', 'default', false),
+    testDetect(second, 'samples/ai-body.jpg', 'default', false),
+    testDetect(human, 'samples/ai-upper.jpg', 'default', false),
+    testDetect(first, 'samples/ai-upper.jpg', 'default', false),
+    testDetect(second, 'samples/ai-upper.jpg', 'default', false),
   ]);
 
   // test monkey-patch
@@ -346,11 +349,6 @@ async function test(Human, inputConfig) {
   // tests end
   const t1 = process.hrtime.bigint();
 
-  // check tensor leaks
-  const leak = human.tf.engine().state.numTensors - tensors;
-  if (leak === 0) log('state', 'passeed: no memory leak');
-  else log('error', 'failed: memory leak', leak);
-
   // check if all instances reported same
   const tensors1 = human.tf.engine().state.numTensors;
   const tensors2 = first.tf.engine().state.numTensors;
@@ -360,6 +358,7 @@ async function test(Human, inputConfig) {
 
   // report end
   log('info', 'events:', evt);
+  log('info', 'tensors', human.tf.memory().numTensors);
   log('info', 'test complete:', Math.trunc(Number(t1 - t0) / 1000 / 1000), 'ms');
 }
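The per-call leak check added to `testDetect()` is the usual TFJS bookkeeping pattern: snapshot the engine's tensor count before the work and compare afterwards. Condensed sketch (same `human`, `image`, and `log` helpers as in the test above):

```ts
// sketch: detect a tensor leak across a single async operation
const before = human.tf.engine().state.numTensors;
await human.detect(image);
const leak = human.tf.engine().state.numTensors - before;
if (leak !== 0) log('error', 'failed: memory leak', leak);
```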