fix unregistered ops in tfjs

pull/356/head
Vladimir Mandic 2021-07-29 16:06:03 -04:00
parent 20e417ca1c
commit c7613f93e2
16 changed files with 75 additions and 75 deletions
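
The pattern throughout: chained tensor methods (enhanced.squeeze(), tensor.div(255), tensor.dispose()) are replaced with their functional equivalents (tf.squeeze(enhanced), tf.div(tensor, 255), tf.dispose(tensor)), and the pinned tfjs-backend-wasm CDN path is bumped from 3.7.0 to 3.8.0. Chained methods resolve through tfjs' chained-ops registration, which custom or tree-shaken bundles may omit, surfacing as "unregistered op" errors at runtime; functional ops invoke the kernels directly. A minimal sketch of the pattern, assuming the standard @tensorflow/tfjs package (the tensor below is illustrative, not taken from the diff):

import * as tf from '@tensorflow/tfjs';

// before: chained methods, which can fail in a bundle that never
// registered the chained-ops API:
//   const norm = tensor.squeeze().div(255);
//   norm.dispose();

// after: functional ops, safe regardless of bundle composition
const tensor = tf.zeros([1, 4, 4, 3]); // illustrative stand-in input
const squeezed = tf.squeeze(tensor);   // instead of tensor.squeeze()
const norm = tf.div(squeezed, 255);    // instead of .div(255)
tf.dispose([tensor, squeezed, norm]);  // tf.dispose accepts a tensor or an array

A side benefit: tf.dispose() silently ignores null and undefined, so the unguarded disposal calls in the hunks below stay safe when an optional tensor was never produced.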

View File

@@ -14,7 +14,7 @@ const userConfig = {
warmup: 'none',
debug: true,
modelBasePath: '../../models/',
- wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.7.0/dist/',
+ wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.8.0/dist/',
face: {
enabled: true,
detector: { rotation: true, return: true },
@@ -75,10 +75,10 @@ async function analyze(face) {
navigator.clipboard.writeText(`{"name":"unknown", "source":"${face.fileName}", "embedding":[${embedding}]},`);
if (enhanced) {
const c = document.getElementById('orig');
- const squeeze = enhanced.squeeze().div(255);
+ const squeeze = human.tf.div(human.tf.squeeze(enhanced), 255);
await human.tf.browser.toPixels(squeeze, c);
- enhanced.dispose();
- squeeze.dispose();
+ human.tf.dispose(enhanced);
+ human.tf.dispose(squeeze);
const ctx = c.getContext('2d');
ctx.font = 'small-caps 0.4rem "Lato"';
ctx.fillStyle = 'rgba(255, 255, 255, 1)';

View File

@@ -30,7 +30,7 @@ let human;
let userConfig = {
warmup: 'none',
backend: 'humangl',
- wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.7.0/dist/',
+ wasmPath: 'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.8.0/dist/',
/*
async: false,
cacheSensitivity: 0,
@@ -169,10 +169,10 @@ function status(msg) {
}
const compare = { enabled: false, original: null };
- async function calcSimmilariry(result) {
+ async function calcSimmilarity(result) {
document.getElementById('compare-container').style.display = compare.enabled ? 'block' : 'none';
if (!compare.enabled) return;
- if (!result || !result.face || !result.face[0].embedding) return;
+ if (!result || !result.face || !result.face[0] || !result.face[0].embedding) return;
if (!(result.face.length > 0) || (result.face[0].embedding.length <= 64)) return;
if (!compare.original) {
compare.original = result;
@@ -181,12 +181,12 @@ async function calcSimmilariry(result) {
const enhanced = human.enhance(result.face[0]);
if (enhanced) {
const c = document.getElementById('orig');
- const squeeze = enhanced.squeeze();
- const norm = squeeze.div(255);
+ const squeeze = human.tf.squeeze(enhanced);
+ const norm = human.tf.div(squeeze, 255);
human.tf.browser.toPixels(norm, c);
- enhanced.dispose();
- squeeze.dispose();
- norm.dispose();
+ human.tf.dispose(enhanced);
+ human.tf.dispose(squeeze);
+ human.tf.dispose(norm);
}
} else {
document.getElementById('compare-canvas').getContext('2d').drawImage(compare.original.canvas, 0, 0, 200, 200);
@@ -246,7 +246,7 @@ async function drawResults(input) {
*/
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
const person = result.persons; // explicitly invoke person getter
- await calcSimmilariry(result);
+ await calcSimmilarity(result);
// update log
const engine = human.tf.engine();

View File

@@ -53,7 +53,7 @@ async function detect(img) {
process.send({ image: img, detected: result }); // send results back to main
process.send({ ready: true }); // send signal back to main that this worker is now idle and ready for next image
}
- tensor.dispose();
+ tf.dispose(tensor);
}
async function main() {

View File

@@ -66,13 +66,13 @@ async function process(jpegBuffer) {
busy = true;
const decoded = tf.node.decodeJpeg(jpegBuffer, 3); // decode jpeg buffer to raw tensor
const tensor = tf.expandDims(decoded, 0); // almost all tf models use first dimension as batch number so we add it
- decoded.dispose();
+ tf.dispose(decoded);
log.state('input frame:', ++count, 'size:', jpegBuffer.length, 'decoded shape:', tensor.shape);
const res = await human.detect(tensor);
log.data('gesture', JSON.stringify(res.gesture));
// do processing here
- tensor.dispose(); // must dispose tensor
+ tf.dispose(tensor); // must dispose tensor
busy = false;
}

View File

@@ -42,13 +42,13 @@ export async function predict(image: Tensor, config: Config | any) {
const obj = { age: 0 };
if (config.face.age.enabled) ageT = await model.predict(enhance);
- enhance.dispose();
+ tf.dispose(enhance);
if (ageT) {
const data = ageT.dataSync();
obj.age = Math.trunc(10 * data[0]) / 10;
}
- ageT.dispose();
+ tf.dispose(ageT);
last = obj;
resolve(obj);

View File

@@ -68,7 +68,7 @@ export async function predict(image: Tensor, config: Config): Promise<Body[]> {
let resT;
if (config.body.enabled) resT = await model.predict(tensor);
- tensor.dispose();
+ tf.dispose(tensor);
if (resT) {
keypoints.length = 0;

View File

@@ -36,20 +36,20 @@ export async function predict(image: Tensor, config: Config, idx, count) {
return new Promise(async (resolve) => {
const resize = tf.image.resizeBilinear(image, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
const [red, green, blue] = tf.split(resize, 3, 3);
- resize.dispose();
+ tf.dispose(resize);
// weighted rgb to grayscale: https://www.mathworks.com/help/matlab/ref/rgb2gray.html
const redNorm = tf.mul(red, rgb[0]);
const greenNorm = tf.mul(green, rgb[1]);
const blueNorm = tf.mul(blue, rgb[2]);
- red.dispose();
- green.dispose();
- blue.dispose();
+ tf.dispose(red);
+ tf.dispose(green);
+ tf.dispose(blue);
const grayscale = tf.addN([redNorm, greenNorm, blueNorm]);
- redNorm.dispose();
- greenNorm.dispose();
- blueNorm.dispose();
- const normalize = tf.tidy(() => grayscale.sub(0.5).mul(2));
- grayscale.dispose();
+ tf.dispose(redNorm);
+ tf.dispose(greenNorm);
+ tf.dispose(blueNorm);
+ const normalize = tf.tidy(() => tf.mul(tf.sub(grayscale, 0.5), 2));
+ tf.dispose(grayscale);
const obj: Array<{ score: number, emotion: string }> = [];
if (config.face.emotion.enabled) {
const emotionT = await model.predict(normalize); // result is already in range 0..1, no need for additional activation
@@ -60,7 +60,7 @@ export async function predict(image: Tensor, config: Config, idx, count) {
}
obj.sort((a, b) => b.score - a.score);
}
- normalize.dispose();
+ tf.dispose(normalize);
last[idx] = obj;
lastCount = count;
resolve(obj);

View File

@@ -104,7 +104,7 @@ export function enhance(input): Tensor {
const lighten = darken.div(darken.max());
*/
- const norm = crop.mul(255);
+ const norm = tf.mul(crop, 255);
return norm;
});
@@ -140,7 +140,7 @@ export async function predict(image: Tensor, config: Config, idx, count) {
obj.gender = gender[0] <= 0.5 ? 'female' : 'male';
obj.genderScore = Math.min(0.99, confidence);
}
- const age = resT.find((t) => t.shape[1] === 100).argMax(1).dataSync()[0];
+ const age = tf.argMax(resT.find((t) => t.shape[1] === 100), 1).dataSync()[0];
const all = resT.find((t) => t.shape[1] === 100).dataSync();
obj.age = Math.round(all[age - 1] > all[age + 1] ? 10 * age - 100 * all[age - 1] : 10 * age + 100 * all[age + 1]) / 10;

View File

@@ -47,7 +47,7 @@ export async function predict(image: Tensor, config: Config | any) {
const greenNorm = tf.mul(green, rgb[1]);
const blueNorm = tf.mul(blue, rgb[2]);
const grayscale = tf.addN([redNorm, greenNorm, blueNorm]);
- const normalize = grayscale.sub(0.5).mul(2); // range grayscale:-1..1
+ const normalize = tf.mul(tf.sub(grayscale, 0.5), 2); // range grayscale:-1..1
return normalize;
});
} else {
@@ -59,7 +59,7 @@ export async function predict(image: Tensor, config: Config | any) {
const obj = { gender: '', confidence: 0 };
if (config.face.gender.enabled) genderT = await model.predict(enhance);
- enhance.dispose();
+ tf.dispose(enhance);
if (genderT) {
if (!Array.isArray(genderT)) {
@@ -78,7 +78,7 @@ export async function predict(image: Tensor, config: Config | any) {
obj.confidence = Math.min(0.99, confidence);
}
}
- genderT.dispose();
+ tf.dispose(genderT);
} else {
const gender = genderT[0].dataSync();
const confidence = Math.trunc(200 * Math.abs((gender[0] - 0.5))) / 100;

View File

@@ -35,7 +35,7 @@ export class HandDetector {
normalizeLandmarks(rawPalmLandmarks, index) {
return tf.tidy(() => {
- const landmarks = tf.add(tf.div(rawPalmLandmarks.reshape([-1, 7, 2]), this.inputSizeTensor), this.anchors[index]);
+ const landmarks = tf.add(tf.div(tf.reshape(rawPalmLandmarks, [-1, 7, 2]), this.inputSizeTensor), this.anchors[index]);
return tf.mul(landmarks, this.inputSizeTensor);
});
}
@@ -43,38 +43,38 @@ export class HandDetector {
async getBoxes(input, config) {
const batched = this.model.predict(input) as Tensor;
const predictions = tf.squeeze(batched);
- batched.dispose();
- const scoresT = tf.tidy(() => tf.sigmoid(tf.slice(predictions, [0, 0], [-1, 1])).squeeze());
+ tf.dispose(batched);
+ const scoresT = tf.tidy(() => tf.squeeze(tf.sigmoid(tf.slice(predictions, [0, 0], [-1, 1]))));
const scores = scoresT.dataSync();
const rawBoxes = tf.slice(predictions, [0, 1], [-1, 4]);
const boxes = this.normalizeBoxes(rawBoxes);
- rawBoxes.dispose();
+ tf.dispose(rawBoxes);
const filteredT = await tf.image.nonMaxSuppressionAsync(boxes, scores, config.hand.maxDetected, config.hand.iouThreshold, config.hand.minConfidence);
const filtered = filteredT.arraySync();
- scoresT.dispose();
- filteredT.dispose();
+ tf.dispose(scoresT);
+ tf.dispose(filteredT);
const hands: Array<{ box: Tensor, palmLandmarks: Tensor, confidence: number }> = [];
for (const index of filtered) {
if (scores[index] >= config.hand.minConfidence) {
const matchingBox = tf.slice(boxes, [index, 0], [1, -1]);
const rawPalmLandmarks = tf.slice(predictions, [index, 5], [1, 14]);
- const palmLandmarks = tf.tidy(() => this.normalizeLandmarks(rawPalmLandmarks, index).reshape([-1, 2]));
- rawPalmLandmarks.dispose();
+ const palmLandmarks = tf.tidy(() => tf.reshape(this.normalizeLandmarks(rawPalmLandmarks, index), [-1, 2]));
+ tf.dispose(rawPalmLandmarks);
hands.push({ box: matchingBox, palmLandmarks, confidence: scores[index] });
}
}
- predictions.dispose();
- boxes.dispose();
+ tf.dispose(predictions);
+ tf.dispose(boxes);
return hands;
}
async estimateHandBounds(input, config): Promise<{ startPoint: number[]; endPoint: number[]; palmLandmarks: number[]; confidence: number }[]> {
const inputHeight = input.shape[1];
const inputWidth = input.shape[2];
- const image = tf.tidy(() => input.resizeBilinear([this.inputSize, this.inputSize]).div(127.5).sub(1));
+ const image = tf.tidy(() => tf.sub(tf.div(tf.image.resizeBilinear(input, [this.inputSize, this.inputSize]), 127.5), 1));
const predictions = await this.getBoxes(image, config);
- image.dispose();
+ tf.dispose(image);
const hands: Array<{ startPoint: number[]; endPoint: number[]; palmLandmarks: number[]; confidence: number }> = [];
if (!predictions || predictions.length === 0) return hands;
for (const prediction of predictions) {
@@ -82,8 +82,8 @@ export class HandDetector {
const startPoint = boxes.slice(0, 2);
const endPoint = boxes.slice(2, 4);
const palmLandmarks = prediction.palmLandmarks.arraySync();
- prediction.box.dispose();
- prediction.palmLandmarks.dispose();
+ tf.dispose(prediction.box);
+ tf.dispose(prediction.palmLandmarks);
hands.push(box.scaleBoxCoordinates({ startPoint, endPoint, palmLandmarks, confidence: prediction.confidence }, [inputWidth / this.inputSize, inputHeight / this.inputSize]));
}
return hands;

View File

@@ -113,18 +113,18 @@ export class HandPipeline {
const rotationMatrix = util.buildRotationMatrix(-angle, palmCenter);
const newBox = useFreshBox ? this.getBoxForPalmLandmarks(currentBox.palmLandmarks, rotationMatrix) : currentBox;
const croppedInput = box.cutBoxFromImageAndResize(newBox, rotatedImage, [this.inputSize, this.inputSize]);
- const handImage = croppedInput.div(255);
- croppedInput.dispose();
- rotatedImage.dispose();
+ const handImage = tf.div(croppedInput, 255);
+ tf.dispose(croppedInput);
+ tf.dispose(rotatedImage);
const [confidenceT, keypoints] = await this.handPoseModel.predict(handImage) as Array<Tensor>;
- handImage.dispose();
+ tf.dispose(handImage);
const confidence = confidenceT.dataSync()[0];
- confidenceT.dispose();
+ tf.dispose(confidenceT);
if (confidence >= config.hand.minConfidence) {
const keypointsReshaped = tf.reshape(keypoints, [-1, 3]);
const rawCoords = keypointsReshaped.arraySync();
- keypoints.dispose();
- keypointsReshaped.dispose();
+ tf.dispose(keypoints);
+ tf.dispose(keypointsReshaped);
const coords = this.transformRawCoords(rawCoords, newBox, angle, rotationMatrix);
const nextBoundingBox = this.getBoxForHandLandmarks(coords);
this.storedBoxes[i] = { ...nextBoundingBox, confidence };
@@ -137,7 +137,7 @@ export class HandPipeline {
} else {
this.storedBoxes[i] = null;
}
- keypoints.dispose();
+ tf.dispose(keypoints);
} else {
// const enlarged = box.enlargeBox(box.squarifyBox(box.shiftBox(currentBox, HAND_BOX_SHIFT_VECTOR)), handBoxEnlargeFactor);
const enlarged = box.enlargeBox(box.squarifyBox(currentBox), handBoxEnlargeFactor);

View File

@@ -357,7 +357,7 @@ export class Human {
#skipFrame = async (input) => {
if (this.config.cacheSensitivity === 0) return false;
const resizeFact = 32;
- const reduced: Tensor = input.resizeBilinear([Math.trunc(input.shape[1] / resizeFact), Math.trunc(input.shape[2] / resizeFact)]);
+ const reduced: Tensor = tf.image.resizeBilinear(input, [Math.trunc(input.shape[1] / resizeFact), Math.trunc(input.shape[2] / resizeFact)]);
// use tensor sum
/*
const sumT = this.tf.sum(reduced);
@@ -448,7 +448,7 @@ export class Human {
if (elapsedTime > 0) this.performance.segmentation = elapsedTime;
if (process.canvas) {
// replace input
- process.tensor.dispose();
+ tf.dispose(process.tensor);
process = image.process(process.canvas, this.config);
}
this.analyze('End Segmentation:');

View File

@@ -161,10 +161,10 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
pixels = tf.browser ? tf.browser.fromPixels(data) : null;
}
if (pixels) {
- const casted = pixels.toFloat();
- tensor = casted.expandDims(0);
- pixels.dispose();
- casted.dispose();
+ const casted = tf.cast(pixels, 'float32');
+ tensor = tf.expandDims(casted, 0);
+ tf.dispose(pixels);
+ tf.dispose(casted);
}
}
const canvas = config.filter.return ? outCanvas : null;

View File

@@ -46,7 +46,7 @@ export async function predict(image: Tensor, config: Config): Promise<Body[]> {
let resT;
if (config.body.enabled) resT = await model.predict(tensor);
- tensor.dispose();
+ tf.dispose(tensor);
if (resT) {
keypoints.length = 0;

View File

@@ -30,20 +30,20 @@ async function process(res: Tensor, inputSize, outputShape, config: Config) {
const results: Array<Item> = [];
const detections = res.arraySync();
const squeezeT = tf.squeeze(res);
- res.dispose();
+ tf.dispose(res);
const arr = tf.split(squeezeT, 6, 1); // x1, y1, x2, y2, score, class
- squeezeT.dispose();
+ tf.dispose(squeezeT);
const stackT = tf.stack([arr[1], arr[0], arr[3], arr[2]], 1); // reorder dims as tf.nms expects y, x
const boxesT = stackT.squeeze();
const scoresT = arr[4].squeeze();
const classesT = arr[5].squeeze();
- arr.forEach((t) => t.dispose());
+ arr.forEach((t) => tf.dispose(t));
const nmsT = await tf.image.nonMaxSuppressionAsync(boxesT, scoresT, config.object.maxDetected, config.object.iouThreshold, config.object.minConfidence);
- boxesT.dispose();
- scoresT.dispose();
- classesT.dispose();
+ tf.dispose(boxesT);
+ tf.dispose(scoresT);
+ tf.dispose(classesT);
const nms = nmsT.dataSync();
- nmsT.dispose();
+ tf.dispose(nmsT);
let i = 0;
for (const id of nms) {
const score = Math.trunc(100 * detections[0][id][4]) / 100;
@@ -80,7 +80,7 @@ export async function predict(input: Tensor, config: Config): Promise<Item[]> {
const outputSize = [input.shape[2], input.shape[1]];
const resize = tf.image.resizeBilinear(input, [model.inputSize, model.inputSize]);
const objectT = config.object.enabled ? model.execute(resize, ['tower_0/detections']) : null;
- resize.dispose();
+ tf.dispose(resize);
const obj = await process(objectT, model.inputSize, outputSize, config);
last = obj;

View File

@@ -111,14 +111,14 @@ export async function predict(image: Tensor, config: Config): Promise<Item[]> {
return new Promise(async (resolve) => {
const outputSize = [image.shape[2], image.shape[1]];
const resize = tf.image.resizeBilinear(image, [model.inputSize, model.inputSize], false);
- const norm = resize.div(255);
+ const norm = tf.div(resize, 255);
const transpose = norm.transpose([0, 3, 1, 2]);
- norm.dispose();
- resize.dispose();
+ tf.dispose(norm);
+ tf.dispose(resize);
let objectT;
if (config.object.enabled) objectT = await model.predict(transpose);
- transpose.dispose();
+ tf.dispose(transpose);
const obj = await process(objectT, model.inputSize, outputSize, config);
last = obj;
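
Since the refactor is intended to be behavior-neutral (75 additions balancing 75 deletions), one way to sanity-check it is to compare live tensor counts around a detection call. A hedged sketch using standard tfjs-core accounting, to be run inside an async function; this check is not part of the commit:

// not part of this commit: quick leak check around a detection call
const before = tf.memory().numTensors;    // live tensors before
const result = await human.detect(input); // any detect call from the demos above
const after = tf.memory().numTensors;     // a steadily growing delta across
                                          // repeated calls suggests a leak
if (after > before) console.warn('tensors not released:', after - before);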